diff --git a/deploy/esignet-apitestrig/README.md b/deploy/esignet-apitestrig/README.md
new file mode 100644
index 000000000..a5e277981
--- /dev/null
+++ b/deploy/esignet-apitestrig/README.md
@@ -0,0 +1,44 @@
+# APITESTRIG
+
+## Introduction
+ApiTestRig will test the working of APIs of the MOSIP modules.
+
+## Install
+* Review `values.yaml` and make sure the required modules are enabled for apitestrig operation.
+* Install
+```sh
+./install.sh
+```
+* During the execution of the `install.sh` script, a prompt appears requesting information regarding the presence of a public domain and a valid SSL certificate on the server.
+* If the server lacks a public domain and a valid SSL certificate, it is advisable to select the `n` option. Opting for it will enable the `init-container` with an `emptyDir` volume and include it in the deployment process.
+* The init-container will proceed to download the server's self-signed SSL certificate and mount it to the specified location within the container's Java keystore (i.e., `cacerts`) file.
+* This particular functionality caters to scenarios where the script needs to be employed on a server utilizing self-signed SSL certificates.
+
+## Uninstall
+* To uninstall ApiTestRig, run `delete.sh` script.
+```sh
+./delete.sh
+```
+
+## Run apitestrig manually
+
+#### Rancher UI
+* Run apitestrig manually via Rancher UI.
+ ![apitestrig-2.png](../../docs/apitestrig-2.png)
+* There are two modes of apitestrig `smoke` & `smokeAndRegression`.
+* By default, apitestrig will execute with `smokeAndRegression`.
+ If you want to run apitestrig with only `smoke`.
+ You have to update the `apitestrig` configmap and rerun the specific apitestrig job.
+
+#### CLI
+* Download Kubernetes cluster `kubeconfig` file from `rancher dashboard` to your local.
+ ![apitestrig-1.png](../../docs/apitestrig-1.png)
+* Install `kubectl` package to your local machine.
+* Run apitestrig manually via CLI by creating a new job from an existing k8s cronjob.
+ ```
+  kubectl --kubeconfig=<kubeconfig-file> -n apitestrig create job --from=cronjob/<cronjob-name> <job-name>
+ ```
+ example:
+ ```
+ kubectl --kubeconfig=/home/xxx/Downloads/qa4.config -n apitestrig create job --from=cronjob/cronjob-apitestrig-masterdata cronjob-apitestrig-masterdata
+ ```
\ No newline at end of file
diff --git a/deploy/esignet-apitestrig/delete.sh b/deploy/esignet-apitestrig/delete.sh
new file mode 100755
index 000000000..bdd4ca061
--- /dev/null
+++ b/deploy/esignet-apitestrig/delete.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Uninstalls apitestrig
+## Usage: ./delete.sh [kubeconfig]
+
+if [ $# -ge 1 ] ; then
+ export KUBECONFIG=$1
+fi
+
+function deleting_apitestrig() {
+ NS=esignet
+ while true; do
+ read -p "Are you sure you want to delete apitestrig helm charts?(Y/n) " yn
+    if [ "$yn" = "Y" ]
+ then
+ helm -n $NS delete esignet-apitestrig
+ break
+ else
+ break
+ fi
+ done
+ return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace # trace ERR through 'time command' and other functions
+set -o pipefail # trace ERR through pipes
+deleting_apitestrig # calling function
diff --git a/deploy/esignet-apitestrig/install.sh b/deploy/esignet-apitestrig/install.sh
new file mode 100755
index 000000000..04e570236
--- /dev/null
+++ b/deploy/esignet-apitestrig/install.sh
@@ -0,0 +1,181 @@
+#!/bin/bash
+# Installs apitestrig
+## Usage: ./install.sh [kubeconfig]
+
+if [ $# -ge 1 ] ; then
+ export KUBECONFIG=$1
+fi
+
+NS=esignet
+CHART_VERSION=1.5.0-develop
+COPY_UTIL=../copy_cm_func.sh
+
+echo Create $NS namespace
+kubectl create ns $NS
+
+function installing_apitestrig() {
+ echo Istio label
+ kubectl label ns $NS istio-injection=disabled --overwrite
+ helm repo update
+
+ echo Copy Configmaps
+ $COPY_UTIL configmap global default $NS
+ $COPY_UTIL configmap keycloak-host keycloak $NS
+ $COPY_UTIL configmap artifactory-share artifactory $NS
+ $COPY_UTIL configmap config-server-share config-server $NS
+
+  echo Copy Secrets
+ $COPY_UTIL secret keycloak-client-secrets keycloak $NS
+ $COPY_UTIL secret s3 s3 $NS
+ $COPY_UTIL secret postgres-postgresql postgres $NS
+
+ echo "Delete s3, db, & apitestrig configmap if exists"
+ kubectl -n $NS delete --ignore-not-found=true configmap s3
+ kubectl -n $NS delete --ignore-not-found=true configmap db
+ kubectl -n $NS delete --ignore-not-found=true configmap apitestrig
+
+ DB_HOST=$( kubectl -n default get cm global -o json |jq -r '.data."mosip-api-internal-host"' )
+ API_INTERNAL_HOST=$( kubectl -n default get cm global -o json |jq -r '.data."mosip-api-internal-host"' )
+ ENV_USER=$( kubectl -n default get cm global -o json |jq -r '.data."mosip-api-internal-host"' | awk -F '.' '/api-internal/{print $1"."$2}')
+
+ read -p "Please enter the time(hr) to run the cronjob every day (time: 0-23) : " time
+ if [ -z "$time" ]; then
+    echo "ERROR: Time cannot be empty; EXITING;";
+ exit 1;
+ fi
+ if ! [ $time -eq $time ] 2>/dev/null; then
+ echo "ERROR: Time $time is not a number; EXITING;";
+ exit 1;
+ fi
+ if [ $time -gt 23 ] || [ $time -lt 0 ] ; then
+ echo "ERROR: Time should be in range ( 0-23 ); EXITING;";
+ exit 1;
+ fi
+
+ echo "Do you have public domain & valid SSL? (Y/n) "
+ echo "Y: if you have public domain & valid ssl certificate"
+ echo "n: If you don't have a public domain and a valid SSL certificate. Note: It is recommended to use this option only in development environments."
+ read -p "" flag
+
+ if [ -z "$flag" ]; then
+    echo "'flag' was not provided; EXITING;"
+ exit 1;
+ fi
+ ENABLE_INSECURE=''
+ if [ "$flag" = "n" ]; then
+ ENABLE_INSECURE='--set enable_insecure=true';
+ fi
+
+ read -p "Please provide the retention days to remove old reports ( Default: 3 )" reportExpirationInDays
+
+ if [[ -z $reportExpirationInDays ]]; then
+ reportExpirationInDays=3
+ fi
+ if ! [[ $reportExpirationInDays =~ ^[0-9]+$ ]]; then
+    echo "The variable \"reportExpirationInDays\" should contain only numbers; EXITING";
+ exit 1;
+ fi
+
+ read -p "Please provide slack webhook URL to notify server end issues on your slack channel : " slackWebhookUrl
+
+  if [ -z "$slackWebhookUrl" ]; then
+ echo "slack webhook URL not provided; EXITING;"
+ exit 1;
+ fi
+
+ valid_inputs=("yes" "no")
+ eSignetDeployed=""
+
+ while [[ ! " ${valid_inputs[@]} " =~ " ${eSignetDeployed} " ]]; do
+ read -p "Is the eSignet service deployed? (yes/no): " eSignetDeployed
+ eSignetDeployed=${eSignetDeployed,,} # Convert input to lowercase
+ done
+
+ if [[ $eSignetDeployed == "yes" ]]; then
+ echo "eSignet service is deployed. Proceeding with installation..."
+ else
+    echo "eSignet service is not deployed. Hence, eSignet-related test cases will be skipped..."
+ fi
+ read -p "Is values.yaml for apitestrig chart set correctly as part of pre-requisites? (Y/n) : " yn;
+ if [[ $yn = "Y" ]] || [[ $yn = "y" ]] ; then
+ NFS_OPTION=''
+ S3_OPTION=''
+ config_complete=false # flag to check if S3 or NFS is configured
+ while [ "$config_complete" = false ]; do
+ read -p "Do you have S3 details for storing apitestrig reports? (Y/n) : " ans
+ if [[ "$ans" == "y" || "$ans" == "Y" ]]; then
+ read -p "Please provide S3 host: " s3_host
+ if [[ -z $s3_host ]]; then
+ echo "S3 host not provided; EXITING;"
+ exit 1;
+ fi
+ read -p "Please provide S3 region: " s3_region
+ if [[ $s3_region == *[' !@#$%^&*()+']* ]]; then
+ echo "S3 region should not contain spaces or special characters; EXITING;"
+ exit 1;
+ fi
+
+ read -p "Please provide S3 access key: " s3_user_key
+ if [[ -z $s3_user_key ]]; then
+ echo "S3 access key not provided; EXITING;"
+ exit 1;
+ fi
+ S3_OPTION="--set apitestrig.configmaps.s3.s3-host=$s3_host --set apitestrig.configmaps.s3.s3-user-key=$s3_user_key --set apitestrig.configmaps.s3.s3-region=$s3_region"
+ push_reports_to_s3="yes"
+ config_complete=true
+ elif [[ "$ans" == "n" || "$ans" == "N" ]]; then
+ push_reports_to_s3="no"
+ read -p "Since S3 details are not available, do you want to use NFS directory mount for storing reports? (y/n) : " answer
+ if [[ $answer == "Y" ]] || [[ $answer == "y" ]]; then
+ read -p "Please provide NFS Server IP: " nfs_server
+ if [[ -z $nfs_server ]]; then
+ echo "NFS server not provided; EXITING."
+ exit 1;
+ fi
+ read -p "Please provide NFS directory to store reports from NFS server (e.g. /srv/nfs//apitestrig/), make sure permission is 777 for the folder: " nfs_path
+ if [[ -z $nfs_path ]]; then
+ echo "NFS Path not provided; EXITING."
+ exit 1;
+ fi
+ NFS_OPTION="--set apitestrig.volumes.reports.nfs.server=$nfs_server --set apitestrig.volumes.reports.nfs.path=$nfs_path"
+ config_complete=true
+ else
+ echo "Please rerun the script with either S3 or NFS server details."
+ exit 1;
+ fi
+ else
+ echo "Invalid input. Please respond with Y (yes) or N (no)."
+ fi
+ done
+ echo Installing esignet apitestrig
+ helm -n $NS install esignet-apitestrig mosip/apitestrig \
+ --set crontime="0 $time * * *" \
+ -f values.yaml \
+ --version $CHART_VERSION \
+ $NFS_OPTION \
+ $S3_OPTION \
+ --set apitestrig.variables.push_reports_to_s3=$push_reports_to_s3 \
+ --set apitestrig.configmaps.db.db-server="$DB_HOST" \
+ --set apitestrig.configmaps.db.db-su-user="postgres" \
+ --set apitestrig.configmaps.db.db-port="5432" \
+ --set apitestrig.configmaps.apitestrig.ENV_USER="$ENV_USER" \
+ --set apitestrig.configmaps.apitestrig.ENV_ENDPOINT="https://$API_INTERNAL_HOST" \
+ --set apitestrig.configmaps.apitestrig.ENV_TESTLEVEL="smokeAndRegression" \
+ --set apitestrig.configmaps.apitestrig.reportExpirationInDays="$reportExpirationInDays" \
+ --set apitestrig.configmaps.apitestrig.slack-webhook-url="$slackWebhookUrl" \
+ --set apitestrig.configmaps.apitestrig.eSignetDeployed="$eSignetDeployed" \
+ --set apitestrig.configmaps.apitestrig.NS="$NS" \
+ $ENABLE_INSECURE
+
+ echo Installed esignet apitestrig.
+ return 0
+ fi
+}
+
+# set commands for error handling.
+set -e
+set -o errexit ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace # trace ERR through 'time command' and other functions
+set -o pipefail # trace ERR through pipes
+installing_apitestrig # calling function
diff --git a/deploy/esignet-apitestrig/values.yaml b/deploy/esignet-apitestrig/values.yaml
new file mode 100644
index 000000000..54a09c8bf
--- /dev/null
+++ b/deploy/esignet-apitestrig/values.yaml
@@ -0,0 +1,15 @@
+modules:
+ esignet:
+ enabled: true
+ image:
+ repository: mosipqa/apitest-esignet
+ tag: develop
+ pullPolicy: Always
+
+resources:
+ limits:
+ cpu: 300m
+ memory: 500Mi
+ requests:
+ cpu: 300m
+ memory: 500Mi
diff --git a/deploy/postgres/.gitignore b/deploy/postgres/.gitignore
index 38b1baeae..6d5b3a287 100644
--- a/deploy/postgres/.gitignore
+++ b/deploy/postgres/.gitignore
@@ -1,4 +1,4 @@
db-common-secrets.yaml
-esignet-postgres-postgresql.yaml
+postgres-postgresql.yaml
postgres-host.yaml
.*.swp
diff --git a/deploy/postgres/chart/istio-addons/templates/gateway.yaml b/deploy/postgres/chart/istio-addons/templates/gateway.yaml
index c1e96ab0e..97619efa1 100644
--- a/deploy/postgres/chart/istio-addons/templates/gateway.yaml
+++ b/deploy/postgres/chart/istio-addons/templates/gateway.yaml
@@ -11,4 +11,4 @@ spec:
name: postgres
protocol: TCP
hosts:
- - {{ .Values.postgresHost }}
+ - {{ .Values.postgresHost }}
\ No newline at end of file
diff --git a/deploy/postgres/chart/istio-addons/templates/vs.yaml b/deploy/postgres/chart/istio-addons/templates/vs.yaml
index 30843dd48..5cae69b5a 100644
--- a/deploy/postgres/chart/istio-addons/templates/vs.yaml
+++ b/deploy/postgres/chart/istio-addons/templates/vs.yaml
@@ -1,7 +1,7 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
- name: esignet-postgres
+ name: postgres
spec:
hosts:
- "*"
@@ -12,8 +12,7 @@ spec:
- port: 5432
route:
- destination:
- host: esignet-postgres-postgresql
+ host: postgres-postgresql
port:
number: 5432
-
diff --git a/deploy/postgres/chart/istio-addons/values.yaml b/deploy/postgres/chart/istio-addons/values.yaml
index c05d63fab..7770edb91 100644
--- a/deploy/postgres/chart/istio-addons/values.yaml
+++ b/deploy/postgres/chart/istio-addons/values.yaml
@@ -1 +1 @@
-postgresHost: esignet-postgres.sandbox.xyz.net
+postgresHost: postgres.sandbox.xyz.net
diff --git a/deploy/postgres/delete.sh b/deploy/postgres/delete.sh
index 41020f325..61186c043 100755
--- a/deploy/postgres/delete.sh
+++ b/deploy/postgres/delete.sh
@@ -7,16 +7,16 @@ if [ $# -ge 1 ] ; then
fi
function deleting_postgres() {
- NS=esignet
+ NS=postgres
while true; do
read -p "CAUTION: PVC, PV will get deleted. If your PV is not in 'Retain' mode all Postgres data will be lost. Are you sure? (Y/n): " yn
if [ "$yn" = "Y" ] || [ "$yn" = "y" ]; then
echo "Deleting Postgres resources..."
- helm -n $NS delete esignet-postgres || echo "Failed to delete esignet-postgres helm release"
+ helm -n $NS delete postgres || echo "Failed to delete postgres helm release"
helm -n $NS delete istio-addons || echo "Failed to delete istio-addons helm release"
- kubectl -n $NS delete pvc data-esignet-postgres-postgresql-0 || echo "Failed to delete PVC"
- helm -n $NS delete esignet-postgres-init || echo "Failed to delete esignet-postgres-init helm release"
- kubectl -n $NS delete secret esignet-postgres-postgresql || echo "Failed to delete esignet-postgres-init secret"
+ kubectl -n $NS delete pvc data-postgres-postgresql-0 || echo "Failed to delete PVC"
+ helm -n $NS delete postgres-init || echo "Failed to delete postgres-init helm release"
+ kubectl -n $NS delete secret postgres-postgresql || echo "Failed to delete postgres-init secret"
kubectl -n $NS delete secret db-common-secrets || echo "Failed to delete db-common-secrets secret"
break
elif [ "$yn" = "N" ] || [ "$yn" = "n" ]; then
diff --git a/deploy/postgres/generate-secret-cm.py b/deploy/postgres/generate-secret-cm.py
index 46403b2a6..acf85875f 100644
--- a/deploy/postgres/generate-secret-cm.py
+++ b/deploy/postgres/generate-secret-cm.py
@@ -73,7 +73,7 @@ def create_or_update_configmap(configmap_name, namespace, postgres_host, postgre
os.system(f"kubectl create -f {yaml_file} --save-config")
# Main script logic
-namespace = "esignet"
+namespace = "postgres"
check_namespace(namespace)
# Handle db-dbuser-password secret
@@ -91,7 +91,7 @@ def create_or_update_configmap(configmap_name, namespace, postgres_host, postgre
create_or_update_secret(db_secret_name, namespace, "db-dbuser-password", password)
# Handle postgres-password secret
-postgres_secret_name = "esignet-postgres-postgresql"
+postgres_secret_name = "postgres-postgresql"
if secret_exists(postgres_secret_name, namespace):
overwrite = input(f"Secret '{postgres_secret_name}' already exists in namespace '{namespace}'. Overwrite? (y/n): ")
if overwrite.lower() == 'y':
@@ -105,7 +105,7 @@ def create_or_update_configmap(configmap_name, namespace, postgres_host, postgre
create_or_update_secret(postgres_secret_name, namespace, "postgres-password", postgres_password)
# Handle ConfigMap creation for PostgreSQL
-configmap_name = "esignet-postgres-config"
+configmap_name = "postgres-config"
if configmap_exists(configmap_name, namespace):
overwrite = input(f"ConfigMap '{configmap_name}' already exists in namespace '{namespace}'. Overwrite? (y/n): ")
if overwrite.lower() == 'y':
diff --git a/deploy/postgres/init_values.yaml b/deploy/postgres/init_values.yaml
index f82530230..5d49e6bdc 100644
--- a/deploy/postgres/init_values.yaml
+++ b/deploy/postgres/init_values.yaml
@@ -4,12 +4,12 @@ dbUserPasswords:
databases:
mosip_esignet:
enabled: true
- host: "esignet-postgres-postgresql.esignet"
+ host: "postgres-postgresql"
port: 5432
su:
user: postgres
secret:
- name: esignet-postgres-postgresql
+ name: postgres-postgresql
key: postgres-password
dml: 1
repoUrl: https://github.com/mosip/esignet.git
diff --git a/deploy/postgres/install.sh b/deploy/postgres/install.sh
index 7bc4bc687..e94d6da16 100755
--- a/deploy/postgres/install.sh
+++ b/deploy/postgres/install.sh
@@ -6,7 +6,7 @@ if [ $# -ge 1 ] ; then
export KUBECONFIG=$1
fi
-NS=esignet
+NS=postgres
# Function to check and delete secret if it exists
function check_and_delete_secret() {
@@ -16,7 +16,8 @@ function check_and_delete_secret() {
if kubectl -n $secret_namespace get secret $secret_name > /dev/null 2>&1; then
echo "Secret $secret_name exists in namespace $secret_namespace."
while true; do
- read -p "Do you want to delete secret $secret_name before installation? (Y/n): " yn
+      # Prompt before deleting the pre-existing secret
+      read -p "Do you want to delete secret $secret_name before installation? (Y/n): " yn
if [ "$yn" = "Y" ] || [ "$yn" = "y" ]; then
echo "Deleting secret $secret_name..."
kubectl -n $secret_namespace delete secret $secret_name || { echo "Failed to delete secret $secret_name"; exit 1; }
@@ -35,7 +36,7 @@ function check_and_delete_secret() {
function installing_postgres() {
# Check and handle the existing secret
- check_and_delete_secret "esignet-postgres-postgresql" $NS
+ check_and_delete_secret "postgres-postgresql" $NS
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
@@ -44,18 +45,25 @@ function installing_postgres() {
kubectl label ns $NS istio-injection=enabled --overwrite
echo Installing Postgres
- helm -n $NS install esignet-postgres bitnami/postgresql --version 13.1.5 -f values.yaml --wait
- echo Installed Postgres
+ helm -n $NS install postgres bitnami/postgresql --version 13.1.5 -f values.yaml --wait
+ # Run the Python script to generate secrets and configmap
+ if [ -f generate-secret-cm.py ]; then
+    echo "Running generate-secret-cm.py to create Postgres secrets and configmap..."
+    python3 generate-secret-cm.py || { echo "Failed to run generate-secret-cm.py"; exit 1; }
+ echo "Secrets and configmap generated successfully."
+ else
+ echo "Error: generate-secret-cm.py not found. Ensure the script is in the current directory."
+ exit 1
+ fi
echo Installing gateways and virtual services
POSTGRES_HOST=$(kubectl -n esignet get cm esignet-global -o jsonpath={.data.mosip-postgres-host})
helm -n $NS install istio-addons chart/istio-addons --set postgresHost=$POSTGRES_HOST --wait
- kubectl apply -f postgres-config.yaml
return 0
}
# Prompt the user if they want to install PostgreSQL
while true; do
- read -p "Do you want to install default Postgres in esignet namespace? (y/n): " answer
+  read -p "Do you want to install default Postgres? (y/n): " answer
if [ "$answer" = "Y" ] || [ "$answer" = "y" ]; then
echo "Continuing with Postgres server deployment..."
break # Proceed with the installation
@@ -63,13 +71,13 @@ while true; do
# Prompt the user for further options
while true; do
echo "You opted not to install Postgres. What would you like to do next?"
- echo "1. Skip Postgres server installation and configuration in esignet namespace."
- echo "2. Configure external Postgres details by generating secrets and configmap in esignet namespace."
+ echo "1. Skip Postgres server installation and configuration."
+      echo "2. Configure external Postgres details by generating secrets and configmap."
read -p "Enter your choice (1/2): " option
if [ "$option" = "1" ]; then
- echo "Skipping Postgres server installation and configuration in esignet namespace."
+          echo "Skipping Postgres server installation and configuration."
exit 0 # Exit the script as the user chose to skip Postgres installation
elif [ "$option" = "2" ]; then
echo "Running generate_secret.py to create Postgres secrets and configmap..."
@@ -84,6 +92,7 @@ while true; do
echo "Please provide a correct option (Y or N)"
fi
done
+
# set commands for error handling.
set -e
set -o errexit ## set -e : exit the script if any statement returns a non-true return value
diff --git a/deploy/postgres/nginx/transportserver.yaml b/deploy/postgres/nginx/transportserver.yaml
index c031467aa..9b51871ca 100644
--- a/deploy/postgres/nginx/transportserver.yaml
+++ b/deploy/postgres/nginx/transportserver.yaml
@@ -9,7 +9,7 @@ spec:
protocol: TCP
upstreams:
- name: postgres
- service: esignet-postgres-postgresql
+ service: postgres-postgresql
port: 5432
action:
- pass: postgres
+ pass: postgres
\ No newline at end of file
diff --git a/deploy/postgres/postgres-config.yaml b/deploy/postgres/postgres-config.yaml
index c2eb6276f..6ff05b32e 100644
--- a/deploy/postgres/postgres-config.yaml
+++ b/deploy/postgres/postgres-config.yaml
@@ -1,12 +1,13 @@
+
apiVersion: v1
kind: ConfigMap
metadata:
- name: esignet-postgres-config
- namespace: esignet
+ name: postgres-config
+ namespace: postgres
labels:
app: postgres
data:
- database-host: "esignet-postgres-postgresql"
+ database-host: "postgres-postgresql"
database-port: "5432"
database-username: "esignetuser"
database-name: "mosip_esignet"
diff --git a/deploy/postgres/postgres-init.sh b/deploy/postgres/postgres-init.sh
index aeb6d538d..e66afc921 100755
--- a/deploy/postgres/postgres-init.sh
+++ b/deploy/postgres/postgres-init.sh
@@ -28,12 +28,12 @@ function initialize_db() {
then
echo Removing existing mosip_esignet installation and secret
helm -n $NS delete esignet-postgres-init || true
- kubectl -n NS delete secret db-common-secrets || true
+ kubectl -n $NS delete secret db-common-secrets || true
echo Initializing DB
helm -n $NS install esignet-postgres-init mosip/postgres-init --version $CHART_VERSION -f init_values.yaml --wait --wait-for-jobs
break
elif [ "$yn" = "N" ] || [ "$yn" = "n" ]; then
- echo "Skipping eSignet postgres DB initialisation as per your input"
+ echo "Skipping esignet postgres DB initialisation as per your input"
break
else
echo "Incorrect Input. Please choose again"
diff --git a/deploy/postgres/postgres-postgresql.yaml b/deploy/postgres/postgres-postgresql.yaml
new file mode 100644
index 000000000..028dd0ba5
--- /dev/null
+++ b/deploy/postgres/postgres-postgresql.yaml
@@ -0,0 +1,9 @@
+
+apiVersion: v1
+kind: Secret
+metadata:
+ name: postgres-postgresql
+ namespace: postgres
+type: Opaque
+data:
+ postgres-password: cG9zdGdyZXM=
diff --git a/helm/apitestrig/.gitignore b/helm/apitestrig/.gitignore
new file mode 100644
index 000000000..ee3892e87
--- /dev/null
+++ b/helm/apitestrig/.gitignore
@@ -0,0 +1 @@
+charts/
diff --git a/helm/apitestrig/.helmignore b/helm/apitestrig/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/helm/apitestrig/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/apitestrig/Chart.yaml b/helm/apitestrig/Chart.yaml
new file mode 100644
index 000000000..5815c0228
--- /dev/null
+++ b/helm/apitestrig/Chart.yaml
@@ -0,0 +1,19 @@
+apiVersion: v2
+name: apitestrig
+description: A Helm chart to deploy APITESTRIG for MOSIP modules
+type: application
+version: 1.5.0-develop
+appVersion: ""
+dependencies:
+ - name: common
+ repository: https://charts.bitnami.com/bitnami
+ tags:
+ - bitnami-common
+ version: 1.x.x
+home: https://mosip.io
+keywords:
+ - mosip
+ - apitestrig
+maintainers:
+ - email: info@mosip.io
+ name: MOSIP
diff --git a/helm/apitestrig/README.md b/helm/apitestrig/README.md
new file mode 100644
index 000000000..25c35e359
--- /dev/null
+++ b/helm/apitestrig/README.md
@@ -0,0 +1,10 @@
+# APITESTRIG
+
+Helm chart to deploy APITESTRIG for `MOSIP` modules
+
+## TL;DR
+
+```console
+$ helm repo add mosip https://mosip.github.io
+$ helm install my-release mosip/apitestrig
+```
diff --git a/helm/apitestrig/templates/NOTES.txt b/helm/apitestrig/templates/NOTES.txt
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/helm/apitestrig/templates/NOTES.txt
@@ -0,0 +1 @@
+
diff --git a/helm/apitestrig/templates/_helpers.tpl b/helm/apitestrig/templates/_helpers.tpl
new file mode 100644
index 000000000..d99caf0c4
--- /dev/null
+++ b/helm/apitestrig/templates/_helpers.tpl
@@ -0,0 +1,63 @@
+{{/*
+Return the proper image name
+*/}}
+{{- define "apitestrig.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "apitestrig.volumePermissions.image" -}}
+{{- include "common.images.image" ( dict "imageRoot" .Values.volumePermissions.image "global" .Values.global ) -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "apitestrig.imagePullSecrets" -}}
+{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "apitestrig.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (printf "%s" (include "common.names.fullname" .)) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message.
+*/}}
+{{- define "apitestrig.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "apitestrig.validateValues.foo" .) -}}
+{{- $messages := append $messages (include "apitestrig.validateValues.bar" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return podAnnotations
+*/}}
+{{- define "apitestrig.podAnnotations" -}}
+{{- if .Values.podAnnotations }}
+{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }}
+{{- end }}
+{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }}
+{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }}
+{{- end }}
+{{- end -}}
+
+{{/* Create the name for restart cronjob */}}
+{{- define "apitestrig.cronjob" -}}
+{{ default (printf "cronjob-%s" (include "common.names.fullname" .)) .Values.serviceAccount.name }}
+{{- end -}}
\ No newline at end of file
diff --git a/helm/apitestrig/templates/clusterrole.yaml b/helm/apitestrig/templates/clusterrole.yaml
new file mode 100644
index 000000000..da268fdf5
--- /dev/null
+++ b/helm/apitestrig/templates/clusterrole.yaml
@@ -0,0 +1,10 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "apitestrig.serviceAccountName" . }}-{{ .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+rules:
+ - apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","patch","list","watch"]
diff --git a/helm/apitestrig/templates/clusterrolebinding.yaml b/helm/apitestrig/templates/clusterrolebinding.yaml
new file mode 100644
index 000000000..12594c8d1
--- /dev/null
+++ b/helm/apitestrig/templates/clusterrolebinding.yaml
@@ -0,0 +1,19 @@
+kind: ClusterRoleBinding
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+metadata:
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ name: {{ template "common.names.fullname" . }}-{{ .Release.Namespace }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "apitestrig.serviceAccountName" . }}-{{ .Release.Namespace }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "apitestrig.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
diff --git a/helm/apitestrig/templates/configmaps.yaml b/helm/apitestrig/templates/configmaps.yaml
new file mode 100644
index 000000000..492508377
--- /dev/null
+++ b/helm/apitestrig/templates/configmaps.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.apitestrig.configmaps }}
+{{- range $cm_name, $cm_value := .Values.apitestrig.configmaps }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ $cm_name }}
+ namespace: {{ $.Release.Namespace }}
+ labels: {{- include "common.labels.standard" $ | nindent 8 }}
+ {{- if $.Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if $.Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+data:
+ {{- range $key, $value := $cm_value }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+{{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm/apitestrig/templates/cronjob.yaml b/helm/apitestrig/templates/cronjob.yaml
new file mode 100644
index 000000000..9a87054da
--- /dev/null
+++ b/helm/apitestrig/templates/cronjob.yaml
@@ -0,0 +1,119 @@
+{{- range $modulename, $module := $.Values.modules }}
+{{- if $module.enabled }}
+---
+apiVersion: {{ include "common.capabilities.cronjob.apiVersion" $ }}
+kind: CronJob
+metadata:
+ name: {{ template "apitestrig.cronjob" $ }}-{{ $modulename }}
+ namespace: {{ $.Release.Namespace }}
+ annotations:
+ {{- if $.Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ labels: {{- include "common.labels.standard" $ | nindent 4 }}
+ {{- if $.Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+
+spec:
+ concurrencyPolicy: Forbid
+ successfulJobsHistoryLimit: 1 # remove jobs which are successfully executed
+ failedJobsHistoryLimit: 1 # except 1 recent failed job, remove jobs which are not successfully executed
+ #schedule: '*/3 * * * *' # cron spec of time, here, 8 o'clock
+ schedule: {{ $.Values.crontime }}
+ jobTemplate:
+ spec:
+ backoffLimit: 0 # this has very low chance of failing, as all this does
+ # is prompt kubernetes to schedule new replica set for
+ # the deployment
+ # activeDeadlineSeconds: 600 # timeout, makes most sense with
+ # "waiting for rollout" variant specified below
+ template:
+ spec:
+ # account configured above
+ restartPolicy: Never
+ serviceAccountName: {{ template "apitestrig.serviceAccountName" $ }}
+ initContainers:
+ {{- if $.Values.enable_insecure }}
+ {{- include "common.tplvalues.render" (dict "value" $.Values.initContainers "context" $) | nindent 12 }}
+ {{- end }}
+ containers:
+ - name: {{ template "apitestrig.serviceAccountName" $ }}-{{ $modulename }}
+ image: {{ $module.image.repository }}:{{ $module.image.tag }}
+ imagePullPolicy: {{ $module.image.pullPolicy }}
+ {{- if $.Values.lifecycleHooks }}
+            lifecycle: {{- include "common.tplvalues.render" (dict "value" $.Values.lifecycleHooks "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if $.Values.containerSecurityContext.enabled }}
+ securityContext: {{- omit $.Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if $.Values.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" $.Values.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if $.Values.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" $.Values.args "context" $) | nindent 12 }}
+ {{- end }}
+ env:
+ - name: container_user
+              value: {{ $.Values.containerSecurityContext.runAsUser | quote }}
+ - name: JDK_JAVA_OPTIONS
+ value: {{ $.Values.additionalResources.javaOpts }}
+ - name: MODULES
+ value: {{ $modulename }}
+ - name: push-reports-to-s3
+ value: {{ quote $.Values.apitestrig.variables.push_reports_to_s3 }}
+ {{- if $.Values.extraEnvVars }}
+              {{- include "common.tplvalues.render" (dict "value" $.Values.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ envFrom:
+ {{- if $.Values.extraEnvVarsCM }}
+ {{- range $.Values.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ . }}
+ {{- end }}
+ {{- end }}
+ {{- if $.Values.extraEnvVarsSecret }}
+ {{- range $.Values.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ . }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: spring-service
+ containerPort: {{ $.Values.springServicePort }}
+ volumeMounts:
+ {{- if $.Values.enable_insecure }}
+ - mountPath: /usr/local/openjdk-11/lib/security/cacerts
+ name: cacerts
+ subPath: cacerts
+ {{- end }}
+ {{- if $.Values.apitestrig.volumes }}
+ {{- range $volume_name, $volume_value := $.Values.apitestrig.volumes.configmaps }}
+ - name: {{ $volume_name }}
+ mountPath: {{ $volume_value.volumeMounts.mountPath }}
+ {{- end }}
+ {{- end }}
+ {{- if eq $.Values.apitestrig.variables.push_reports_to_s3 "no" }}
+ - name: {{ $.Values.apitestrig.volumes.reports.name }}
+ mountPath: /home/mosip/testrig/report
+ {{- end }}
+ volumes:
+ {{- if $.Values.enable_insecure }}
+ - name: cacerts
+ emptyDir: {}
+ {{- end }}
+ {{- if $.Values.apitestrig.volumes }}
+ {{- range $volume_name, $volume_value := $.Values.apitestrig.volumes.configmaps }}
+ - name: {{ $volume_name }}
+ configMap:
+ defaultMode: {{ $volume_value.defaultMode }}
+ name: {{ $volume_name }}
+ {{- end }}
+ {{- end }}
+ {{- if eq $.Values.apitestrig.variables.push_reports_to_s3 "no" }}
+ - name: {{ $.Values.apitestrig.volumes.reports.name }}
+ persistentVolumeClaim:
+ claimName: {{ $.Values.apitestrig.volumes.reports.name }}-{{ $.Release.Namespace }}-{{ $modulename }}-pvc
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/helm/apitestrig/templates/extra-list.yaml b/helm/apitestrig/templates/extra-list.yaml
new file mode 100644
index 000000000..9ac65f9e1
--- /dev/null
+++ b/helm/apitestrig/templates/extra-list.yaml
@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}
diff --git a/helm/apitestrig/templates/pv.yaml b/helm/apitestrig/templates/pv.yaml
new file mode 100644
index 000000000..0ae1e5472
--- /dev/null
+++ b/helm/apitestrig/templates/pv.yaml
@@ -0,0 +1,24 @@
+{{- range $modulename, $module := $.Values.modules }}
+{{- if $module.enabled }}
+{{- if eq $.Values.apitestrig.variables.push_reports_to_s3 "no" }}
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: {{ $.Values.apitestrig.volumes.reports.name }}-{{ $.Release.Namespace }}-{{ $modulename }}-pvc
+ labels:
+ name: {{ $.Values.apitestrig.volumes.reports.name }}
+spec:
+ storageClassName: {{ $.Values.apitestrig.volumes.reports.storageClass }}
+ capacity:
+ storage: {{ $.Values.apitestrig.volumes.reports.size }}
+ accessModes:
+ {{- range $.Values.apitestrig.volumes.reports.accessModes }}
+ - {{ . }}
+ {{- end }}
+ nfs:
+ server: {{ $.Values.apitestrig.volumes.reports.nfs.server }}
+ path: {{ $.Values.apitestrig.volumes.reports.nfs.path }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/helm/apitestrig/templates/pvc.yaml b/helm/apitestrig/templates/pvc.yaml
new file mode 100644
index 000000000..605375b0c
--- /dev/null
+++ b/helm/apitestrig/templates/pvc.yaml
@@ -0,0 +1,24 @@
+{{- range $modulename, $module := $.Values.modules }}
+{{- if $module.enabled }}
+{{- if eq $.Values.apitestrig.variables.push_reports_to_s3 "no" }}
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ $.Values.apitestrig.volumes.reports.name }}-{{ $.Release.Namespace }}-{{ $modulename }}-pvc
+ namespace: {{ $.Release.Namespace | quote }}
+spec:
+ storageClassName: {{ $.Values.apitestrig.volumes.reports.storageClass }}
+ accessModes:
+ {{- range $.Values.apitestrig.volumes.reports.accessModes }}
+ - {{ . }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ $.Values.apitestrig.volumes.reports.size }}
+ selector:
+ matchLabels:
+ name: {{ $.Values.apitestrig.volumes.reports.name }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/helm/apitestrig/templates/secrets.yaml b/helm/apitestrig/templates/secrets.yaml
new file mode 100644
index 000000000..a3b9561dc
--- /dev/null
+++ b/helm/apitestrig/templates/secrets.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.apitestrig.secrets }}
+{{- range $secret_name, $secret_value := .Values.apitestrig.secrets }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ $secret_name }}-{{ $.Release.Name }}
+ namespace: {{ $.Release.Namespace }}
+ labels: {{- include "common.labels.standard" $ | nindent 8 }}
+ {{- if $.Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if $.Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+type: Opaque
+data:
+ {{- range $key, $value := $secret_value }}
+ {{ $key }}: {{ $value | b64enc | quote }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/helm/apitestrig/templates/service-account.yaml b/helm/apitestrig/templates/service-account.yaml
new file mode 100644
index 000000000..466590df4
--- /dev/null
+++ b/helm/apitestrig/templates/service-account.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ name: {{ template "apitestrig.serviceAccountName" . }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ namespace: {{ .Release.Namespace }}
diff --git a/helm/apitestrig/values.yaml b/helm/apitestrig/values.yaml
new file mode 100644
index 000000000..7c22d0a0a
--- /dev/null
+++ b/helm/apitestrig/values.yaml
@@ -0,0 +1,559 @@
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+# global:
+# imageRegistry: myRegistryName
+# imagePullSecrets:
+# - myRegistryKeySecretName
+# storageClass: myStorageClass
+
+## Add labels to all the deployed resources
+##
+commonLabels:
+ app.kubernetes.io/component: mosip
+
+## Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+
+## Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+
+## Extra objects to deploy (value evaluated as a template)
+##
+extraDeploy: []
+
+## Number of nodes
+##
+replicaCount: 1
+
+service:
+ type: ClusterIP
+ port: 80
+ ## loadBalancerIP for the SuiteCRM Service (optional, cloud specific)
+ ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+ ##
+ ## loadBalancerIP:
+ ##
+ ## nodePorts:
+ ## http:
+ ## https:
+ ##
+ nodePorts:
+ http: ""
+ https: ""
+ ## Enable client source IP preservation
+ ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+
+## Port on which this particular spring service module is running.
+springServicePort: 8083
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+##
+
+##
+# existingConfigmap:
+
+## Command and args for running the container (set to default if not set). Use array form
+##
+command: ['/bin/bash']
+args: ['-c', "/home/${container_user}/scripts/fetch_docker_image_hash_ids.sh"]
+
+## Deployment pod host aliases
+## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+##
+hostAliases: []
+
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources:
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ limits:
+ cpu: 1500m
+ memory: 3500Mi
+ requests:
+ cpu: 1000m
+ memory: 3500Mi
+
+additionalResources:
+ ## Specify any JAVA_OPTS string here. These typically will be specified in conjunction with above resources
+ ## Example: java_opts: "-Xms500M -Xmx500M"
+ javaOpts: "-Xms2600M -Xmx2600M"
+
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+## Clamav container already runs as 'mosip' user, so we may not need to enable this
+containerSecurityContext:
+ enabled: false
+ runAsUser: mosip
+ runAsNonRoot: true
+
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+##
+podSecurityContext:
+ enabled: false
+ fsGroup: 1001
+
+## Pod affinity preset
+## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+## Allowed values: soft, hard
+##
+podAffinityPreset: ""
+
+## Pod anti-affinity preset
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+## Allowed values: soft, hard
+##
+podAntiAffinityPreset: soft
+
+## Node affinity preset
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+## Allowed values: soft, hard
+##
+nodeAffinityPreset:
+ ## Node affinity type
+ ## Allowed values: soft, hard
+ ##
+ type: ""
+ ## Node label key to match
+ ## E.g.
+ ## key: "kubernetes.io/e2e-az-name"
+ ##
+ key: ""
+ ## Node label values to match
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+
+## Affinity for pod assignment. Evaluated as a template.
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+##
+affinity: {}
+
+## Node labels for pod assignment. Evaluated as a template.
+## ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+
+## Tolerations for pod assignment. Evaluated as a template.
+## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+
+## Pod extra labels
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+##
+podLabels: {}
+
+## Annotations for server pods.
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+
+## pods' priority.
+## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+##
+# priorityClassName: ""
+
+## lifecycleHooks for the container to automate configuration before or after startup.
+##
+lifecycleHooks: {}
+
+## Custom Liveness probes for
+##
+customLivenessProbe: {}
+
+## Custom Rediness probes
+##
+customReadinessProbe: {}
+
+## Update strategy - only really applicable for deployments with RWO PVs attached
+## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
+## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
+## terminate the single previous pod, so that the new, incoming pod can attach to the PV
+##
+updateStrategy:
+ type: RollingUpdate
+
+## Additional environment variables to set
+## Example:
+## extraEnvVars:
+## - name: FOO
+## value: "bar"
+##
+extraEnvVars: []
+
+## ConfigMap with extra environment variables
+##
+extraEnvVarsCM:
+ - global
+ - s3
+ - keycloak-host
+ - db
+ - apitestrig
+ - config-server-share
+ - artifactory-share
+## Secret with extra environment variables
+##
+extraEnvVarsSecret:
+ - apitestrig
+ - s3
+ - keycloak-client-secrets
+ - postgres-postgresql
+
+## Extra volumes to add to the deployment
+##
+extraVolumes: []
+
+## Extra volume mounts to add to the container
+##
+extraVolumeMounts: []
+
+## Add init containers to the pods.
+## Example:
+## initContainers:
+## - name: your-image-name
+## image: your-image
+## imagePullPolicy: Always
+## ports:
+## - name: portname
+## containerPort: 1234
+##
+initContainers:
+ - command:
+ - /bin/bash
+ - -c
+ - if [ "$ENABLE_INSECURE" = "true" ]; then HOST=$( env | grep "mosip-api-internal-host"
+ |sed "s/mosip-api-internal-host=//g"); if [ -z "$HOST" ]; then echo "HOST
+ $HOST is empty; EXITING"; exit 1; fi; openssl s_client -servername "$HOST"
+ -connect "$HOST":443 > "$HOST.cer" 2>/dev/null & sleep 2 ; sed -i -ne '/-BEGIN
+ CERTIFICATE-/,/-END CERTIFICATE-/p' "$HOST.cer"; cat "$HOST.cer"; /usr/local/openjdk-11/bin/keytool
+ -delete -alias "$HOST" -keystore $JAVA_HOME/lib/security/cacerts -storepass
+ changeit; /usr/local/openjdk-11/bin/keytool -trustcacerts -keystore "$JAVA_HOME/lib/security/cacerts"
+ -storepass changeit -noprompt -importcert -alias "$HOST" -file "$HOST.cer"
+ ; if [ $? -gt 0 ]; then echo "Failed to add SSL certificate for host $HOST;
+ EXITING"; exit 1; fi; cp /usr/local/openjdk-11/lib/security/cacerts /cacerts;
+ fi
+ env:
+ - name: ENABLE_INSECURE
+ value: "true"
+ envFrom:
+ - configMapRef:
+ name: global
+ image: docker.io/openjdk:11-jre
+ imagePullPolicy: Always
+ name: cacerts
+ resources: {}
+ securityContext:
+ runAsUser: 0
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /cacerts
+ name: cacerts
+
+## Add sidecars to the pods.
+## Example:
+## sidecars:
+## - name: your-image-name
+## image: your-image
+## imagePullPolicy: Always
+## ports:
+## - name: portname
+## containerPort: 1234
+##
+sidecars: {}
+
+persistence:
+ enabled: false
+ ## If defined, storageClassName:
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack).
+ ##
+ # storageClass: "-"
+ ##
+ ## If you want to reuse an existing claim, you can pass the name of the PVC using
+ ## the existingClaim variable
+ # existingClaim: your-claim
+ ## ReadWriteMany not supported by AWS gp2
+ storageClass:
+ accessModes:
+ - ReadWriteOnce
+ size: 10M
+ existingClaim:
+ # Dir where config and keys are written inside container
+ mountDir:
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
+##
+volumePermissions:
+ enabled: false
+ image:
+ registry: docker.io
+ repository: bitnami/bitnami-shell
+ tag: "10"
+ pullPolicy: Always
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ pullSecrets: []
+ ## - myRegistryKeySecretName
+ ## Init containers' resource requests and limits
+ ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ ##
+ resources:
+ ## We usually recommend not to specify default resources and to leave this as a conscious
+ ## choice for the user. This also increases chances charts run on environments with little
+ ## resources, such as Minikube. If you do want to specify resources, uncomment the following
+ ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ ##
+ limits: {}
+ ## cpu: 100m
+ ## memory: 128Mi
+ ##
+ requests: {}
+ ## cpu: 100m
+ ## memory: 128Mi
+ ##
+
+## Specifies whether RBAC resources should be created
+##
+rbac:
+ create: true
+
+## Specifies whether a ServiceAccount should be created
+##
+serviceAccount:
+ create: true
+ ## The name of the ServiceAccount to use.
+ ## If not set and create is true, a name is generated using the fullname template
+ ##
+ name:
+
+## Prometheus Metrics
+##
+metrics:
+ enabled: false
+ ## Prometheus pod annotations
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ ##
+ podAnnotations:
+ prometheus.io/scrape: "true"
+
+ endpointPath:
+
+ ## Prometheus Service Monitor
+ ## ref: https://github.com/coreos/prometheus-operator
+ ##
+ serviceMonitor:
+ ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry
+ ##
+ enabled: true
+ ## Specify the namespace in which the serviceMonitor resource will be created
+ ##
+ # namespace: ""
+ ## Specify the interval at which metrics should be scraped
+ ##
+ interval: 10s
+ ## Specify the timeout after which the scrape is ended
+ ##
+ # scrapeTimeout: 30s
+ ## Specify Metric Relabellings to add to the scrape endpoint
+ ##
+ # relabellings:
+ ## Specify honorLabels parameter to add the scrape endpoint
+ ##
+ honorLabels: false
+ ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+ ##
+ additionalLabels: {}
+
+ ## Custom PrometheusRule to be defined
+ ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+ ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+ ##
+ prometheusRule:
+ enabled: false
+ additionalLabels: {}
+ namespace: ''
+ ## List of rules, used as template by Helm.
+ ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html
+ # rules:
+ # - alert: RabbitmqDown
+ # expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0
+ # for: 5m
+ # labels:
+ # severity: error
+ rules: []
+
+## Admin swagger should have only internal access. Hence linked to internal gateway
+istio:
+ enabled: false
+ gateways:
+ - istio-system/internal
+ prefix:
+ corsPolicy:
+ allowOrigins:
+ - prefix: https://api-internal.sandbox.xyz.net
+ allowCredentials: true
+ allowHeaders:
+ - Accept
+ - Accept-Encoding
+ - Accept-Language
+ - Connection
+ - Content-Type
+ - Cookie
+ - Host
+ - Referer
+ - Sec-Fetch-Dest
+ - Sec-Fetch-Mode
+ - Sec-Fetch-Site
+ - Sec-Fetch-User
+ - Origin
+ - Upgrade-Insecure-Requests
+ - User-Agent
+ - sec-ch-ua
+ - sec-ch-ua-mobile
+ - sec-ch-ua-platform
+ - x-xsrf-token
+ - xsrf-token
+ allowMethods:
+ - GET
+ - POST
+ - PATCH
+ - PUT
+ - DELETE
+
+modules:
+ prereg:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-prereg
+ tag: develop
+ pullPolicy: Always
+ masterdata:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-masterdata
+ tag: develop
+ pullPolicy: Always
+ idrepo:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-idrepo
+ tag: develop
+ pullPolicy: Always
+ partner:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-pms
+ tag: develop
+ pullPolicy: Always
+ pms:
+ enabled: false
+ image:
+ repository: mosipdev/apitest-pms
+ tag: develop
+ pullPolicy: Always
+ resident:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-resident
+ tag: develop
+ pullPolicy: Always
+ auth:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-auth
+ tag: develop
+ pullPolicy: Always
+ esignet:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-esignet
+ tag: develop
+ pullPolicy: Always
+ mimoto:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-mimoto
+ tag: develop
+ pullPolicy: Always
+
+crontime: "0 3 * * *" ## run cronjob every day at 3 AM (time hr: 0-23 )
+
+apitestrig:
+ configmaps:
+ s3:
+ s3-host: 'http://minio.minio:9000'
+ s3-user-key: 'admin'
+ s3-region: ''
+ db:
+ db-port: '5432'
+ db-su-user: 'postgres'
+ db-server: 'api-internal.sandbox.xyz.net'
+ apitestrig:
+ ENV_USER: 'api-internal.sandbox'
+ ENV_ENDPOINT: 'https://api-internal.sandbox.xyz.net'
+ ENV_TESTLEVEL: 'smokeAndRegression'
+ authDemoServiceBaseURL: http://authdemo.authdemo
+ authDemoServicePort: 80
+ eSignetDeployed: yes or no
+ authCertsPath: '/home/mosip/authcerts'
+ scripts:
+ fetch_docker_image_hash_ids.sh: |
+ #!/bin/bash
+ sleep 5
+ export DOCKER_HASH_ID=$( kubectl get pod "$HOSTNAME" -n "$NS" -o jsonpath='{.status.containerStatuses[*].imageID}' | sed 's/ /\n/g' | grep -v 'istio' | sed 's/docker\-pullable\:\/\///g' )
+ export DOCKER_IMAGE=$( kubectl get pod "$HOSTNAME" -n "$NS" -o jsonpath='{.status.containerStatuses[*].image}' | sed 's/ /\n/g' | grep -v 'istio' | sed 's/docker\-pullable\:\/\///g' )
+ if [[ -z $DOCKER_HASH_ID ]]; then
+ echo "DOCKER_HASH_ID IS EMPTY;EXITING";
+ exit 1;
+ fi
+ echo "DOCKER_HASH_ID ; $DOCKER_HASH_ID"
+ echo "DOCKER_IMAGE : $DOCKER_IMAGE"
+ kubectl get pods -A -o=jsonpath='{range .items[*]}{.metadata.namespace}{","}{.metadata.labels.app\.kubernetes\.io\/name}{","}{.status.containerStatuses[?(@.name!="istio-proxy")].image}{","}{.status.containerStatuses[?(@.name!="istio-proxy")].imageID}{","}{.metadata.creationTimestamp}{"\n"}' | sed 's/ /\n/g' | grep -vE 'istio*|longhorn*|cattle*|rancher|kube' | sed 's/docker\-pullable\:\/\///g' | sort -u | sed '/,,,/d' | awk -F ',' 'BEGIN {print "{ \"POD_NAME\": \"'$(echo $HOSTNAME)'\", \"DOCKER_IMAGE\": \"'$(echo $DOCKER_IMAGE)'\", \"DOCKER_HASH_ID\": \"'$(echo $DOCKER_HASH_ID)'\", \"k8s-cluster-image-list\": ["} {print "{"} {print "\"namespace\": \"" $1 "\","} {print "\"app_name\": \"" $2 "\","} {print "\"docker_image_name\": \"" $3 "\","} {print "\"docker_image_id\": \"" $4 "\","} {print "\"creation_timestamp\": \"" $5 "\"" } {print "},"} END {print "]}"}' | sed -z 's/},\n]/}\n]/g' | jq -r . | tee -a images-list.json
+ ## run entrypoint script
+ sleep 5
+ cd /home/${container_user}/
+ bash ./entrypoint.sh
+ secrets:
+ s3:
+ s3-user-secret: 'password'
+ apitestrig:
+ volumes:
+ configmaps:
+ scripts:
+ defaultMode: 0777
+ volumeMounts:
+ mountPath: '/home/mosip/scripts/'
+ reports:
+ name: apitestrig-reports
+ storageClass: nfs-client
+ accessModes:
+ - ReadWriteMany
+ size: 10Mi
+ existingClaim:
+ # Dir where config and keys are written inside container
+ mountDir: /home/mosip/testrig/report
+ nfs:
+ path: "/srv/nfs/sandbox/onboarding" # Dir within the nfs server where config repo is cloned/maintained locally.
+ server: "nfs-server" # Ip address of nfs server.
+ variables:
+ push_reports_to_s3: "no"
+enable_insecure: false
diff --git a/partner-onboarder/install.sh b/partner-onboarder/install.sh
index d9cf12ab9..82599ed23 100755
--- a/partner-onboarder/install.sh
+++ b/partner-onboarder/install.sh
@@ -73,7 +73,7 @@ function installing_onboarder() {
echo "NFS server not provided; EXITING."
exit 1;
fi
- read -p "Please provide NFS directory to store reports from NFS server (e.g. /srv/nfs//onboarder/), make sure permission is 777 for the folder: " nfs_path
+ read -p "Please provide NFS directory to store reports from NFS server (e.g. /srv/nfs/mosip//onboarder/), make sure permission is 777 for the folder: " nfs_path
if [[ -z $nfs_path ]]; then
echo "NFS Path not provided; EXITING."
exit 1;
diff --git a/partner-onboarder/values.yaml b/partner-onboarder/values.yaml
index 691725e11..cb214c694 100644
--- a/partner-onboarder/values.yaml
+++ b/partner-onboarder/values.yaml
@@ -2,6 +2,13 @@
# registry: docker.io
# repository: mosipdev/partner-onboarder
# tag: develop
+resources:
+ limits:
+ cpu: 500m
+ memory: 500Mi
+ requests:
+ cpu: 500m
+ memory: 500Mi
onboarding:
modules: