diff --git a/deployment/sandbox-v2/helm/charts/admin/templates/ui-configmap.yaml b/deployment/sandbox-v2/helm/charts/admin/templates/ui-configmap.yaml index 3fc2322d4..815996520 100644 --- a/deployment/sandbox-v2/helm/charts/admin/templates/ui-configmap.yaml +++ b/deployment/sandbox-v2/helm/charts/admin/templates/ui-configmap.yaml @@ -1,6 +1,6 @@ apiVersion: v1 data: - config.json: '{"baseUrl":"{{ tpl .Values.services.ui.apiHost . }}", "adminUrl" : "/admin-ui/", "primaryLangCode": "eng", "secondaryLangCode": "ara", "validateToken": "authmanager/authorize/admin/validateToken", "login": "admin/login/", "logout": "admin/logout/user","templateRepoUrl":"/admin-ui/templates/"}' + config.json: '{"baseUrl":"{{ tpl .Values.services.ui.apiHost . }}", "adminUrl" : "/admin-ui/", "primaryLangCode": "eng", "secondaryLangCode": "ara", "validateToken": "authmanager/authorize/admin/validateToken", "login": "admin/login/", "logout": "admin/logout/user", "templateRepoUrl":"/admin-ui/templates/"}' kind: ConfigMap metadata: name: {{ .Values.services.ui.configName }} diff --git a/deployment/sandbox-v2/podconfig.yml b/deployment/sandbox-v2/podconfig.yml index b87551af7..1ee282dcf 100644 --- a/deployment/sandbox-v2/podconfig.yml +++ b/deployment/sandbox-v2/podconfig.yml @@ -5,154 +5,154 @@ podconfig: minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi masterdata: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms2000M -Xmx2000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi idgen: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi prid: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi sync: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms2000M -Xmx2000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi audit : replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi key: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi notifier: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi otp: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi rid: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 
maxSurge: 1 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi keyMigrator: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi regproc: camel: @@ -160,21 +160,21 @@ podconfig: maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi group2: replicas: 1 maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms2000M -Xmx2000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 300m @@ -188,7 +188,7 @@ podconfig: maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms2000M -Xmx2000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 300m @@ -202,21 +202,21 @@ podconfig: maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi group5: replicas: 1 maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms1500M -Xmx1500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 300m @@ -230,84 +230,84 @@ podconfig: maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms1500M -Xmx1500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi group7: replicas: 1 maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms2000M -Xmx2000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi trans: replicas: 1 maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi workflow: replicas: 1 maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi reprocess: replicas: 1 maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi notificationService: replicas: 1 maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi dmzregproc: group1: @@ -315,56 +315,56 @@ podconfig: minReadySeconds: 10 maxUnavailable: 1 maxSurge: 1 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi camel: replicas: 1 minReadySeconds: 10 maxUnavailable: 1 maxSurge: 1 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi status: replicas: 1 
minReadySeconds: 10 maxUnavailable: 1 maxSurge: 1 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi pktserver: replicas: 1 minReadySeconds: 10 maxUnavailable: 1 maxSurge: 1 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi prereg: application: @@ -372,83 +372,83 @@ podconfig: minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi batch: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi booking: replicas: 1 minReadySeconds: 10 maxUnavailable: 1 maxSurge: 1 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi datasync: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi captcha: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi ui: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi ida: auth: @@ -456,42 +456,42 @@ podconfig: minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms1500M -Xmx1500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi internal: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms1500M -Xmx1500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi otp: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi idrepo: identity: @@ -499,56 +499,56 @@ podconfig: minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi vid: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi credentialreq: replicas: 1 minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi credentialservice: replicas: 1 
minReadySeconds: 10 maxUnavailable: 0 maxSurge: 1 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi pms: partner: @@ -556,28 +556,28 @@ podconfig: maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms2000M -Xmx2000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi policy: replicas: 1 maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi packetmanager: service: @@ -585,28 +585,28 @@ podconfig: maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms1500M -Xmx1500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi resident: service: replicas: 1 maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms1000M -Xmx1000M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi activemq: service: @@ -617,38 +617,38 @@ podconfig: resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi admin: service: replicas: 1 maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi hotlist: replicas: 1 maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi artifactory: service: @@ -659,10 +659,10 @@ podconfig: resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi clamav: service: @@ -673,10 +673,10 @@ podconfig: resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi config_server: service: @@ -687,10 +687,10 @@ podconfig: resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi datashare: service: @@ -698,14 +698,14 @@ podconfig: maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms1500M -Xmx1500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi docker_service: service: @@ -723,10 +723,10 @@ podconfig: resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 600m - memory: 3Gi + memory: 1.5Gi mockabis: service: replicas: 1 @@ -778,10 +778,10 @@ podconfig: resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 1000m - memory: 6Gi + memory: 1.5Gi print: service: replicas: 1 @@ -791,10 +791,10 @@ podconfig: resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi reg_client_downloader: service: @@ -802,14 +802,14 @@ podconfig: maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms1500M -Xmx1500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 300m - memory: 1Gi + memory: 1.5Gi limits: cpu: 500m - memory: 3Gi + memory: 1.5Gi websub: service: @@ -820,10 
+820,10 @@ podconfig: resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi consolidator: replicas: 1 @@ -833,10 +833,10 @@ podconfig: resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi mosip_file_server: service: @@ -844,14 +844,14 @@ podconfig: maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 1Gi + memory: 1.5Gi tusd: service: @@ -859,14 +859,14 @@ podconfig: maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 3Gi + memory: 1.5Gi resident_app_server: service: @@ -874,14 +874,14 @@ podconfig: maxSurge: 1 maxUnavailable: 0 minReadySeconds: 10 - java_opts: "-Xms500M -Xmx500M" + java_opts: "-Xms750M -Xmx750M" resources: requests: cpu: 200m - memory: 1Gi + memory: 1.5Gi limits: cpu: 300m - memory: 1Gi + memory: 1.5Gi ingress: nginx: diff --git a/deployment/sandbox-v2/roles/mosip-file-server/templates/mosip-context.yml.j2 b/deployment/sandbox-v2/roles/mosip-file-server/templates/mosip-context.yml.j2 index 919c63d83..24a1b080e 100644 --- a/deployment/sandbox-v2/roles/mosip-file-server/templates/mosip-context.yml.j2 +++ b/deployment/sandbox-v2/roles/mosip-file-server/templates/mosip-context.yml.j2 @@ -35,8 +35,12 @@ }, "phone": "mosip:phone", "postalCode": { - "@id": "{{site.sandbox_public_url}}/mosip#postalCode", - "@context": {"value": "rdf:value", "lang": "@language"} + "@id": "{{site.sandbox_public_url}}/mosip#postalCode", + "@context": {"value": "rdf:value", "lang": "@language"} + }, + "province": { + "@id": "{{site.sandbox_public_url}}/mosip#province", + "@context": {"value": "rdf:value", "lang": "@language"} }, "region": { "@id": "{{site.sandbox_public_url}}/mosip#region", @@ -44,4 +48,4 @@ }, "biometrics": "mosip:biometrics" }] -} \ No newline at end of file +} diff --git a/deployment/sandbox-v2/roles/nginx/templates/nginx_https_conf.yml.j2 b/deployment/sandbox-v2/roles/nginx/templates/nginx_https_conf.yml.j2 index 602923a0b..be3a7e23a 100644 --- a/deployment/sandbox-v2/roles/nginx/templates/nginx_https_conf.yml.j2 +++ b/deployment/sandbox-v2/roles/nginx/templates/nginx_https_conf.yml.j2 @@ -306,6 +306,10 @@ http { location /registration-client/ { proxy_pass {{clusters.dmz.ingress.base_url}}/registration-client/; } + + location /resident-ui/ { + proxy_pass {{clusters.dmz.ingress.base_url}}/resident-ui/; + } location /.well-known/ { if ($request_method = 'OPTIONS') { diff --git a/deployment/sandbox-v2/site.yml b/deployment/sandbox-v2/site.yml index 6b5557b5d..b2f71614b 100644 --- a/deployment/sandbox-v2/site.yml +++ b/deployment/sandbox-v2/site.yml @@ -47,7 +47,9 @@ - import_playbook: playbooks/mock-mv.yml - import_playbook: playbooks/tusd.yml - import_playbook: playbooks/resident-app.yml +# Regclient downloader - import_playbook: playbooks/reg-client-downloader.yml +# Mosip-file-server - import_playbook: playbooks/mosip-file-server.yml -# Certificates Upload +# upload Certificates - import_playbook: playbooks/uploadCerts.yml diff --git a/deployment/sandbox-v2/utils/ports.yml b/deployment/sandbox-v2/utils/ports.yml new file mode 100644 index 000000000..1ab69fdb8 --- /dev/null +++ b/deployment/sandbox-v2/utils/ports.yml @@ -0,0 +1,47 @@ +#This is used for enabling ports in 
sandbox installation with firewall enabled +##usage: ansible-playbook -i hosts.ini ports.yml +- name: FirewallD installation and ports addition to the nodes + hosts: cluster + vars: + port_list: + - { port: 8080/tcp, state: enabled } + - { port: 22/tcp, state: enabled } + - { port: 25/tcp, state: enabled } + - { port: 111/tcp, state: enabled } + - { port: 80/tcp, state: enabled } + - { port: 601/tcp, state: enabled } + - { port: 443/tcp, state: enabled } + - { port: 53/udp, state: enabled } + - { port: 7703-7726/tcp, state: enabled } + - { port: 2376/tcp, state: enabled } + - { port: 2379-2380/tcp, state: enabled } + - { port: 30000-32767/udp, state: enabled } + - { port: 10250-10252/tcp, state: enabled } + - { port: 6443/tcp, state: enabled } + - { port: 9100/tcp, state: enabled } + - { port: 2049/tcp, state: enabled } + - { port: 2048/tcp, state: enabled } + - { port: 41497-43774/tcp, state: enabled } + - { port: 25654-29933/tcp, state: enabled } + tasks: + - name: Install firewalld + yum: + name: firewalld + state: latest + notify: + - start firewalld + - name: start firewalld + service: + name: firewalld + state: started + enabled: yes + become: yes + - name: enable ports + firewalld: + zone: public + port: "{{ item.port }}" + permanent: true + state: "{{ item.state }}" + loop: + "{{ port_list }}" + become: yes diff --git a/deployment/sandbox-v2/utils/prop_comparator/prop_comparator.py b/deployment/sandbox-v2/utils/prop_comparator/prop_comparator.py index 61fff6795..c3d670a33 100755 --- a/deployment/sandbox-v2/utils/prop_comparator/prop_comparator.py +++ b/deployment/sandbox-v2/utils/prop_comparator/prop_comparator.py @@ -47,11 +47,17 @@ def diff_report(fname1, fname2): print('') print('=======================================================') print('\nNEW PROPERTIES in %s' % fname1) - pp.pprint(set1 - set2) + print('') + diff = set1 - set2 + for p in list(diff): + print('%s: %s' % (p, props1[p])) print('') print('=======================================================') print('\nNEW PROPERITES in %s' % fname2) - pp.pprint(set2 - set1) + print('') + diff = set2 - set1 + for p in list(diff): + print('%s: %s' % (p, props2[p])) print('') def main(): diff --git a/deployment/sandbox-v2/utils/reprocess/config.py b/deployment/sandbox-v2/utils/reprocess/config.py index bbfdcc449..9e5eb2744 100644 --- a/deployment/sandbox-v2/utils/reprocess/config.py +++ b/deployment/sandbox-v2/utils/reprocess/config.py @@ -12,5 +12,5 @@ db_host = os.getenv('DB_HOST') db_port = os.getenv('DB_PORT') -query="select reg_id,process,workflow_instance_id from registration where latest_trn_status_code in ('SUCCESS', 'REPROCESS', 'IN_PROGRESS') and reg_process_retry_count<=500 and latest_trn_dtimes < (SELECT NOW() - INTERVAL '1 DAY') and status_code NOT IN ('PROCESSED', 'FAILED', 'REJECTED') LIMIT 1000" -delay = 1 # seconds +query="select reg_id,process,workflow_instance_id from registration where latest_trn_status_code in ('SUCCESS', 'REPROCESS', 'IN_PROGRESS') and reg_process_retry_count<=500 and latest_trn_dtimes < (SELECT NOW() - INTERVAL '1 DAY') and status_code NOT IN ('PROCESSED', 'FAILED', 'REJECTED') LIMIT 100" +delay = 2 # seconds diff --git a/deployment/sandbox-v2/versions.yml b/deployment/sandbox-v2/versions.yml index 2169b3280..89d8f9c89 100644 --- a/deployment/sandbox-v2/versions.yml +++ b/deployment/sandbox-v2/versions.yml @@ -4,174 +4,174 @@ versions: kernel: auth: - 'kernel-auth-service': 'mosipqa/kernel-auth-service:1.2.0.1' + 'kernel-auth-service': 'mosipdev/kernel-auth-service:1.2.0.1' masterdata: - 
'kernel-masterdata-service': 'mosipqa/kernel-masterdata-service:1.2.0.1' + 'kernel-masterdata-service': 'mosipdev/kernel-masterdata-service:1.2.0.1' idgen: - 'kernel-idgenerator-service': 'mosipqa/kernel-idgenerator-service:1.2.0.1' + 'kernel-idgenerator-service': 'mosipdev/kernel-idgenerator-service:1.2.0.1' prid: - 'kernel-pridgenerator-service': 'mosipqa/kernel-pridgenerator-service:1.2.0.1' + 'kernel-pridgenerator-service': 'mosipdev/kernel-pridgenerator-service:1.2.0.1' sync: - 'kernel-syncdata-service': 'mosipqa/kernel-syncdata-service:1.2.0.1' + 'kernel-syncdata-service': 'mosipdev/kernel-syncdata-service:1.2.0.1' audit : - 'kernel-auditmanager-service': 'mosipqa/kernel-auditmanager-service:1.2.0.1' + 'kernel-auditmanager-service': 'mosipdev/kernel-auditmanager-service:1.2.0.1' key: - 'kernel-keymanager-service': 'mosipqa/kernel-keymanager-service:1.2.0.1' + 'kernel-keymanager-service': 'mosipdev/kernel-keymanager-service:1.2.0.1' notifier: - 'kernel-notification-service': 'mosipqa/kernel-notification-service:1.2.0.1' + 'kernel-notification-service': 'mosipdev/kernel-notification-service:1.2.0.1' otp: - 'kernel-otpmanager-service': 'mosipqa/kernel-otpmanager-service:1.2.0.1' + 'kernel-otpmanager-service': 'mosipdev/kernel-otpmanager-service:1.2.0.1' rid: - 'kernel-ridgenerator-service': 'mosipqa/kernel-ridgenerator-service:1.2.0.1' + 'kernel-ridgenerator-service': 'mosipdev/kernel-ridgenerator-service:1.2.0.1' keygen: - 'kernel-keys-generator': 'mosipqa/keys-generator:1.2.0.1' + 'kernel-keys-generator': 'mosipdev/keys-generator:1.2.0.1' keymigrator: - 'keymigrator': 'mosipqa/keys-migrator:1.2.0.1' + 'keymigrator': 'mosipdev/keys-migrator:1.2.0.1' prereg: application: - 'prereg-application-service': 'mosipqa/pre-registration-application-service:1.2.0.1' + 'prereg-application-service': 'mosipdev/pre-registration-application-service:1.2.0.1' batch: - 'prereg-batchjob-service': 'mosipqa/pre-registration-batchjob:1.2.0.1' + 'prereg-batchjob-service': 'mosipdev/pre-registration-batchjob:1.2.0.1' booking: - 'prereg-booking-service': 'mosipqa/pre-registration-booking-service:1.2.0.1' + 'prereg-booking-service': 'mosipdev/pre-registration-booking-service:1.2.0.1' datasync: - 'prereg-datasync-service': 'mosipqa/pre-registration-datasync-service:1.2.0.1' + 'prereg-datasync-service': 'mosipdev/pre-registration-datasync-service:1.2.0.1' captcha: - 'prereg-captcha-service': 'mosipqa/pre-registration-captcha-service:1.2.0.1' + 'prereg-captcha-service': 'mosipdev/pre-registration-captcha-service:1.2.0.1' ui: - 'prereg-ui': 'mosipqa/pre-registration-ui:1.2.0.1' + 'prereg-ui': 'mosipdev/pre-registration-ui:1.2.0.1' registration_client: downloader: - 'reg-client-downloader': 'mosipqa/registration-client:1.2.0.1' + 'reg-client-downloader': 'mosipdev/registration-client:1.2.0.1' version: '1.2.0.1-SNAPSHOT' regproc: group1: - 'regproc-stage-group-1': 'mosipqa/registration-processor-stage-group-1:1.2.0.1' + 'regproc-stage-group-1': 'mosipdev/registration-processor-stage-group-1:1.2.0.1' group2: - 'regproc-stage-group-2': 'mosipqa/registration-processor-stage-group-2:1.2.0.1' + 'regproc-stage-group-2': 'mosipdev/registration-processor-stage-group-2:1.2.0.1' group3: - 'regproc-stage-group-3': 'mosipqa/registration-processor-stage-group-3:1.2.0.1' + 'regproc-stage-group-3': 'mosipdev/registration-processor-stage-group-3:1.2.0.1' group4: - 'regproc-stage-group-4': 'mosipqa/registration-processor-stage-group-4:1.2.0.1' + 'regproc-stage-group-4': 'mosipdev/registration-processor-stage-group-4:1.2.0.1' group5: - 
'regproc-stage-group-5': 'mosipqa/registration-processor-stage-group-5:1.2.0.1' + 'regproc-stage-group-5': 'mosipdev/registration-processor-stage-group-5:1.2.0.1' group6: - 'regproc-stage-group-6': 'mosipqa/registration-processor-stage-group-6:1.2.0.1' + 'regproc-stage-group-6': 'mosipdev/registration-processor-stage-group-6:1.2.0.1' group7: - 'regproc-stage-group-7': 'mosipqa/registration-processor-stage-group-7:1.2.0.1' + 'regproc-stage-group-7': 'mosipdev/registration-processor-stage-group-7:1.2.0.1' camel: - 'regproc-camel-stage': 'mosipqa/registration-processor-common-camel-bridge:1.2.0.1' + 'regproc-camel-stage': 'mosipdev/registration-processor-common-camel-bridge:1.2.0.1' trans: - 'regproc-registration-transaction-service': 'mosipqa/registration-processor-registration-transaction-service:1.2.0.1' + 'regproc-registration-transaction-service': 'mosipdev/registration-processor-registration-transaction-service:1.2.0.1' reprocess: - 'regproc-reprocessor': 'mosipqa/registration-processor-reprocessor:1.2.0.1' + 'regproc-reprocessor': 'mosipdev/registration-processor-reprocessor:1.2.0.1' notificationService: - 'regproc-notification-service': 'mosipqa/registration-processor-notification-service:1.2.0.1' + 'regproc-notification-service': 'mosipdev/registration-processor-notification-service:1.2.0.1' salt: - 'regproc-salt-generator': 'mosipqa/kernel-salt-generator:1.2.0.1' + 'regproc-salt-generator': 'mosipdev/kernel-salt-generator:1.2.0.1' workflow: - 'regproc-workflowmanager': 'mosipqa/registration-processor-workflow-manager-service:1.2.0.1' + 'regproc-workflowmanager': 'mosipdev/registration-processor-workflow-manager-service:1.2.0.1' dmzregproc: group1: - 'regproc-stage-group-1': 'mosipqa/registration-processor-stage-group-1:1.2.0.1' + 'regproc-stage-group-1': 'mosipdev/registration-processor-stage-group-1:1.2.0.1' camel: - 'regproc-camel-stage': 'mosipqa/registration-processor-common-camel-bridge:1.2.0.1' + 'regproc-camel-stage': 'mosipdev/registration-processor-common-camel-bridge:1.2.0.1' status: - 'regproc-registration-status-service': 'mosipqa/registration-processor-registration-status-service:1.2.0.1' + 'regproc-registration-status-service': 'mosipdev/registration-processor-registration-status-service:1.2.0.1' pktserver: - 'regproc-dmz-packet-server': 'mosipqa/registration-processor-dmz-packet-server:1.0.9' + 'regproc-dmz-packet-server': 'mosipdev/registration-processor-dmz-packet-server:1.0.9' ida: auth: - 'ida-auth-service': 'mosipqa/authentication-service:1.2.0.1' + 'ida-auth-service': 'mosipdev/authentication-service:1.2.0.1' internal: - 'ida-internal-service': 'mosipqa/authentication-internal-service:1.2.0.1' + 'ida-internal-service': 'mosipdev/authentication-internal-service:1.2.0.1' otp: - 'ida-otp-service': 'mosipqa/authentication-otp-service:1.2.0.1' + 'ida-otp-service': 'mosipdev/authentication-otp-service:1.2.0.1' keygen: - 'ida-key-generator': 'mosipqa/keys-generator:1.2.0.1' + 'ida-key-generator': 'mosipdev/keys-generator:1.2.0.1' idrepo: identity: - 'idrepo-identity-service': 'mosipqa/id-repository-identity-service:1.2.0.1' + 'idrepo-identity-service': 'mosipdev/id-repository-identity-service:1.2.0.1' vid: - 'idrepo-vid-service': 'mosipqa/id-repository-vid-service:1.2.0.1' + 'idrepo-vid-service': 'mosipdev/id-repository-vid-service:1.2.0.1' credentialreq: - 'idrepo-credential-request-generator': 'mosipqa/credential-request-generator:1.2.0.1' + 'idrepo-credential-request-generator': 'mosipdev/credential-request-generator:1.2.0.1' credentialservice: - 
'idrepo-credential-service': 'mosipqa/credential-service:1.2.0.1' + 'idrepo-credential-service': 'mosipdev/credential-service:1.2.0.1' salt: - 'idrepo-salt-generator': 'mosipqa/id-repository-salt-generator:1.2.0.1' + 'idrepo-salt-generator': 'mosipdev/id-repository-salt-generator:1.2.0.1' feeder: - 'idrepo-credential-feeder': 'mosipqa/id-repository-credentials-feeder:1.2.0.1' + 'idrepo-credential-feeder': 'mosipdev/id-repository-credentials-feeder:1.2.0.1' pms: partner: - 'pms-partner-manager-service': 'mosipqa/partner-management-service:1.2.0.1' + 'pms-partner-manager-service': 'mosipdev/partner-management-service:1.2.0.1' policy: - 'pms-policy-manager-service': 'mosipqa/policy-management-service:1.2.0.1' + 'pms-policy-manager-service': 'mosipdev/policy-management-service:1.2.0.1' ui: - 'pms-ui': 'mosipqa/pmp-ui:1.2.0.1' + 'pms-ui': 'mosipdev/pmp-ui:1.2.0.1' resident: resident: - 'resident-service': 'mosipqa/resident-service:1.2.0.1' + 'resident-service': 'mosipdev/resident-service:1.2.0.1' admin: service: - 'admin-service': 'mosipqa/admin-service:1.2.0.1' + 'admin-service': 'mosipdev/admin-service:1.2.0.1' ui: - 'admin-ui': 'mosipqa/admin-ui:1.2.0.1' + 'admin-ui': 'mosipdev/admin-ui:1.2.0.1' hotlist: - 'admin-hotlist-service': 'mosipqa/hotlist-service:1.2.0.1' + 'admin-hotlist-service': 'mosipdev/hotlist-service:1.2.0.1' packetmanager: service: - 'packetmanager-service': 'mosipqa/commons-packet-service:1.2.0.1' + 'packetmanager-service': 'mosipdev/commons-packet-service:1.2.0.1' datashare: service: - 'datashare-service': 'mosipqa/data-share-service:1.2.0.1' + 'datashare-service': 'mosipdev/data-share-service:1.2.0.1' print: service: - 'print-service': 'mosipqa/print:1.2.0.1' + 'print-service': 'mosipdev/print:1.2.0.1' websub: service: - 'websub-service': 'mosipqa/websub-service:1.2.0.1' + 'websub-service': 'mosipdev/websub-service:1.2.0.1' consolidator: - 'consolidator-websub-service': 'mosipqa/consolidator-websub-service:1.2.0.1' + 'consolidator-websub-service': 'mosipdev/consolidator-websub-service:1.2.0.1' mock: abis: # Please don't change this image - 'mock-abis': 'mosipqa/mock-abis:develop' + 'mock-abis': 'mosipdev/mock-abis:develop' mv: - 'mock-mv': 'mosipqa/mock-mv:1.1.5' + 'mock-mv': 'mosipdev/mock-mv:1.1.5' biosdk: - 'mock-biosdk-service': 'mosipqa/biosdk-server:1.2.0.1' + 'mock-biosdk-service': 'mosipdev/biosdk-server:1.2.0.1' artifactory: service: - 'artifactory-service': 'mosipqa/artifactory-server:1.2.0.1' + 'artifactory-service': 'mosipdev/artifactory-server:1.2.0.1' keycloak: service: - 'keycloak': 'mosipqa/mosip-keycloak:1.2.0.1' + 'keycloak': 'mosipdev/mosip-keycloak:1.2.0.1' mosip_file_server: service: - 'mosip_file_server': 'mosipqa/mosip-file-server:1.2.0.1' + 'mosip_file_server': 'mosipdev/mosip-file-server:1.2.0.1' tusd: service: - 'tusd-server': 'mosipqa/tusd-server:1.2.0.1' + 'tusd-server': 'mosipdev/tusd-server:1.2.0.1' resident_app_server: service: - 'resident-app-server': 'mosipqa/mimoto:develop' + 'resident-app-server': 'mosipdev/mimoto:develop' diff --git a/deployment/v3/docs/create-gmail-app-password.md b/deployment/v3/docs/create-gmail-app-password.md index 98d710d16..9c602f039 100644 --- a/deployment/v3/docs/create-gmail-app-password.md +++ b/deployment/v3/docs/create-gmail-app-password.md @@ -29,11 +29,12 @@ 1. Make sure below listed properties from `kernel-default.properties` or `kernel-*.properties` file in config set to false. 
``` - management.health.mail.enabled=false + # uncomment if SMTP mail health check not required + #management.health.mail.enabled=false mosip.kernel.sms.proxy-sms=false mosip.kernel.mail.proxy-mail=false ``` # References -1. [support.teamgate.com](https://support.teamgate.com/hc/en-us/articles/115002064229-How-to-create-a-password-to-connect-email-while-using-2-step-verification-in-Gmail-) \ No newline at end of file +1. [support.teamgate.com](https://support.teamgate.com/hc/en-us/articles/115002064229-How-to-create-a-password-to-connect-email-while-using-2-step-verification-in-Gmail-) diff --git a/deployment/v3/docs/digitalcardcert.md b/deployment/v3/docs/digitalcardcert.md new file mode 100644 index 000000000..4ecad31ae --- /dev/null +++ b/deployment/v3/docs/digitalcardcert.md @@ -0,0 +1,101 @@ +# DIGITALCARD Module certificate exchange guide + +- Below are the steps needed to be performed for the certificate exchange of DIGITALCARD + + * 1. Authenticate yourself and get authorization token from authmanager swagger. Also adding the request after that which can be used.. please update the domain name in the request. + + SWAGGER URL:- ```https://minibox.mosip.net/v1/authmanager/swagger-ui.html#/authmanager/clientIdSecretKeyUsingPOST ``` hit authmanager section in try-out section. + ``` + { + "id": "string", + "metadata": {}, + "request": { + "appId": "ida", + "clientId": "mosip-ida-client", + "secretKey": "abc123" + }, + "requesttime": "2018-12-10T06:12:52.994Z", + "version": "string" + } + ``` + * 2. Get the ROOT certificate data from the below URL, Copy it and use it for certificate exchange in the next step. + ```https://minibox.mosip.net/v1/keymanager/getCertificate?applicationId=ROOT``` + + * 3. Upload ROOT certificate from the above request in the below SWAGGER URL:- ```https://minibox.mosip.net/v1/partnermanager/swagger-ui.html#/Partner%20Service%20Controller/uploadCACertificateUsingPOST``` Partner_Service_Controller --> /partners/certificate/ca/upload --> with below request + ``` + { + "id": "string", + "metadata": {}, + "request": { + "certificateData": "-----BEGIN CERTIFICATE-----\nMIIDTjCCAjagAwIBAgIEYFrxXTANBgkqhkiG9w0BAQsFADBpMQswCQYDVQQGEwJJ\nTjESMBAGA1UECAwJS2FybmF0YWthMRIwEAYDVQQHDAlCYW5nYWxvcmUxHjAcBgNV\nBAoMFW1wYXJ0bmVyLWRlZmF1bHQtYWJpczESMBAGA1UEAwwJYWJpcy1yb290MB4X\nDTIxMDMyNDA3NTkyNVoXDTIyMDMyNDA3NTkyNVowaTELMAkGA1UEBhMCSU4xEjAQ\nBgNVBAgMCUthcm5hdGFrYTESMBAGA1UEBwwJQmFuZ2Fsb3JlMR4wHAYDVQQKDBVt\ncGFydG5lci1kZWZhdWx0LWFiaXMxEjAQBgNVBAMMCWFiaXMtcm9vdDCCASIwDQYJ\nKoZIhvcNAQEBBQADggEPADCCAQoCggEBANkwlDzNZTBi1fBF4GU4qFAJ3S+Ca0Kf\ngfvg93rQlZ5LBTnZFwAxpCZtGHYb7vkqM9e7adYGC48EPWI0A+48QmF3Z6vSBXg9\nKckINa/vFCTEYrctMHS8CcBjWBf9agJq4+wWqNu8sYHD9pOzDf1uMbQJTI5VvgGx\nv890pZrXdIrR4MPTLB0rkl2sVOqbG7bts0Eqh8TO86126CDzoDrtBCj3RBP/j/dg\nBmz7LWFkG6/by+mXzdZcS46v7P/Q366WrDbMCCtjKIRAA0HQD3vdKT0V03Eiw/EU\nVxVh9sdbkO5h/T8VWI7ghEjr4PpJXPYWRbVlt6uPDpbX+yEiOWG/SsMCAwEAATAN\nBgkqhkiG9w0BAQsFAAOCAQEAEj42FlN8LnNPv3iWttydxm9kEJemyJdw8nPLCC4y\nxigXrcxPgNcoJiDBXLIAwhTmPK1hdn/BndAeUsX8mauuzf4V7Ydw1a999s8Vsj8S\nOLa8voXAE2sjdYZm0cYID0y/ak3+ZrKqCXP6bcmPOLz2plnGJB7TUQ+d8gZXsLA6\nCoopaJOlNM4jPNbX/k30vfFmyrXm2++5stErrSOix25J79DGdmJH896/pmGmB60/\nXGnpyESrVTbhTE+cx0gDHdq5T47qHcXM6CVuH/uYNy5iLCaBRzVQ043gFj3ioym1\nnZ60dsvdG8nEENBu9SzN3Mn24pz0BQ99Qn5ymsQwYAEeDQ==\n-----END CERTIFICATE-----\n", + "partnerDomain": "AUTH" + }, + "requesttime": "2021-03-24T08:24:13.349Z", + "version": "string" + } + ``` + * 4. 
Get the IDA certificate data from the below URL, Copy it and use it for certificate exchange in the next step. + ```https://minibox.mosip.net/v1/keymanager/getCertificate?applicationId=DIGITAL_CARD``` + + * 5. Upload IDA certificate from the above request in the below SWAGGER URL:- ```https://minibox.mosip.net/v1/partnermanager/swagger-ui.html#/Partner%20Service%20Controller/uploadCACertificat$ ``` + { + "id": "string", + "metadata": {}, + "request": { + "certificateData": "-----BEGIN CERTIFICATE-----\nMIIDTjCCAjagAwIBAgIEYFrxXTANBgkqhkiG9w0BAQsFADBpMQswCQYDVQQGEwJJ\nTjESMBAGA1UECAwJS2FybmF0YWthMRIwEAYDVQQHDA$ + "partnerDomain": "AUTH" + }, + "requesttime": "2021-03-24T08:24:13.349Z", + "version": "string" + } + ``` + * 6. Get the mpartner-default-digitalcard partner certificate data from the below URL, Copy it and use it for certificate exchange in the next step. + ```https://minibox.mosip.net/v1/keymanager/getCertificate?applicationId=DIGITAL_CARD&referenceId=mpartner-default-digitalcard``` + + + * 7. Upload mpartner-default-auth Partner certificate in the below SWAGGER URL:- ```https://minibox.mosip.net/v1/partnermanager/swagger-ui.html#/Partner%20Service%20Controller/uploadPartnerCertificateUsingPOST_1``` Partner_Service_Controller --> /partners/certificate/upload --> with below request + ``` + { + "id": "string", + "metadata": {}, + "request": { + "certificateData": "-----BEGIN CERTIFICATE-----\nMIIDWjCCAkKgAwIBAgIEYFrz6DANBgkqhkiG9w0BAQsFADBpMQswCQYDVQQGEwJJ\nTjESMBAGA1UECAwJS2FybmF0YWthMRIwEAYDVQQHDAlCYW5nYWxvcmUxHjAcBgNV\nBAoMFW1wYXJ0bmVyLWRlZmF1bHQtYWJpczESMBAGA1UEAwwJYWJpcy1yb290MB4X\nDTIxMDMyNDA4MTAxNloXDTIyMDMyNDA4MTAxNlowdTELMAkGA1UEBhMCSU4xEjAQ\nBgNVBAgMCUthcm5hdGFrYTESMBAGA1UEBwwJQmFuZ2Fsb3JlMR4wHAYDVQQKDBVt\ncGFydG5lci1kZWZhdWx0LWFiaXMxHjAcBgNVBAMMFW1wYXJ0bmVyLWRlZmF1bHQt\nYWJpczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKuA8CuDIRQCUCl9\nyVh/dGOb/CiMnbcL/lsLq+VeYo51yyycj5kH2wuTlnXRZAOJklCvhAIJP68q799S\nW+aMr+pOLm4rCgMfPD30UVdcmza+dPfl7A3/YZ5UjALOqjVMmwcUxmh1k5yL9QRo\n1LNLCGkwd0hfgT35Y9sC0CDxD3aOesaz0oP9dkGETpcv8nMW4VxWHvOekup1gqAi\nEn1VBat6qVGjwBNKAVkq75Q8P477DyT+t9NRs9IW68ZQXvR+VQvofDNDk8PshXVQ\nMjesEgQHs7bIhTb6hAmGJsQM97yBAA6+EEGGqvLTZDDXjTAtdNZpjml0jaaMnURl\nzF+qh08CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAjdfHjKlrt7mV0MomYO7KkuCc\naCscPAN74UZaCMRE5pXixeQVctsWE/KI7KdmJwZWqZvQrb/AX4VwZu5A1zcDNOJ6\nB7UaDePCMBXRPcyUAAWWwr0AtV0JkEei3d2TbqiPXqlCM1fvvkKQqGZxa61CvSdN\nz2XmY9W09gbAgkMx3svv6MHpZlJuWY8OZVr0ID1hW+ajEoCf5Adv2Iwuogg/Hs9D\nlhhvYg4GzU/qWE9vFYO52UqtVPfrzQZTPBQE5Hrg0a32HBOwL3vu0ms2gf1lEt23\nEf/8TZA5kT/0bMYlB6heGjIKEC90tEv645jbkgJoCI+GgazTTe9wYHXmgz9oPw==\n-----END CERTIFICATE-----\n", + "partnerDomain": "Auth", + "partnerId": "mpartner-default-digitalcard" + }, + "requesttime": "", + "version": "string" + } + + * 8. Upload the response signinng certificate obtained from the reponse of the above api into the keymanager for mpartner-default-digitalcard partner in keymanager using below Swagger URL: https://minbox.mosip.net/v1/keymanager/swagger-ui.html#/keymanager/uploadCertificateUsingPOST``` keymanager --> /uploadCertificate with below request + ``` + { + "id": "string", + "metadata": {}, + "request": { + "applicationId": "DIGITAL_CARD", + "certificateData": "certficate data fom the responce of step 7", + "referenceId": "mpartner-dafault-digitalcard" + }, + "requesttime": "2018-12-10T06:12:52.994Z", + "version": "string" + } + ``` + +# Troubleshooting + +- Please check if the domain name is correctly replaced. 
+- In case of errors related to timestamp please update the latest timestamp in the request. +- If the Swagger links are not available check if the services are running fine. + swagger 1:- kernel-Auth-service. + swagger 2:- pms services. + swagger 3:- pms services. + get certificate request :- keymanager services. +- In case you gett error in certifacte upload for either of ROOT, IDA, mpartner-default-digitalcard reponse as ```certificate data already exist``` pls ignore as the certifcate exchange is done once. +- As of now this is WIP on this document. +- For other descrepencies raise a github issue. +- Below is the example of how to get the get the certificate data from the response. + ``` + {"id":null,"version":null,"responsetime":"2021-04-18T10:03:20.606Z","metadata":null,"response":{"certificate":"~~~-----BEGIN CERTIFICATE-----\nMIIDkDCCAnigAwIBAgIIzui2vr6fKUMwDQYJKoZIhvcNAQELBQAwbjELMAkGA1UE\nBhMCSU4xCzAJBgNVBAgMAktBMRIwEAYDVQQHDAlCQU5HQUxPUkUxDTALBgNVBAoM\nBElJVEIxGjAYBgNVBAsMEU1PU0lQLVRFQ0gtQ0VOVEVSMRMwEQYDVQQDDApNT1NJ\nUC1ST09UMB4XDTIwMTIxNTE1NDcxOVoXDTI1MTIxNTE1NDcxOVowbjELMAkGA1UE\nBhMCSU4xCzAJBgNVBAgMAktBMRIwEAYDVQQHDAlCQU5HQUxPUkUxDTALBgNVBAoM\nBElJVEIxGjAYBgNVBAsMEU1PU0lQLVRFQ0gtQ0VOVEVSMRMwEQYDVQQDDApNT1NJ\nUC1ST09UMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0IG5QpRMA1dZ\n2FRitMuNlzOCr+qsEZnFGdUH6npYMgNPSw7kJAHpo2CAo4WBNAgz6i1fASSqb8EZ\nXmxnKC9qW31zf8xmnwJNDMFIYctZTp1ZVG7yox+HeI4u//XymAGEg0U/bJ9FVpYr\n6TIbFIO7HzbB12qEwEmvniWKILqzf7qY6F+62GrJyFIwdpWkmlDMUdU4L9V3R10S\nwrNOTDkbHnLb34uwtBpaMHmYgOasaOXxCNcEzdOf56w6RTJmSla9TJgeXn0hikF1\ntxlHkv3Bw2T4y7eVL7NZeMhKkJJW0J4+hWm6nWzRG3Su31HoUIph1GFhrVrq/84B\nlOqHvpDIcwIDAQABozIwMDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS+Kdh1\nX3eiq7UDZ3jBJwoKzFjLaDANBgkqhkiG9w0BAQsFAAOCAQEASzHHVt79eqzzYKLi\ncquGoS31Flq+EKrUdm5zLIYQx9lolVmRveJEqE85x02dGu8MMWjsshQvnzbG0PET\nR5kED5tVRSYx1W/Da5uE7EzQpiYeKsakmSArnslB0kFB+8UGb3KlmCUrQC0C4Ufo\ngbl2zEj9slLgjHKYbvGlki3Sz0oFAdEjuBdbWOrOaMQMUu7OZjMl/scyMBAR0U5J\nURVAGbEniMrw7a1z3LynVerc1qDAbuX1l4njUnit+JbB9B7QPbTEKjce1/pdyvUc\n9SbJpoznaFTRNFyq1iI98hsk+Iu9AImohiCV2DsvVULzACVQXhdApbVZBqjHAHbn\nkQcdtw==\n-----END CERTIFICATE-----\n~~~","certSignRequest":null,"issuedAt":"2020-12-15T15:47:19.000Z","expiryAt":"2025-12-15T15:47:19.000Z","timestamp":"2021-04-18T10:03:20.606Z"},"errors":[]} + ``` + highlighted data in the above response is the example certificate data required. diff --git a/deployment/v3/docs/external services configuration.md b/deployment/v3/docs/external services configuration.md index 0c61e671f..d392b51be 100644 --- a/deployment/v3/docs/external services configuration.md +++ b/deployment/v3/docs/external services configuration.md @@ -7,7 +7,7 @@ This document describes how to configure external services like MinIO, Keycloak, * Go to `mosip-config` and checkout to a specific branch to update the `config` properties. * The default value for the database hostname is `postgres-postgresql.postgres` and the default value for the database port number are `5432`. -* If you are using an external service, you need to update the hostname and port number via `sed` command mentioned below and also provide the external-hostname along with external-port. +* If you are using an external service, you need to update the hostname and port number via `sed` command mentioned below and also provide the external-hostname along with external-port. 
``` cd mosip-config sed -i 's/postgres-postgresql.postgres//g' * @@ -64,7 +64,7 @@ This document describes how to configure external services like MinIO, Keycloak, data: admin-password: {{ base64 encoded admin password }} ``` - >Note: + >Note: - The admin-password is the password of the admin user of the master realm in the keycloak. * Proceed with **keycloak_init.sh** script from [here](../external/iam/README.md#keycloak-init). @@ -74,7 +74,7 @@ This document describes how to configure external services like MinIO, Keycloak, * Go to `mosip-config` and checkout to a specific branch to update the `config` properties. * The default value for the s3 URL is `object.store.s3.url=http://minio.minio:9000`. -* If you are using an external service, you need to update the s3 URL via `sed` command below and also provide the external URL. +* If you are using an external service, you need to update the s3 URL via `sed` command below and also provide the external URL. ``` cd mosip-config sed -i 's/http://minio.minio//g' * diff --git a/deployment/v3/docs/images/gc-util.png b/deployment/v3/docs/images/gc-util.png new file mode 100644 index 000000000..f245953fb Binary files /dev/null and b/deployment/v3/docs/images/gc-util.png differ diff --git a/deployment/v3/docs/images/mailserver-1.png b/deployment/v3/docs/images/mailserver-1.png new file mode 100644 index 000000000..1315abf24 Binary files /dev/null and b/deployment/v3/docs/images/mailserver-1.png differ diff --git a/deployment/v3/docs/images/mailserver-2.png b/deployment/v3/docs/images/mailserver-2.png new file mode 100644 index 000000000..8ed002fe4 Binary files /dev/null and b/deployment/v3/docs/images/mailserver-2.png differ diff --git a/deployment/v3/docs/images/mailserver-3.png b/deployment/v3/docs/images/mailserver-3.png new file mode 100644 index 000000000..73f32fa60 Binary files /dev/null and b/deployment/v3/docs/images/mailserver-3.png differ diff --git a/deployment/v3/docs/images/mailserver-4.png b/deployment/v3/docs/images/mailserver-4.png new file mode 100644 index 000000000..4c9256a8b Binary files /dev/null and b/deployment/v3/docs/images/mailserver-4.png differ diff --git a/deployment/v3/docs/images/mailserver-5.png b/deployment/v3/docs/images/mailserver-5.png new file mode 100644 index 000000000..a678e21dd Binary files /dev/null and b/deployment/v3/docs/images/mailserver-5.png differ diff --git a/deployment/v3/docs/images/mailserver-6.png b/deployment/v3/docs/images/mailserver-6.png new file mode 100644 index 000000000..7aab6edec Binary files /dev/null and b/deployment/v3/docs/images/mailserver-6.png differ diff --git a/deployment/v3/docs/images/mailserver-7.png b/deployment/v3/docs/images/mailserver-7.png new file mode 100644 index 000000000..1947a6f05 Binary files /dev/null and b/deployment/v3/docs/images/mailserver-7.png differ diff --git a/deployment/v3/docs/images/mailserver-8.png b/deployment/v3/docs/images/mailserver-8.png new file mode 100644 index 000000000..516d2280d Binary files /dev/null and b/deployment/v3/docs/images/mailserver-8.png differ diff --git a/deployment/v3/docs/images/mailserver-9.png b/deployment/v3/docs/images/mailserver-9.png new file mode 100644 index 000000000..458d625ba Binary files /dev/null and b/deployment/v3/docs/images/mailserver-9.png differ diff --git a/deployment/v3/docs/install-gui-desktop-on-ubuntu.md b/deployment/v3/docs/install-gui-desktop-on-ubuntu.md new file mode 100644 index 000000000..cc9f5c55f --- /dev/null +++ b/deployment/v3/docs/install-gui-desktop-on-ubuntu.md @@ -0,0 +1,42 @@ +# 
Install GUI desktop on Ubuntu
+
+* Install `tightvncserver` & its dependency packages.
+  ```
+  $ sudo apt-get update
+  $ sudo apt install -y gnome-panel gnome-settings-daemon metacity nautilus gnome-terminal ubuntu-desktop
+  $ sudo apt-get install -y tightvncserver
+  ```
+* Set up `vncserver`.
+  ```
+  $ vncserver
+
+  You will require a password to access your desktops.
+  Password:
+  Warning: password truncated to the length of 8.
+  Verify:
+  Would you like to enter a view-only password (y/n)? n
+  xauth: file /home/ubuntu/.Xauthority does not exist
+
+  New 'X' desktop is ip-172-31-13-234:1
+
+  Creating default startup script /home/ubuntu/.vnc/xstartup
+  Starting applications specified in /home/ubuntu/.vnc/xstartup
+  Log file is /home/ubuntu/.vnc/ip-172-31-13-234:1.log
+  ```
+  ```
+  $ vim ~/.vnc/xstartup
+
+  #!/bin/sh
+  export XKL_XMODMAP_DISABLE=1
+  export XDG_CURRENT_DESKTOP="GNOME-Flashback:GNOME"
+  export XDG_MENU_PREFIX="gnome-flashback-"
+  gnome-session --session=gnome-flashback-metacity --disable-acceleration-check &
+  ```
+* Restart `vncserver` via the below commands:
+  ```
+  $ vncserver -kill :1
+  $ vncserver :1
+  $ sudo reboot
+  ```
+* Ensure to expose TCP port `5901`.
+* If the system undergoes a reboot, execute the `vncserver :1` command to manually start the vncserver.
diff --git a/deployment/v3/docs/rancher-view-only-user/README.MD b/deployment/v3/docs/rancher-view-only-user/README.MD
new file mode 100644
index 000000000..57d8b02a1
--- /dev/null
+++ b/deployment/v3/docs/rancher-view-only-user/README.MD
@@ -0,0 +1,25 @@
+# Rancher view-only user
+
+
+
+## Add a view-only user on the cluster API using Rancher
+
+
+
+Rancher, by default, has `Owner`, `Member`, and `Custom` cluster permissions, but none of these grants read-only access to a user.
+
+## Prerequisites
+
+ * You need to have admin access on Rancher itself (this is separate from cluster-admin).
+
+## Create a view-only user
+
+Log in as admin, then go to `Users & Authentication` under `Configuration` -> `Create Cluster Role`
+
+ * Name: give a name for the role, e.g., view-only
+ * Under Grant Resources, select `get`, `list`, and `watch` as verbs and `*` for `Resources` and `API Groups`
+
+Save the newly created role; `view_only` will then be displayed as part of the defined roles when adding a user to the cluster.
+
+
+![](img/rancher_role.png)
\ No newline at end of file
diff --git a/deployment/v3/docs/rancher-view-only-user/img/rancher_role.png b/deployment/v3/docs/rancher-view-only-user/img/rancher_role.png
new file mode 100644
index 000000000..3b13bf46d
Binary files /dev/null and b/deployment/v3/docs/rancher-view-only-user/img/rancher_role.png differ
diff --git a/deployment/v3/docs/setup-email-server-via-apache-james.md b/deployment/v3/docs/setup-email-server-via-apache-james.md
new file mode 100644
index 000000000..0e57f1ceb
--- /dev/null
+++ b/deployment/v3/docs/setup-email-server-via-apache-james.md
@@ -0,0 +1,87 @@
+# Setup email server via Apache James on Ubuntu
+
+* Ensure to install [docker](https://docs.docker.com/engine/install/ubuntu/) on the Ubuntu machine.
+* Create `MX`, `PTR`, & `TXT` DNS records which point to the public IP of the Ubuntu machine.
+  ![mailserver-1.png](images/mailserver-1.png)
+  ![mailserver-2.png](images/mailserver-2.png)
+  ![mailserver-3.png](images/mailserver-3.png)
+ verify via below command: + ``` + $ host -t MX mail.soil.mosip.net + mail.soil.mosip.net mail is handled by 10 13.201.190.170. + ``` +* Run the below docker command to set up mail server. + ``` + docker run -d --name james -p 143:143 -p 587:587 apache/james:demo-3.8.0 + ``` +* Ensure to open firewall ports via below command: + ``` + sudo ufw allow 143/tcp + sudo ufw allow 587/tcp + ``` +* Login to James docker to create domain & user to James server. + ``` + ubuntu@ip-172-31-15-176:~$ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 5bf8e2fd568e apache/james:demo-3.8.0 "./startup.sh" 7 minutes ago Up 7 minutes 25/tcp, 80/tcp, 0.0.0.0:143->143/tcp, :::143->143/tcp, 465/tcp, 993/tcp, 4000/tcp, 8000/tcp, 0.0.0.0:587->587/tcp, :::587->587/tcp james + + ubuntu@ip-172-31-15-176:~$ docker exec -it james bash + root@5bf8e2fd568e:~# + ``` + ``` + root@5bf8e2fd568e:~# james-cli -h 127.0.0.1 -p 9999 AddDomain mail.soil.mosip.net + AddDomain command executed sucessfully in 504 ms. + + root@5bf8e2fd568e:~# james-cli -h 127.0.0.1 -p 9999 AddUser admin@mail.soil.mosip.net + AddUser command executed sucessfully in 474 ms. + ``` +* Open `thunderbird` application to login in to your mail server account. +* Goto `Settings` ---> `Account Settings` ---> `Account Actions` ---> `Add Mail Account`. +* Provide server details, login credentials as shown below:
+ ![mailserver-4.png](images/mailserver-4.png) + ![mailserver-5.png](images/mailserver-5.png) +* Click on test/re-test to verify server accessibility.
+  ![mailserver-6.png](images/mailserver-6.png)
+* Tick `permanently store this exception` and click on `Confirm Security Exception`.
+  ![mailserver-7.png](images/mailserver-7.png)
+* Tick `permanently store this exception` and click on `Confirm Security Exception` again when prompted.
+  You will receive this prompt while sending a mail from your account.
+ ![mailserver-8.png](images/mailserver-8.png) +* Try sending/receiving mails from one account to another. + + +# Troubleshooting + +If you are experiencing difficulties with sending or receiving emails, it is possible that the mail server of either the sender or receiver has blocked your mail server. + For example: + ``` + Hi. This is the James mail server at 0062d7fb41a3. + I'm afraid I wasn't able to deliver your message to the following addresses. + This is a permanent error; I've given up. Sorry it didn't work out. Below + I include the list of recipients and the reason why I was unable to deliver + your message. + + Original email subject: as + + Failed recipient(s): + syedsalman041997@gmail.com + + Error message: + 550-5.7.26 This mail has been blocked because the sender is unauthenticated. + 550-5.7.26 Gmail requires all senders to authenticate with either SPF or DKIM. + 550-5.7.26 + 550-5.7.26 Authentication results: + 550-5.7.26 DKIM = did not pass + 550-5.7.26 SPF [mail.camdgc-dev.mosip.net] with ip: [164.52.204.214] = did not + 550-5.7.26 pass + 550-5.7.26 + 550-5.7.26 For instructions on setting up authentication, go to + 550 5.7.26 https://support.google.com/mail/answer/81126#authentication d12-20020a170903230c00b001db420e7552si10082865plh.65 - gsmtp + ``` + +To remove your mail server IP from the Office 365 Anti-Spam IP De-list Portal, please follow these steps: + +1. Access the portal through this link: [Office 365 Anti-Spam IP De-list Portal](https://sender.office.com/) +2. Enter your mail server Email ID, public IP, and click on the Submit button. +3. Look out for a confirmation email and click on the provided link. +4. The IP removal process typically takes around 30 minutes to complete. diff --git a/deployment/v3/external/README.md b/deployment/v3/external/README.md index e24de531b..6d4e6c5a4 100644 --- a/deployment/v3/external/README.md +++ b/deployment/v3/external/README.md @@ -17,7 +17,6 @@ * [ABIS](abis/README.md) * [Message Gateways](msg-gateway/README.md) * [docker secrets](docker-secrets/README.md) -* [captcha](captcha/README.md) * [Landing page](landing-page/README.md) ## Install * Run `install-all.sh` script to install in defined sequence. 
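The Apache James guide above ends with an interactive Thunderbird check. For a scripted check of the same setup, the snippet below is a minimal sketch using only Python's standard library: it assumes the James container from the guide is reachable as mail.soil.mosip.net on submission port 587 and that the admin@mail.soil.mosip.net account was created with james-cli; the PASSWORD placeholder is hypothetical (the AddUser password is not shown in the guide), and certificate verification is relaxed only because the demo image ships a self-signed certificate.

```python
# Minimal SMTP smoke test for the James server set up in the guide above.
# Host, port, and the admin account come from that guide; PASSWORD is a
# placeholder for whatever credential was given to `james-cli AddUser`.
import smtplib
import ssl
from email.message import EmailMessage

SMTP_HOST = "mail.soil.mosip.net"   # assumption: same host as the MX record above
SMTP_PORT = 587                     # submission port exposed by the docker run command
USERNAME = "admin@mail.soil.mosip.net"
PASSWORD = "CHANGE_ME"              # hypothetical; replace with the AddUser password

msg = EmailMessage()
msg["From"] = USERNAME
msg["To"] = USERNAME                # send to self so delivery can be checked in Thunderbird
msg["Subject"] = "James SMTP smoke test"
msg.set_content("If you can read this in Thunderbird, SMTP submission works.")

# The demo image uses a self-signed certificate, so verification is disabled
# here purely for testing -- the same trade-off as accepting the security
# exception in Thunderbird.
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE

with smtplib.SMTP(SMTP_HOST, SMTP_PORT, timeout=30) as smtp:
    smtp.starttls(context=context)
    smtp.login(USERNAME, PASSWORD)
    smtp.send_message(msg)
    print("Mail accepted by James for delivery")
```

If the message arrives in the inbox configured earlier in Thunderbird, SMTP submission and delivery are working end to end.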
diff --git a/deployment/v3/external/all/install-all.sh b/deployment/v3/external/all/install-all.sh index c0b5dbc8f..9c4d5875e 100755 --- a/deployment/v3/external/all/install-all.sh +++ b/deployment/v3/external/all/install-all.sh @@ -31,7 +31,7 @@ function installing_all() { cd $ROOT_DIR/antivirus/clamav ./install.sh - cd $ROOT_DIR/activemq + cd $ROOT_DIR/activemq/ ./install.sh cd $ROOT_DIR/kafka @@ -51,8 +51,9 @@ function installing_all() { ./install.sh cd $ROOT_DIR/landing-page + ./install.sh - + cd $ROOT_DIR/captcha ./install.sh @@ -67,4 +68,3 @@ set -o nounset ## set -u : exit the script if you try to use an uninitialised set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes installing_all # calling function - diff --git a/deployment/v3/external/antivirus/clamav/install.sh b/deployment/v3/external/antivirus/clamav/install.sh index 206aa2e9b..34e8b3de6 100755 --- a/deployment/v3/external/antivirus/clamav/install.sh +++ b/deployment/v3/external/antivirus/clamav/install.sh @@ -7,10 +7,10 @@ if [ $# -ge 1 ] ; then fi NS=clamav -CHART_VERSION=2.4.1 +CHART_VERSION=3.1.0 echo Create $NS namespace -kubectl create ns $NS +kubectl create ns $NS function installing_Clamav() { echo Istio label diff --git a/deployment/v3/external/antivirus/clamav/values.yaml b/deployment/v3/external/antivirus/clamav/values.yaml index 2e815e143..a1f378af9 100644 --- a/deployment/v3/external/antivirus/clamav/values.yaml +++ b/deployment/v3/external/antivirus/clamav/values.yaml @@ -2,7 +2,7 @@ ## Increase in production replicaCount: 1 ## We are using official clamav docker instead of mailu/clamav that was present originally -image: - repository: clamav/clamav - tag: latest - pullPolicy: Always +#image: + # repository: clamav/clamav + # tag: 1.2 + # pullPolicy: Always diff --git a/deployment/v3/external/data-archive/README.md b/deployment/v3/external/data-archive/README.md new file mode 100644 index 000000000..cb97246cf --- /dev/null +++ b/deployment/v3/external/data-archive/README.md @@ -0,0 +1,62 @@ +# Database Archiving Configuration + +This configuration file is used for setting up database connections and defining archiving parameters. Please follow the guidelines below to fill in the required information. + +## Database Connections + +### Archive Database Connection (archive_db) + +- `db_name`: Name of the archive database. +- `host`: Destination host for the archive database. +- `port`: Port number for the archive database connection. +- `su_user`: Superuser for the archive database. +- `su_user_pwd`: Password for the superuser. +- `db_pwd`: Password for the archive database. +- `archivehost`: Destination host for the archive database. +- `archiveport`: Port number for the archive database connection. +- `archiveuname`: Archive database username. +- `archive_dbname`: Archive database name. +- `archive_schemaname`: Archive schema name. +- `archive_db_password`: Password for the archive database. + +### Source Database Connections (source_db) + +For each source database (audit, credential, esignet, ida, idrepo, kernel, master, pms, prereg, regprc, resident), provide the following information: + +- `source__host`: Source database host. +- `source__port`: Port number for the source database connection. +- `source__uname`: Source database username. +- `source__dbname`: Source database name. +- `source__schemaname`: Source schema name. +- `source__db_pass`: Password for the source database. 
+ +- `provide_db_names_to_archive`: Comma-separated list of database names to archive (e.g., "AUDIT,CREDENTIAL,IDA,.....").(in CAPS) + + +## Container Volume Path +container_volume_path: Path where JSON files containing information about all databases will be stored + +## Archiving Information (all_db_tables_info) + +For each database, specify tables_info with details for archiving. Example: + +```yaml +audit: + tables_info: + - source_table: "app_audit_log" + archive_table: "mosip_audit_app_audit_log" + id_column: "log_id" + date_column: "log_dtimes" + retention_days: 30 + operation_type: "archive_delete" + +source_table: Name of the table in the source database. +archive_table: Name of the table in the archive database. +id_column: Column representing the unique identifier. +date_column: Column representing the date of the record. +retention_days: Number of days to retain the archived data. +operation_type: Type of operation for archiving (e.g., archive_delete, delete, none). +- Delete: Delete records from the source table. +- Archive and Delete: Archive records to an archive table and then delete them from the source table. +- Archive (No Delete): Archive records to an archive table without deleting them from the source table. +- None: Skip archival for the specified table. diff --git a/deployment/v3/external/data-archive/delete.sh b/deployment/v3/external/data-archive/delete.sh new file mode 100755 index 000000000..521fad2ed --- /dev/null +++ b/deployment/v3/external/data-archive/delete.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Uninstalls data-archive +## Usage: ./delete.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + +function deleting_data-archive() { + NS=data-archive + while true; do + read -p "Are you sure you want to delete data-archive helm charts?(Y/n) " yn + if [ $yn = "Y" ] + then + helm -n $NS delete data-archive + break + else + break + fi + done + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +deleting_data-archive # calling function diff --git a/deployment/v3/external/data-archive/install.sh b/deployment/v3/external/data-archive/install.sh new file mode 100755 index 000000000..0ed86cd9d --- /dev/null +++ b/deployment/v3/external/data-archive/install.sh @@ -0,0 +1,82 @@ +#!/bin/bash +# Installs data-archive +## Usage: ./install.sh [kubeconfig] + +if [ $# -ge 1 ]; then + export KUBECONFIG=$1 +fi + +NS=data-archive +CHART_VERSION=1.0.0 + +echo Create $NS namespace +kubectl create ns $NS + +function installing_data-archive() { + echo Updating repos + helm repo add mosip https://mosip.github.io/mosip-helm + helm repo update + + read -p "Is values.yaml for data-archive chart set correctly as part of Pre-requisites?(Y/n) " yn; + if [ "$yn" != "Y" ]; then + echo "ERROR: values.yaml not set correctly; EXITING;"; + exit 1; + fi + + read -p "Please enter the time(hr) to run the cronjob every day (time: 0-23) : " time + if [ -z "$time" ]; then + echo "ERROR: Time cannot be empty; EXITING;"; + exit 1; + fi + if ! 
[ $time -eq $time ] 2>/dev/null; then + echo "ERROR: Time $time is not a number; EXITING;"; + exit 1; + fi + if [ $time -gt 23 ] || [ $time -lt 0 ]; then + echo "ERROR: Time should be in range ( 0-23 ); EXITING;"; + exit 1; + fi + + read -p "Is archival running for sandbox installation? (Y/N): " archival_running + if [ "$archival_running" == "Y" ]; then + echo "Sandbox installation selected. This will use superuser PostgreSQL secrets for creating archivedb." + super_user_password=$(kubectl get secret --namespace postgres postgres-postgresql -o jsonpath={.data.postgres-password} | base64 --decode) + echo "Common secrets will be used as passwords for all the db users." + db_common_password=$(kubectl get secret --namespace postgres db-common-secrets -o jsonpath={.data.db-dbuser-password} | base64 --decode) + set_db_pwd="--set databases.archive_db.su_user_pwd=$super_user_password \ + --set databases.source_db.source_audit_db_pass=$db_common_password \ + --set databases.source_db.source_credential_db_pass=$db_common_password \ + --set databases.source_db.source_esignet_db_pass=$db_common_password \ + --set databases.source_db.source_ida_db_pass=$db_common_password \ + --set databases.source_db.source_idrepo_db_pass=$db_common_password \ + --set databases.source_db.source_kernel_db_pass=$db_common_password \ + --set databases.source_db.source_master_db_pass=$db_common_password \ + --set databases.source_db.source_pms_db_pass=$db_common_password \ + --set databases.source_db.source_prereg_db_pass=$db_common_password \ + --set databases.source_db.source_regprc_db_pass=$db_common_password \ + --set databases.source_db.source_resident_db_pass=$db_common_password \ + --set databases.archive_db.db_pwd=$db_common_password \ + --set databases.archive_db.archive_db_password=$db_common_password" + + elif [ "$archival_running" == "N" ]; then + echo "Other installation selected.This will Use individual secrets for db passwords from values.yaml" + set_db_pwd="" + else + echo "Incorrect input; EXITING;" + exit 1; + fi + + # Install data-archive + helm -n $NS install data-archive mosip/data-archive --set crontime="0 $time * * *" -f values.yaml $set_db_pwd --version $CHART_VERSION + + echo Installed data-archive + return 0 +} + +# set commands for error handling. 
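For reference, the sandbox branch of the prompts above boils down to roughly the commands below; the namespace, secret names, chart name, and chart version are the ones used in the script, `2` is just an example answer to the cron-hour prompt, and the per-database `--set ...db_pass=...` overrides that the script builds from the second secret are omitted for brevity:

```sh
# Passwords the installer derives for the sandbox case (same secret names as in the script).
kubectl get secret --namespace postgres postgres-postgresql \
  -o jsonpath='{.data.postgres-password}' | base64 --decode; echo
kubectl get secret --namespace postgres db-common-secrets \
  -o jsonpath='{.data.db-dbuser-password}' | base64 --decode; echo

# Equivalent of answering "2" to the time prompt: run the archival cronjob at 02:00 daily.
helm -n data-archive install data-archive mosip/data-archive \
  --set crontime="0 2 * * *" -f values.yaml --version 1.0.0
```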
+set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +installing_data-archive # calling function diff --git a/deployment/v3/external/data-archive/values.yaml b/deployment/v3/external/data-archive/values.yaml new file mode 100644 index 000000000..64e3cd4f5 --- /dev/null +++ b/deployment/v3/external/data-archive/values.yaml @@ -0,0 +1,281 @@ +databases: + archive_db: + db_name: "mosip_archive" + host: "" + port: 5432 + su_user: "postgres" + su_user_pwd: "" + db_pwd: "" + dml: 0 + archivehost: "" + archiveport: 5432 + archiveuname: "archiveuser" + archive_dbname: "mosip_archive" + archive_schemaname: "archive" + archive_db_password: "" + source_db: + provide_db_names_to_archive: "AUDIT,CREDENTIAL,IDA" + source_audit_host: "" + source_audit_port: 5432 + source_audit_uname: "audituser" + source_audit_dbname: "mosip_audit" + source_audit_schemaname: "audit" + source_audit_db_pass: "" + source_credential_host: "" + source_credential_port: 5432 + source_credential_uname: "credentialuser" + source_credential_dbname: "mosip_credential" + source_credential_schemaname: "credential" + source_credential_db_pass: "" + source_esignet_host: "" + source_esignet_port: 5432 + source_esignet_uname: "esignetuser" + source_esignet_dbname: "mosip_esignet" + source_esignet_schemaname: "esignet" + source_esignet_db_pass: "" + source_ida_host: "" + source_ida_port: 5432 + source_ida_uname: "idauser" + source_ida_dbname: "mosip_ida" + source_ida_schemaname: "ida" + source_ida_db_pass: "" + source_idrepo_host: "" + source_idrepo_port: 5432 + source_idrepo_uname: "idrepouser" + source_idrepo_dbname: "mosip_idrepo" + source_idrepo_schemaname: "idrepo" + source_idrepo_db_pass: "" + source_kernel_host: "" + source_kernel_port: 5432 + source_kernel_uname: "kerneluser" + source_kernel_dbname: "mosip_kernel" + source_kernel_schemaname: "kernel" + source_kernel_db_pass: "" + source_master_host: "" + source_master_port: 5432 + source_master_uname: "masteruser" + source_master_dbname: "mosip_master" + source_master_schemaname: "master" + source_master_db_pass: "" + source_pms_host: "" + source_pms_port: 5432 + source_pms_uname: "pmsuser" + source_pms_dbname: "mosip_pms" + source_pms_schemaname: "pms" + source_pms_db_pass: "" + source_prereg_host: "" + source_prereg_port: 5432 + source_prereg_uname: "prereguser" + source_prereg_dbname: "mosip_prereg" + source_prereg_schemaname: "prereg" + source_prereg_db_pass: "" + source_regprc_host: "" + source_regprc_port: 5432 + source_regprc_uname: "regprcuser" + source_regprc_dbname: "mosip_regprc" + source_regprc_schemaname: "regprc" + source_regprc_db_pass: "" + source_resident_host: "" + source_resident_port: 5432 + source_resident_uname: "residentuser" + source_resident_dbname: "mosip_resident" + source_resident_schemaname: "resident" + source_resident_db_pass: "" + container_volume_path: "/all-db-info-json" + all_db_tables_info: + audit: + tables_info: + - source_table: "app_audit_log" + archive_table: "mosip_audit_app_audit_log" + id_column: "log_id" + date_column: "log_dtimes" + retention_days: 30 + operation_type: "archive_delete" + credential: + tables_info: + - source_table: "credential_transaction" + archive_table: "mosip_credential_credential_transaction" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 30 + 
operation_type: "archive_delete" + esignet: + tables_info: + - source_table: "consent_history" + archive_table: "mosip_esignet_consent_history" + id_column: "id" + date_column: "cr_dtimes" + retention_days: 30 + operation_type: "none" + ida: + tables_info: + - source_table: "credential_event_store" + archive_table: "mosip_ida_credential_event_store" + id_column: "event_id" + date_column: "cr_dtimes" + retension_days: 30 + operation_type: "archive_delete" + - source_table: "otp_transaction" + archive_table: "mosip_ida_otp_transaction" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 30 + operation_type: 'delete' + idrepo: + tables_info: + - source_table: "anonymous_profile" + archive_table: "mosip_idrepo_anonymous_profile" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 30 + operation_type: "archive_delete" + - source_table: "credential_request_status" + archive_table: "mosip_idrepo_credential_request_status" + id_column: "individual_id" + date_column: "cr_dtimes" + retension_days: 30 + operation_type: "archive_delete" + - source_table: "uin_draft" + archive_table: "mosip_idrepo_uin_draft" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 30 + operation_type: "archive_delete" + kernel: + tables_info: + - source_table: "otp_transaction" + archive_table: "mosip_kernel_otp_transaction" + id_column: "id" + date_column: "generated_dtimes" + retension_days: 7 + operation_type: "delete" + master: + tables_info: + - source_table: "bulkupload_transaction" + archive_table: "mosip_master_bulkupload_transaction" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 91 + operation_type: "archive_delete" + - source_table: "device_master_h" + archive_table: "mosip_master_device_master_h" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 365 + operation_type: "archive_delete" + - source_table: "machine_master_h" + archive_table: "mosip_master_machine_master_h" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 183 + operation_type: "archive_delete" + - source_table: "registration_center_h" + archive_table: "mosip_master_registration_center_h" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 365 + operation_type: "archive_delete" + - source_table: "user_detail_h" + archive_table: "mosip_master_user_detail_h" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 183 + operation_type: "archive_delete" + - source_table: "zone_user_h" + archive_table: "mosip_master_zone_user_h" + id_column: "usr_id" + date_column: "cr_dtimes" + retension_days: 183 + operation_type: "archive_delete" + pms: + tables_info: + - source_table: "auth_policy_h" + archive_table: "mosip_pms_auth_policy_h" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 183 + operation_type: "archive_delete" + - source_table: "secure_biometric_interface_h" + archive_table: "mosip_pms_secure_biometric_interface_h" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 183 + operation_type: "archive_delete" + - source_table: "partner_h" + archive_table: "mosip_pms_partner_h" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 183 + operation_type: "archive_delete" + prereg: + tables_info: + - source_table: "otp_transaction" + archive_table: "mosip_prereg_otp_transaction" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 30 + operation_type: "delete" + regprc: + tables_info: + - source_table: "abis_response_det" + archive_table: "mosip_regprc_abis_response_det" + id_column: "abis_resp_id" + date_column: "cr_dtimes" 
+ retension_days: 183 + operation_type: "archive_delete" + - source_table: "abis_response" + archive_table: "mosip_regprc_abis_response" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 183 + operation_type: "archive_delete" + - source_table: "abis_request" + archive_table: "mosip_regprc_abis_request" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 183 + operation_type: "archive_delete" + - source_table: "reg_demo_dedupe_list" + archive_table: "mosip_regprc_reg_demo_dedupe_list" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 183 + operation_type: "archive_delete" + - source_table: "registration_transaction" + archive_table: "mosip_regprc_registration_transaction" + id_column: "regtrn_id" + date_column: "cr_dtimes" + retension_days: 183 + operation_type: "archive_delete" + resident: + tables_info: + - source_table: "otp_transaction" + archive_table: "mosip_resident_otp_transaction" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 30 + operation_type: "delete" + - source_table: "resident_grievance_ticket" + archive_table: "mosip_resident_grievance_ticket" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 365 + operation_type: "archive_delete" + - source_table: "resident_session" + archive_table: "mosip_resident_session" + id_column: "session_id" + date_column: "login_dtimes" + retension_days: 30 + operation_type: "archive_delete" + - source_table: "resident_transaction" + archive_table: "mosip_resident_transaction" + id_column: "id" + date_column: "cr_dtimes" + retension_days: 365 + operation_type: "archive_delete" + - source_table: "resident_user_actions" + archive_table: "mosip_resident_user_actions" + id_column: "ida_token" + date_column: "last_bell_notif_click_dtimes" + retension_days: 365 + operation_type: "archive_delete" \ No newline at end of file diff --git a/deployment/v3/external/docker-secrets/delete.sh b/deployment/v3/external/docker-secrets/delete.sh index 8ed9c267f..19ef13b31 100755 --- a/deployment/v3/external/docker-secrets/delete.sh +++ b/deployment/v3/external/docker-secrets/delete.sh @@ -11,7 +11,7 @@ function deleting_secrets() { read -p "Are you sure you want to delete regsecret?(Y/n) " yn if [ $yn = "Y" ] then - kubectl delete secret regsecret + kubectl delete secret regsecret break else break @@ -26,4 +26,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_secrets # calling function \ No newline at end of file +deleting_secrets # calling function diff --git a/deployment/v3/external/docker-secrets/install.sh b/deployment/v3/external/docker-secrets/install.sh index 1d9ee30d3..f428f99c9 100755 --- a/deployment/v3/external/docker-secrets/install.sh +++ b/deployment/v3/external/docker-secrets/install.sh @@ -14,7 +14,7 @@ function installing_secrets() { i=$((i+1)) echo "Enter docker registry URL (e.g. 
https://index.docker.io/v1/ for dockerhub)" read DOCKER_REGISTRY_URL - echo Enter docker registry username + echo Enter docker registry username read USERNAME echo Enter docker registry Password/Token read PASSWORD @@ -33,4 +33,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -installing_secrets # calling function \ No newline at end of file +installing_secrets # calling function diff --git a/deployment/v3/external/hsm/softhsm/README.md b/deployment/v3/external/hsm/softhsm/README.md index 84136930b..2d3887488 100644 --- a/deployment/v3/external/hsm/softhsm/README.md +++ b/deployment/v3/external/hsm/softhsm/README.md @@ -9,7 +9,7 @@ sh install.sh * Keys are created in the mounted PV which gets mounted at `/softhsm/tokens` inside the container. * Random PIN generated if not specified. Set `securityPIN` in `values.yaml`. -# Backup SoftHSM +## Backup SoftHSM #### Backup * Update the below variables @@ -35,3 +35,4 @@ sh install.sh * Execute the following command to restore SoftHSM from backup. ``` kubectl --kubeconfig=$KUBECONFIG -n $NS cp ./softhsm-kernel/tokens $POD_NAME:softhsm/tokens + ``` diff --git a/deployment/v3/external/hsm/softhsm/delete.sh b/deployment/v3/external/hsm/softhsm/delete.sh index e9aad7035..9e71e373d 100755 --- a/deployment/v3/external/hsm/softhsm/delete.sh +++ b/deployment/v3/external/hsm/softhsm/delete.sh @@ -14,6 +14,7 @@ function deleting_softhsm() { then helm -n $NS delete softhsm-kernel helm -n $NS delete softhsm-ida + helm -n $NS delete softhsm-idp break else break diff --git a/deployment/v3/external/iam/export.sh b/deployment/v3/external/iam/export.sh index 82a5a32be..d942f666c 100755 --- a/deployment/v3/external/iam/export.sh +++ b/deployment/v3/external/iam/export.sh @@ -77,4 +77,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -export_keycloak # calling function \ No newline at end of file +export_keycloak # calling function diff --git a/deployment/v3/external/iam/import-init-values.yaml b/deployment/v3/external/iam/import-init-values.yaml index 0ab23c4b6..bc58c7bf3 100644 --- a/deployment/v3/external/iam/import-init-values.yaml +++ b/deployment/v3/external/iam/import-init-values.yaml @@ -359,7 +359,6 @@ keycloak: assign_client_scopes: - send_binding_otp - wallet_binding - - name: mosip-resident-client mappers: [] saroles: @@ -501,11 +500,10 @@ keycloak: - PMS_USER - uma_authorization - offline_access - sa_client_roles: - - realm-management: ## realm-management client id - - view-users # realm-management client roles - - view-clients - - view-realm - - manage-users - + sa_client_roles: + - realm-management: ## realm-management client id + - view-users # realm-management client roles + - view-clients + - view-realms + - manage-users users: [] diff --git a/deployment/v3/external/iam/install.sh b/deployment/v3/external/iam/install.sh index 0f1a7dda4..1268c6ee2 100755 --- a/deployment/v3/external/iam/install.sh +++ b/deployment/v3/external/iam/install.sh @@ -19,7 +19,7 @@ function installing_keycloak() { helm repo update echo Installing - helm -n $NS install $SERVICE_NAME mosip/keycloak --version 
"7.1.18" --set image.repository=mosipid/mosip-artemis-keycloak --set image.tag=1.2.0.1 --set image.pullPolicy=Always -f values.yaml --wait + helm -n $NS install $SERVICE_NAME mosip/keycloak --version "7.1.18" --set image.repository=mosipqa/mosip-artemis-keycloak --set image.tag=develop --set image.pullPolicy=Always -f values.yaml --wait EXTERNAL_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-iam-external-host}) echo Install Istio gateway, virtual service diff --git a/deployment/v3/external/iam/keycloak_init.sh b/deployment/v3/external/iam/keycloak_init.sh index f4869efe6..92935074d 100755 --- a/deployment/v3/external/iam/keycloak_init.sh +++ b/deployment/v3/external/iam/keycloak_init.sh @@ -38,7 +38,7 @@ read_user_input(){ function initialize_keycloak() { NS=keycloak - CHART_VERSION=12.0.1 + CHART_VERSION=0.0.1-develop helm repo add mosip https://mosip.github.io/mosip-helm helm repo update diff --git a/deployment/v3/external/iam/upgrade-init-values.yaml b/deployment/v3/external/iam/upgrade-init-values.yaml index 1094cfea3..4060693e7 100644 --- a/deployment/v3/external/iam/upgrade-init-values.yaml +++ b/deployment/v3/external/iam/upgrade-init-values.yaml @@ -228,6 +228,7 @@ keycloak: - PUBLISH_REGISTRATION_PROCESSOR_WORKFLOW_COMPLETED_EVENT_GENERAL - PUBLISH_CREDENTIAL_STATUS_UPDATE_GENERAL - PUBLISH_REGISTRATION_PROCESSOR_WORKFLOW_PAUSED_FOR_ADDITIONAL_INFO_EVENT_GENERAL + - SUBSCRIBE_CREDENTIAL_ISSUED_INDIVIDUAL - name: mosip-resident-client mappers: [] saroles: @@ -238,6 +239,8 @@ keycloak: - uma_authorization - name: mosip-prereg-client mappers: [] + del_saroles: + - INDIVIDUAL saroles: - PREREG - REGISTRATION_PROCESSOR diff --git a/deployment/v3/external/iam/upgrade-init.sh b/deployment/v3/external/iam/upgrade-init.sh index d2bbfc525..6965ae5e9 100755 --- a/deployment/v3/external/iam/upgrade-init.sh +++ b/deployment/v3/external/iam/upgrade-init.sh @@ -17,7 +17,9 @@ function upgrade_init() { IAM_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-iam-external-host}) echo Initializing keycloak - helm -n $NS install keycloak-upgrade mosip/keycloak-init --set frontend=https://$IAM_HOST/auth -f upgrade-init-values.yaml --wait --wait-for-jobs --version $CHART_VERSION + helm -n $NS install keycloak-init mosip/keycloak-init --set frontend=https://$IAM_HOST/auth -f upgrade-init-values.yaml --version $CHART_VERSION + echo Initializing keycloak + helm -n $NS install keycloak-init mosip/keycloak-init --set frontend=https://$IAM_HOST/auth -f import-init-values.yaml --version $CHART_VERSION return 0 } @@ -27,4 +29,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -upgrade_init # calling function +import_init # calling function diff --git a/deployment/v3/external/iam/values.yaml b/deployment/v3/external/iam/values.yaml index f12a9cfb1..4bf598394 100644 --- a/deployment/v3/external/iam/values.yaml +++ b/deployment/v3/external/iam/values.yaml @@ -9,7 +9,7 @@ auth: extraEnvVars: - name: KEYCLOAK_EXTRA_ARGS - value: "-Dkeycloak.profile.feature.upload_scripts=enabled" + value: "-Dkeycloak.profile.feature.upload_scripts=enabled -Dkeycloak.profile.feature.token_exchange=enabled -Dkeycloak.profile.feature.admin_fine_grained_authz=enabled" #value: "-Dkeycloak.profile.feature.upload_scripts=enabled -Dkeycloak.import=/config/realm-mosip.json" ## Disable ingress as we use 
Istio diff --git a/deployment/v3/external/kafka/backup.sh b/deployment/v3/external/kafka/backup.sh new file mode 100755 index 000000000..8328deabd --- /dev/null +++ b/deployment/v3/external/kafka/backup.sh @@ -0,0 +1,178 @@ +#!/bin/sh +# backup kafka via Velero +## Usage: ./backup.sh kubeconfig +chk_status(){ + RESOURCE=$1 + count=$( velero $1 get | grep -Ec 'New|InProgress' ) + if [ $count -gt 0 ]; then + echo "$(tput setaf 1) Previous velero $RESOURCE job is still in either 'New' state or 'InProgress' state; $(tput sgr0)"; + printf "%s Please wait till velero job is either completed or failed.\n You can troubleshoot the issue; EXITING %s" $(tput setaf 1) $(tput sgr0) + exit 1; + fi +} +read_user_input(){ + if [ $# -lt 2 ]; then + echo "$(tput setaf 1) Variable & Message arguments not passed to read_user_input function; EXITING $(tput sgr0)"; + exit 1; + fi + if [ $# -gt 2 ]; then + DEFAULT=$3; ## default values for $VAR variable + fi + VAR=$1; ## variable name + MSG=$2; ## message to be printed for the given variable + read -p " Provide $MSG : " $VAR; + TEMP=$( eval "echo \${$VAR}" ); ## save $VAR values to a temporary variable + eval ${VAR}=${TEMP:-$DEFAULT}; ## set $VAR value to $DEFAULT if $TEMP is empty, else set $VAR value to $TEMP + if [ -z $( eval "echo \${$VAR}" ) ]; then + echo "$(tput setaf 1) $MSG not provided; EXITING $(tput sgr0)"; + exit 1; + fi + DEFAULT=''; ## reset `DEFAULT` variable to empty string +} + +print_heading(){ + HEADING=$1 + tput setaf 3 + printf '\n_%*.0s' $(( $(tput cols)*10/100 -1 )) "" | tr " " "=" | tr "_" " " + echo -n " $HEADING " + printf '%*.0s \n\n' $(( $(tput cols) - ${#HEADING} - $(tput cols)*10/100 -5 )) "" | tr " " "=" + tput sgr0 +} + +chk_exit_status(){ + /bin/sh -c "$1" + EXIT_STATUS=$? + if [ $EXIT_STATUS -gt 0 ]; then + END_MSG='EXITING'; + if [ "$2" = "skip" ]; then + shift + END_MSG='SKIPPING'; + RETURN='TRUE'; + fi + shift + for msg in "${@}";do + echo "$(tput setaf 1) $msg; $END_MSG $(tput sgr0)"; + done + if [ "$RETURN" = "TRUE" ]; then + return "1"; + fi + exit 1; + fi + return "0"; +} + +## The script starts from here +### Cluster +HEADING="Check Cluster Config File" +print_heading "$HEADING"; ## calling print_heading function +chk_exit_status "[ $# -eq 1 ]" "Kubernetes Cluster config file not provided" +chk_exit_status "[ -f $1 ]" "Kubernetes Cluster config file not found" +echo "$(tput setaf 2) Kubernetes Cluster file found $(tput sgr0)" + +### check whether MINIO client ( mc ), kubectl, & velero is installed +HEADING="Check packages installed" +print_heading "$HEADING"; ## calling print_heading function +chk_exit_status "which mc > /dev/null" "MINIO Client ( mc ) not installed"; +chk_exit_status "which velero > /dev/null" "Velero is not installed"; +chk_exit_status "which kubectl > /dev/null" "kubectl is not installed"; +echo "$(tput setaf 2) kubectl, minio client (mc), & velero packages are already installed !!! 
$(tput sgr0)" + +K8S_CONFIG_FILE=$1 +export KUBECONFIG=$K8S_CONFIG_FILE + +### S3 / MINIO +HEADING="S3 Setup" +print_heading "$HEADING"; ## calling print_heading function +read_user_input s3_server "S3 server"; ## calling read_user_input function +read_user_input s3_access_key "S3 access key"; +read_user_input s3_secret_key "S3 secret key"; +read_user_input s3_region "S3 region ( Default region = minio )" "minio"; + +# set S3 alias +s3_alias=s3_server +echo -n " " +CMD="mc alias set $s3_alias $s3_server $s3_access_key $s3_secret_key --api S3v2" +chk_exit_status "$CMD" "Not able to reach S3 SERVER" + +# create velero bucket, Ignore if already exist +bucket=$s3_alias/velero +#chk_exit_status "mc ls $bucket" "skip" "Not able to access bucket $bucket"; +echo -n " " +CMD="mc mb --ignore-existing $bucket" +chk_exit_status "$CMD" "Not able to create bucket on S3 SERVER" + + +# check velero is already deployed on the cluster +HEADING="Velero install" +print_heading "$HEADING"; ## calling print_heading function + +printf "[default]\naws_access_key_id = %s\naws_secret_access_key = %s\n" "$s3_access_key" "$s3_secret_key" > credentials-velero + +CMD="kubectl --kubeconfig=$K8S_CONFIG_FILE get deployment/velero -n velero" +chk_exit_status "$CMD" "skip" "Velero is not deployed on this cluster" +STATUS=$? +if ! [ "$STATUS" = "0" ];then + velero install \ + --provider aws \ + --plugins velero/velero-plugin-for-aws:v1.2.1 \ + --bucket velero \ + --secret-file ./credentials-velero \ + --backup-location-config region=$s3_region,s3ForcePathStyle="true",s3Url=$s3_server \ + --use-volume-snapshots=false \ + --default-volumes-to-restic \ + --kubeconfig $K8S_CONFIG_FILE \ + --use-restic | sed 's/^/ /g' +fi +echo "$(tput setaf 2) Velero deployed $(tput sgr0)" + +NO_OF_RETRIES=5 +HEADING="Check BackupStorageLocations validity" +print_heading "$HEADING"; ## calling print_heading function +for i in $(seq 1 $NO_OF_RETRIES); do + echo "$(tput setaf 6) [Trying : $i ] $(tput sgr 0)"; + printf "\tPlease wait for 5 seconds;\n"; + sleep 5; + BackupStorageLocationValid=$( kubectl --kubeconfig=$K8S_CONFIG_FILE logs deployment/velero -n velero |grep -c "BackupStorageLocations is valid" 2>&1 & ); + printf "\tBackupStorageLocationValid = %s" $BackupStorageLocationValid; + if [ "$BackupStorageLocationValid" -eq 0 ]; then + printf "%s\n\tBackupStorageLocation is invalid; Trying to connect S3 again %s\n" $(tput setaf 1) $(tput sgr0); + if [ $i -eq $NO_OF_RETRIES ]; then + printf "%s\n\tUnable to connect to S3 bucket; EXITING %s" $(tput setaf 1) $(tput sgr0); + printf "%s\n\tPlease check whether S3 bucket is accessible from kubernetes cluster \n\tAnd also check S3 login credentials %s\n" $(tput setaf 4) $(tput sgr0); + exit 1; + fi + continue; + fi + printf "%s\n\tBackupStorageLocation is valid !!!%s\n" $(tput setaf 2) $(tput sgr0); + break; +done + +## create backup operation +HEADING="Create Backup" +print_heading "$HEADING"; ## calling print_heading function +chk_status backup ## calling chk_status function to check any backup or restore jon is in New/InProgress state + +read_user_input SERVICE "k8s service to be taken for backup "; +read_user_input NAMESPACE "k8s service Namespace"; +BACKUP_NAME="$SERVICE-$( date +'%d-%m-%Y-%H-%M' )" + +#### Check whether all $SERVICE.$NAMESPACE pods are up +echo -e "$(tput setaf 2) [ Check whether all $SERVICE.$NAMESPACE pods are up ] $(tput sgr0)\n" +for name in zookeeper kafka; do + CMD="kubectl --kubeconfig=$K8S_CONFIG_FILE -n $NAMESPACE wait --for=condition=ready pod --timeout=30s -l 
app.kubernetes.io/name=$name"; + chk_exit_status "$CMD" "The $name pods failed to be ready by 5 minutes."; +done + +echo "$(tput setaf 2) [ Creating Backup ] $(tput sgr0)" +printf "\t" +velero backup create "$BACKUP_NAME" \ + --default-volumes-to-restic \ + --selector app.kubernetes.io/instance="$SERVICE" \ + --include-namespaces "$NAMESPACE" \ + --kubeconfig "$K8S_CONFIG_FILE" \ + --wait + +HEADING="List Backup" +print_heading "$HEADING"; ## calling print_heading function +printf "%s\n [ List Backups ] %s" $(tput setaf 2) $(tput sgr0) +velero --kubeconfig $K8S_CONFIG_FILE backup get | sed 's/^/\t/g' diff --git a/deployment/v3/external/kafka/restore.sh b/deployment/v3/external/kafka/restore.sh new file mode 100755 index 000000000..1bb20daf0 --- /dev/null +++ b/deployment/v3/external/kafka/restore.sh @@ -0,0 +1,223 @@ +#!/bin/bash +# restore kafka via Velero +## Usage: ./restore.sh kubeconfig +chk_status(){ + RESOURCE=$1 + count=$( velero --kubeconfig=$K8S_CONFIG_FILE $1 get | grep -Ec 'New|InProgress' ) + if [ $count -gt 0 ]; then + echo "$(tput setaf 1) Previous velero $RESOURCE job is still in either 'New' state or 'InProgress' state; $(tput sgr0)"; + printf "%s Please wait till velero job is either completed or failed.\n You can troubleshoot the issue; EXITING %s" $(tput setaf 1) $(tput sgr0) + exit 1; + fi +} +read_user_input(){ + if [ $# -lt 2 ]; then + echo "$(tput setaf 1) Variable & Message arguments not passed to read_user_input function; EXITING $(tput sgr0)"; + exit 1; + fi + if [ $# -gt 2 ]; then + DEFAULT=$3; ## default values for $VAR variable + fi + VAR=$1; ## variable name + MSG=$2; ## message to be printed for the given variable + read -p " Provide $MSG : " $VAR; + TEMP=$( eval "echo \${$VAR}" ); ## save $VAR values to a temporary variable + eval ${VAR}=${TEMP:-$DEFAULT}; ## set $VAR value to $DEFAULT if $TEMP is empty, else set $VAR value to $TEMP + if [ -z $( eval "echo \${$VAR}" ) ]; then + echo "$(tput setaf 1) $MSG not provided; EXITING $(tput sgr0)"; + exit 1; + fi + DEFAULT=''; ## reset `DEFAULT` variable to empty string +} + +print_heading(){ + HEADING=$1 + tput setaf 3 + printf '\n_%*.0s' $(( $(tput cols)*10/100 -1 )) "" | tr " " "=" | tr "_" " " + echo -n " $HEADING " + printf '%*.0s \n\n' $(( $(tput cols) - ${#HEADING} - $(tput cols)*10/100 -5 )) "" | tr " " "=" + tput sgr0 +} + +chk_exit_status(){ + /bin/sh -c "$1" + EXIT_STATUS=$? 
+ if [ $EXIT_STATUS -gt 0 ]; then + END_MSG='EXITING'; + if [ "$2" = "skip" ]; then + shift + END_MSG='SKIPPING'; + RETURN='TRUE'; + fi + shift + for msg in "${@}";do + echo "$(tput setaf 1) $msg; $END_MSG $(tput sgr0)"; + done + if [ "$RETURN" = "TRUE" ]; then + return "1"; + fi + exit 1; + fi + return "0"; +} + +## The script starts from here +### Cluster +HEADING="Check Cluster Config File" +print_heading "$HEADING"; ## calling print_heading function +chk_exit_status "[ $# -eq 1 ]" "Kubernetes Cluster config file not provided" +chk_exit_status "[ -f $1 ]" "Kubernetes Cluster config file not found" +echo "$(tput setaf 2) Kubernetes Cluster file found $(tput sgr0)" + +### check whether MINIO client ( mc ), kubectl, & velero is installed +HEADING="Check packages installed" +print_heading "$HEADING"; ## calling print_heading function +chk_exit_status "which mc > /dev/null" "MINIO Client ( mc ) not installed"; +chk_exit_status "which velero > /dev/null" "Velero is not installed"; +chk_exit_status "which kubectl > /dev/null" "kubectl is not installed"; +echo "$(tput setaf 2) kubectl, minio client (mc), & velero packages are already installed !!! $(tput sgr0)" + +K8S_CONFIG_FILE=$1 +export KUBECONFIG=$K8S_CONFIG_FILE + +### S3 / MINIO +HEADING="S3 Setup" +print_heading "$HEADING"; ## calling print_heading function +read_user_input s3_server "S3 server"; ## calling read_user_input function +read_user_input s3_access_key "S3 access key"; +read_user_input s3_secret_key "S3 secret key"; +read_user_input s3_region "S3 region ( Default region = minio )" "minio"; + +# set S3 alias +s3_alias=s3_server +echo -n " " +CMD="mc alias set $s3_alias $s3_server $s3_access_key $s3_secret_key --api S3v2" +chk_exit_status "$CMD" "Not able to reach S3 SERVER" + +# create velero bucket, Ignore if already exist +bucket=$s3_alias/velero +echo -n " " +chk_exit_status "mc ls $bucket" "Not able to access bucket $bucket"; +#CMD="mc mb --ignore-existing $bucket" +#chk_exit_status "$CMD" "Not able to create bucket on S3 SERVER" + + +# check velero is already deployed on the cluster +HEADING="Velero install" +print_heading "$HEADING"; ## calling print_heading function + +printf "[default]\naws_access_key_id = %s\naws_secret_access_key = %s\n" "$s3_access_key" "$s3_secret_key" > credentials-velero + +CMD="kubectl --kubeconfig=$K8S_CONFIG_FILE get deployment/velero -n velero" +chk_exit_status "$CMD" "skip" "Velero is not deployed on this cluster" +STATUS=$? +if ! 
[ "$STATUS" = "0" ];then + velero install \ + --provider aws \ + --plugins velero/velero-plugin-for-aws:v1.2.1 \ + --bucket velero \ + --secret-file ./credentials-velero \ + --backup-location-config region=$s3_region,s3ForcePathStyle="true",s3Url=$s3_server \ + --use-volume-snapshots=false \ + --default-volumes-to-restic \ + --kubeconfig $K8S_CONFIG_FILE \ + --use-restic | sed 's/^/ /g' +fi +echo "$(tput setaf 2) Velero deployed $(tput sgr0)" + +NO_OF_RETRIES=5 +HEADING="Check BackupStorageLocations validity" +print_heading "$HEADING"; ## calling print_heading function +for i in $(seq 1 $NO_OF_RETRIES); do + echo "$(tput setaf 6) [Trying : $i ] $(tput sgr 0)"; + printf "\tPlease wait for 5 seconds;\n"; + sleep 5; + BackupStorageLocationValid=$( kubectl --kubeconfig=$K8S_CONFIG_FILE logs deployment/velero -n velero |grep -c "BackupStorageLocations is valid" 2>&1 & ); + printf "\tBackupStorageLocationValid = %s" $BackupStorageLocationValid; + if [ "$BackupStorageLocationValid" -eq 0 ]; then + printf "%s\n\tBackupStorageLocation is invalid; Trying to connect S3 again %s\n" $(tput setaf 1) $(tput sgr0); + if [ $i -eq $NO_OF_RETRIES ]; then + printf "%s\n\tUnable to connect to S3 bucket; EXITING %s" $(tput setaf 1) $(tput sgr0); + printf "%s\n\tPlease check whether S3 bucket is accessible from kubernetes cluster \n\tAnd also check S3 login credentials %s\n" $(tput setaf 4) $(tput sgr0); + exit 1; + fi + continue; + fi + printf "%s\n\tBackupStorageLocation is valid !!!%s\n" $(tput setaf 2) $(tput sgr0); + break; +done + +## create restore operation +HEADING="Create Restore" +print_heading "$HEADING"; ## calling print_heading function +chk_status restore ## calling chk_status function to check any backup or restore jon is in New/InProgress state + +read_user_input NS "Namespace to restore ( Default Namespace = kafka )" "kafka"; + +#### Update helm repos +HEADING="Update HELM repos"; +print_heading "$HEADING"; ## calling print_heading function +helm repo add kafka-ui https://provectus.github.io/kafka-ui | sed 's/^/ /g'; +helm repo add bitnami https://charts.bitnami.com/bitnami | sed 's/^/ /g'; +helm repo update | sed 's/^/ /g'; + +#### create namespace +HEADING="Create Namespace"; +print_heading "$HEADING"; ## calling print_heading function +kubectl --kubeconfig=$K8S_CONFIG_FILE create ns $NS | sed 's/^/ /g'; + +#### Install Kafka +HEADING="Install kafka"; +print_heading "$HEADING"; ## calling print_heading function +helm -n $NS install kafka bitnami/kafka -f values.yaml --wait | sed 's/^/ /g'; + +#### Install kafka-ui +HEADING='Install kafka-ui'; +print_heading "$HEADING"; ## calling print_heading function +helm -n $NS install kafka-ui kafka-ui/kafka-ui --set envs.config.KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka.$NS:9092 --set envs.config.KAFKA_CLUSTERS_0_ZOOKEEPER=kafka-zookeeper.$NS:2181 -f ui-values.yaml --wait | sed 's/^/ /g'; + +#### Install Istio addons +HEADING='Install Istio addons'; +print_heading "$HEADING"; ## calling print_heading function +KAFKA_UI_HOST=$(kubectl --kubeconfig=$K8S_CONFIG_FILE get cm global -o jsonpath={.data.mosip-api-internal-host}) +KAFKA_UI_NAME=kafka-ui +helm -n $NS install istio-addons chart/istio-addons --set kafkaUiHost=$KAFKA_UI_HOST --set installName=$KAFKA_UI_NAME | sed 's/^/ /g'; + +HEADING="List Backup"; +print_heading "$HEADING"; ## calling print_heading function +printf "%s\n [ List Backups ] %s" $(tput setaf 2) $(tput sgr0); +velero --kubeconfig $K8S_CONFIG_FILE backup get | sed 's/^/\t/g'; + +HEADING="Restore Kafka from Backup"; +print_heading 
"$HEADING"; ## calling print_heading function +printf "%s\n [ List Backups ] %s" $(tput setaf 2) $(tput sgr0); +velero --kubeconfig $K8S_CONFIG_FILE backup get | sed 's/^/\t/g'; + +printf "%s\n [ Check backup existence ] %s\n\t" $(tput setaf 2) $(tput sgr0); +read_user_input BACKUP_NAME "Backup Name"; +CMD="velero --kubeconfig $K8S_CONFIG_FILE backup get | grep -E '(^|\s)$BACKUP_NAME($|\s)'"; +chk_exit_status "$CMD" "\t Backup Name not found"; + +printf "%s\n [ Remove Kafka & Zookeeper statefulset ] %s\n\t" $(tput setaf 2) $(tput sgr0); +CMD="kubectl --kubeconfig $K8S_CONFIG_FILE -n $NS --ignore-not-found=true delete statefulset kafka kafka-zookeeper"; +chk_exit_status "$CMD" "\t Backup Name not found"; +#### Check whether all kafka.$NAMESPACE pods are terminated +printf "\n%s [ Check whether all kafka.$NS pods are terminated ] %s\n" $(tput setaf 2) $(tput sgr0) +for name in kafka zookeeper; do + CMD="kubectl --kubeconfig=$K8S_CONFIG_FILE -n $NS wait --for=delete pod --timeout=300s -l app.kubernetes.io/name=$name"; + chk_exit_status "$CMD" "The $name pods failed to be ready by 5 minutes."; +done + +printf "%s\n [ Restore Kafka ] %s\n\t" $(tput setaf 2) $(tput sgr0); +RESTORE_NAME="restore-$BACKUP_NAME-$( date +'%d-%m-%Y-%H-%M' )"; +velero restore --kubeconfig $K8S_CONFIG_FILE create $RESTORE_NAME --from-backup $BACKUP_NAME --namespace-mappings default:$NS --wait; + +printf "%s\n [ Update Namespace in statefulset environmental variables ] %s\n\t" $(tput setaf 2) $(tput sgr0); +kubectl -n $NS get statefulset kafka-zookeeper -o yaml | sed "s/default.svc.cluster.local/$NS.svc.cluster.local/g" | kubectl -n $NS apply -f -; +kubectl -n $NS get statefulset kafka -o yaml | sed "s/default.svc.cluster.local/$NS.svc.cluster.local/g" | kubectl -n $NS apply -f - ; + +HEADING="List Restores"; +print_heading "$HEADING"; ## calling print_heading function +printf "%s\n [ List Restores ] %s\n" $(tput setaf 2) $(tput sgr0); +velero --kubeconfig $K8S_CONFIG_FILE restore get | sed 's/^/\t/g'; + diff --git a/deployment/v3/external/landing-page/copy_cm.sh b/deployment/v3/external/landing-page/copy_cm.sh index 2b3664898..b168e7026 100755 --- a/deployment/v3/external/landing-page/copy_cm.sh +++ b/deployment/v3/external/landing-page/copy_cm.sh @@ -17,4 +17,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -copying_cm # calling function \ No newline at end of file +copying_cm # calling function diff --git a/deployment/v3/external/landing-page/delete.sh b/deployment/v3/external/landing-page/delete.sh index aa1bc0e8c..a86f11fda 100755 --- a/deployment/v3/external/landing-page/delete.sh +++ b/deployment/v3/external/landing-page/delete.sh @@ -21,4 +21,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -landing_page # calling function \ No newline at end of file +landing_page # calling function diff --git a/deployment/v3/external/landing-page/install.sh b/deployment/v3/external/landing-page/install.sh index b55c64814..17fe37a95 100755 --- a/deployment/v3/external/landing-page/install.sh +++ b/deployment/v3/external/landing-page/install.sh @@ -7,7 +7,7 @@ if [ $# 
-ge 1 ] ; then fi NS=landing-page -CHART_VERSION=12.0.1 +CHART_VERSION=12.0.2 echo Create $NS namespace kubectl create ns $NS @@ -42,6 +42,8 @@ function landing_page() { ESIGNET=$(kubectl get cm global -o jsonpath={.data.mosip-esignet-host}) SMTP=$(kubectl get cm global -o jsonpath={.data.mosip-smtp-host}) HEALTHSERVICES=$(kubectl get cm global -o jsonpath={.data.mosip-healthservices-host}) + INJIWEB=$(kubectl get cm global -o jsonpath={.data.mosip-injiweb-host}) + INJIVERIFY=$(kubectl get cm global -o jsonpath={.data.mosip-injiverify-host}) echo Installing landing page helm -n $NS install landing-page mosip/landing-page --version $CHART_VERSION \ @@ -59,15 +61,18 @@ function landing_page() { --set landing.regclient=$REGCLIENT \ --set landing.postgres.host=$POSTGRES \ --set landing.postgres.port=$POSTGRES_PORT \ - --set landing.pmp=$PMP \ --set landing.compliance=$COMPLIANCE \ + --set landing.pmp=$PMP \ --set landing.resident=$RESIDENT \ --set landing.esignet=$ESIGNET \ --set landing.smtp=$SMTP \ --set landing.healthservices=$HEALTHSERVICES \ + --set landing.injiweb=$INJIWEB \ + --set landing.injiverify=$INJIVERIFY \ --set istio.host=$DOMAIN kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status + echo Installed landing page return 0 } diff --git a/deployment/v3/external/landing-page/restart.sh b/deployment/v3/external/landing-page/restart.sh index 5a3942e19..4b9ab4bef 100755 --- a/deployment/v3/external/landing-page/restart.sh +++ b/deployment/v3/external/landing-page/restart.sh @@ -10,6 +10,7 @@ function landing_page() { NS=landing-page kubectl -n $NS rollout restart deploy + kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status echo Restarted landing page pod @@ -22,4 +23,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -landing_page # calling function \ No newline at end of file +landing_page # calling function diff --git a/deployment/v3/external/landing-page/template/landing.html b/deployment/v3/external/landing-page/template/landing.html index 1817881e8..c406b1e2b 100644 --- a/deployment/v3/external/landing-page/template/landing.html +++ b/deployment/v3/external/landing-page/template/landing.html @@ -1,115 +1,115 @@ - - -MOSIP - - - - -
[landing.html diff: the HTML markup could not be recovered here. Both the removed and the added versions carry the same visible text — a "MOSIP Deployment" heading, the environment name "dev3", "Version: develop", and links for External API (https://api.sandbox.mosip.net), Internal API (https://api-internal.sandbox.mosip.net), and postgres.sandbox.mosip.net:5432 — so the hunk only reworks the surrounding markup.]
+ + + + diff --git a/deployment/v3/external/msg-gateway/README.md b/deployment/v3/external/msg-gateway/README.md index c13dc6d43..8429ec6c4 100644 --- a/deployment/v3/external/msg-gateway/README.md +++ b/deployment/v3/external/msg-gateway/README.md @@ -4,6 +4,10 @@ The information of your SMTP and SMS gateways is created here. Create these con If you would like to use Gmail SMTP. You can follow the procedure from [here](../../docs/create-gmail-app-password.md) +If you would like to use mock-smtp. You can follow the procedure form [here](../../mosip/mock-smtp/README.md). + +If you would like to use Gmail SMTP. You can follow the procedure from [here](../../docs/create-gmail-app-password.md) + Run ```sh ./install.sh diff --git a/deployment/v3/external/msg-gateway/install.sh b/deployment/v3/external/msg-gateway/install.sh index 31398ac68..2f1241d70 100755 --- a/deployment/v3/external/msg-gateway/install.sh +++ b/deployment/v3/external/msg-gateway/install.sh @@ -56,7 +56,6 @@ function msg_gateway() { echo smtp and sms related configurations set. return 0 } - # set commands for error handling. set -e set -o errexit ## set -e : exit the script if any statement returns a non-true return value diff --git a/deployment/v3/external/oauth2-proxy/README.md b/deployment/v3/external/oauth2-proxy/README.md index 8242523ff..c6c0da688 100644 --- a/deployment/v3/external/oauth2-proxy/README.md +++ b/deployment/v3/external/oauth2-proxy/README.md @@ -1,31 +1,41 @@ ## Install Oauth2-Proxy in cluster. -This directory contains files that can be used to install oauth2-proxy in a given cluster. This will also install and setup a new realm in keycloak, `istio`. (If such a realm is already present it needs to be deleted manually. (TODO: develop this to use pre-existing istio realm)) +This directory contains files that can be used to install oauth2-proxy in a given cluster. ### Installation: -```sh -./install.sh -``` -After installation is done, go to `istio` realm in keycloak and create required users and assign appropriate roles - -### Applying Policies +- Login to MOSIP cluster keycloak (ex: `iam.sandbox.mosip.net`) as `admin`. +- Click on *Add Realm*, on top left of admin console. +- Click on *Select File* on Import. And select the `istio-realm.json` file in this directory. +- Give *name* as `istio`. Switch on *Enabled*. +- Click *Create*. +- After the realm is created, configure the *frontendUrl* on *Realm Settings* page of the `istio` realm. (Example: `frontendUrl: https://iam.sandbox.mosip.net/auth`) +- Then navigate to *Clients* page, and to the `istio-auth-client`. +- Go to *Credentials* section, and *Regenerate Secret*. +- Copy the new secret for further use. +- Then run the install script: + ```sh + ./install.sh + ``` +- After installation is done, go to `istio` realm in keycloak and create required users and assign appropriate roles. + +### Applying and removing policies -- There are two policies provided in the `sample-auth-policy.yaml`, they work like this. First the CUSTOM auth policy will authenticate the incoming request (this custom auth provider is oauth2-proxy), then the second DENY filter will deny the request if it didnt receive the appropriate role binding. - After installation, use the following script to apply policies on cluster. -```sh -./apply_policy.sh sample-auth-policy.yaml -``` -- These policy are just regular istio resources and can be applied manually without the script also (like `kubectl apply -f`). 
-- But the script replaces varibles like `h__mosip-api-internal-host__h`, `h__mosip-kibana-host__h`, etc from global configmap in cluster, before applying. So that one doesnt have to care about the hostname while applying. -- To remove the policies; `kubectl delete -f sample-auth-policy.yaml` + ```sh + ./apply_policy.sh sample-auth-policy.yaml + ``` +- To remove the policies; + ```sh + kubectl delete -f sample-auth-policy.yaml + ``` ### Setting appropriate roles for urls Edit the sample-auth-policy.yaml for setting roles for each set of hosts and uris, and apply the policy -### To Uninstall -``` -kubectl delete ns oauth2-proxy +### Uninstall +```sh +./delete.sh ``` Also dont forget to remove the policies seperately. `kubectl delete -f sample-auth-policy.yaml` @@ -34,3 +44,5 @@ Also dont forget to remove the policies seperately. `kubectl delete -f sample-au - Find the conf of `istio` realm in `istio-realm.json` file. - Find the conf of oauth2-proxy installation in the `oauth2-proxy.yaml` file - Find the istiod configuration of external authorization in the istio-operator file. +- There are two policies provided in `sample-auth-policy.yaml`; they work like this: first the CUSTOM auth policy authenticates the incoming request (this custom auth provider is oauth2-proxy), then the second DENY filter denies the request if it didn't receive the appropriate role binding. +- The `apply_policy.sh` script replaces variables like `h__mosip-api-internal-host__h`, `h__mosip-kibana-host__h`, etc. in the policy before applying it (values are taken from the global configmap of the cluster). diff --git a/deployment/v3/external/oauth2-proxy/apply_policy.sh b/deployment/v3/external/oauth2-proxy/apply_policy.sh index e95ca03ce..4ab096332 100755 --- a/deployment/v3/external/oauth2-proxy/apply_policy.sh +++ b/deployment/v3/external/oauth2-proxy/apply_policy.sh @@ -11,8 +11,12 @@ cp $1 $temp_file for host_var in $(cat $temp_file | grep -oP '(?<=h__).*(?=__h)'); do host_name="$(kubectl get cm global -o jsonpath={.data.$host_var})" - sed -i "s/h__${host_var}__h/$host_name/g" $temp_file + sed -i "s;h__${host_var}__h;${host_name};g" $temp_file done +jwks_uri=$(cat $temp_file | grep -oP '(?<=rjwks__).*(?=__rjwks)') +remote_local_jwks=$(curl -s "$jwks_uri") +sed -i "s;rjwks__${jwks_uri}__rjwks;${remote_local_jwks};g" $temp_file + kubectl apply -f $temp_file rm $temp_file diff --git a/deployment/v3/external/oauth2-proxy/delete.sh b/deployment/v3/external/oauth2-proxy/delete.sh new file mode 100755 index 000000000..91fb765a5 --- /dev/null +++ b/deployment/v3/external/oauth2-proxy/delete.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +NS=oauth2-proxy + +if [ $# -ge 1 ]; then + export KUBECONFIG=$1 +fi + +function Deleting_oauth2_proxy() { + kubectl delete -n $NS -f oauth2-proxy.yaml + return 0 +} + +# set commands for error handling.
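The token substitution performed by `apply_policy.sh` above can be previewed on its own; the host key below is one of the `h__...__h` tokens used in the sample policy, and the JWKS URL is only an example of the kind of endpoint an `rjwks__...__rjwks` marker would point at (Keycloak's standard certs endpoint for the `istio` realm):

```sh
# Preview the h__...__h substitution for one key taken from the global configmap.
host_var=mosip-api-internal-host
host_name=$(kubectl get cm global -o jsonpath="{.data.$host_var}")
echo "hosts: [\"h__${host_var}__h\"]" | sed "s;h__${host_var}__h;${host_name};g"

# The rjwks__...__rjwks marker is replaced with the JSON served by the JWKS endpoint;
# the URL below is an illustrative example, not a value from this repository.
curl -s "https://iam.sandbox.mosip.net/auth/realms/istio/protocol/openid-connect/certs" | head -c 200; echo
```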
+set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +Deleting_oauth2_proxy_oauth2_proxy # calling function +} \ No newline at end of file diff --git a/deployment/v3/external/oauth2-proxy/install.sh b/deployment/v3/external/oauth2-proxy/install.sh index 7660e1823..1a92ab239 100755 --- a/deployment/v3/external/oauth2-proxy/install.sh +++ b/deployment/v3/external/oauth2-proxy/install.sh @@ -1,63 +1,57 @@ #!/bin/bash -ROOT_DOMAIN=".mosip.net" +NS=oauth2-proxy if [ $# -ge 1 ]; then export KUBECONFIG=$1 fi -if ! java -version > /dev/null 2> /dev/null; then - echo "java is missing. Please install java"; exit 1; +if [ -z "$INSTALLATION_NAME" ]; then + INSTALLATION_NAME=$(kubectl get cm global -ojsonpath={.data.installation-name}) + read -p "Current installation name (default: $INSTALLATION_NAME) : " TO_REPLACE + INSTALLATION_NAME=${TO_REPLACE:-$INSTALLATION_NAME} + unset TO_REPLACE fi -if ! jq --version > /dev/null 2> /dev/null; then - echo "jq is missing. Please install jq"; exit 2; +if [ -z "$IAM_HOST" ]; then + IAM_HOST=$(kubectl get cm keycloak-host -n keycloak -ojsonpath={.data.keycloak-external-host}) + read -p "Keycloak host name (default: $IAM_HOST) : " TO_REPLACE + IAM_HOST=${TO_REPLACE:-$IAM_HOST} + unset TO_REPLACE fi -if ! [ -d "keycloak-15.0.2" ]; then - echo "Downloading keycloak admin cli" && wget -q --show-progress "https://github.com/keycloak/keycloak/releases/download/15.0.2/keycloak-15.0.2.zip" && - echo "Download Success. Unzipping.." && unzip -q "keycloak-15.0.2.zip" && - rm "keycloak-15.0.2.zip" > /dev/null +if [ -z "$istio_client_id" ]; then + istio_client_id="istio-auth-client" + read -p "Keycloak istio auth client name (default: $istio_client_id) : " TO_REPLACE + istio_client_id=${TO_REPLACE:-$istio_client_id} + unset TO_REPLACE fi -KCADM_SH="./keycloak-15.0.2/bin/kcadm.sh" -KCADM_CFG="$KCADM_SH.config" - -INSTALLATION_NAME=$(kubectl get cm global -ojsonpath={.data.installation-name}) -read -p "Current installation name (default: $INSTALLATION_NAME) : " TO_REPLACE -INSTALLATION_NAME=${TO_REPLACE:-$INSTALLATION_NAME} -unset TO_REPLACE - -IAM_HOST=$(kubectl get cm keycloak-host -n keycloak -ojsonpath={.data.keycloak-host}) -read -p "Keycloak host name (default: $IAM_HOST) : " TO_REPLACE -IAM_HOST=${TO_REPLACE:-$IAM_HOST} -unset TO_REPLACE - -IAM_ADMIN_PASS=$(kubectl get secret keycloak -n keycloak -ojsonpath={.data.admin-password} | base64 --decode) -read -p "Keycloak admin password (leave empty to take from secret) : " TO_REPLACE -IAM_HOST=${TO_REPLACE:-$IAM_HOST} -unset TO_REPLACE - -echo "Creating Keycloak Realm, Istio. And the required client and roles" -$KCADM_SH config credentials --server https://$IAM_HOST/auth --realm master --user admin --password $IAM_ADMIN_PASS --config $KCADM_CFG -$KCADM_SH create realms -s realm=istio -s enabled=true --config $KCADM_CFG; if [ $? -ne 0 ]; then echo "Realm Already Exists. Please delete it."; exit 3; fi -realm_create_output=$($KCADM_SH create partialImport -r istio -s ifResourceExists=SKIP -o -f istio-realm.json --config $KCADM_CFG) +if [ -z "$istio_client_secret" ]; then + read -p "Keycloak istio auth client secret : " istio_client_secret + if [ -z "$istio_client_secret" ]; then + exit "Give valid client secret." 
+ fi +fi -istio_client_id="istio-auth-client" -istio_client_id_ID=$(echo $realm_create_output | jq '.results' | jq ".[] | select(.resourceName==\"$istio_client_id\")" | jq -r '.id') -$KCADM_SH create clients/$istio_client_id_ID/client-secret -r istio --config $KCADM_CFG -istio_client_secret=$($KCADM_SH get clients/$istio_client_id_ID/client-secret -r istio --config $KCADM_CFG | jq -r '.value') +if [ -z "$ROOT_DOMAIN" ]; then + ROOT_DOMAIN=".mosip.net" + read -p "Root Domain for oauth2-proxy cookie (default: $ROOT_DOMAIN) : " TO_REPLACE + ROOT_DOMAIN=${TO_REPLACE:-$ROOT_DOMAIN} + unset TO_REPLACE +fi TEMP_MANIFEST=./.manifest.yaml.tmp cp oauth2-proxy.yaml $TEMP_MANIFEST +cookie_secret=$(dd if=/dev/urandom bs=32 count=1 2>/dev/null | base64 | tr -d -- '\n' | tr -- '+/' '-_' | base64) + sed -i "s/___ISTIO_CLIENT_ID___/$istio_client_id/g" $TEMP_MANIFEST sed -i "s/___ISTIO_CLIENT_SECRET___/$istio_client_secret/g" $TEMP_MANIFEST sed -i "s/___IAM_HOST___/$IAM_HOST/g" $TEMP_MANIFEST sed -i "s/___ROOT_DOMAIN___/$ROOT_DOMAIN/g" $TEMP_MANIFEST sed -i "s/___INSTALLATION_NAME___/$INSTALLATION_NAME/g" $TEMP_MANIFEST - -NS=oauth2-proxy +sed -i "s/___COOKIE_SECRET___/$cookie_secret/g" $TEMP_MANIFEST echo Creating namespace kubectl create ns $NS @@ -71,7 +65,7 @@ function installing_oauth2_proxy() { #helm -n $NS install oauth2-proxy bitnami/oauth2-proxy -f values-oauth2-proxy.yaml --set configuration.clientID=$istio_client_id --set configuration.clientSecret=$istio_client_secret #helm -n $NS install oauth2-proxy bitnami/oauth2-proxy -f values-oauth2-proxy.yaml - rm $TEMP_MANIFEST $KCADM_CFG + rm $TEMP_MANIFEST return 0 } @@ -81,4 +75,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -installing_oauth2_proxy # calling function \ No newline at end of file +installing_oauth2_proxy # calling function diff --git a/deployment/v3/external/oauth2-proxy/istio-realm.json b/deployment/v3/external/oauth2-proxy/istio-realm.json index 5e6343b50..43463fc22 100644 --- a/deployment/v3/external/oauth2-proxy/istio-realm.json +++ b/deployment/v3/external/oauth2-proxy/istio-realm.json @@ -84,6 +84,30 @@ "containerId": "istio", "attributes": {} }, + { + "id": "d9d0681c-a8e4-4a8f-bbc8-2bd15b8b5e93", + "name": "swagger_access", + "composite": false, + "clientRole": false, + "containerId": "istio", + "attributes": {} + }, + { + "id": "d9d0681c-a8e4-4a8f-bbc8-2bd15b8b5e94", + "name": "kibana_access", + "composite": false, + "clientRole": false, + "containerId": "istio", + "attributes": {} + }, + { + "id": "d9d0681c-a8e4-4a8f-bbc8-2bd15b8b5e95", + "name": "kafka_ui_access", + "composite": false, + "clientRole": false, + "containerId": "istio", + "attributes": {} + }, { "id": "3ad79da3-1773-4f88-a1e2-965d7c3dbadf", "name": "uma_authorization", diff --git a/deployment/v3/external/oauth2-proxy/oauth2-proxy.yaml b/deployment/v3/external/oauth2-proxy/oauth2-proxy.yaml index 386fe15c2..f6632ef84 100644 --- a/deployment/v3/external/oauth2-proxy/oauth2-proxy.yaml +++ b/deployment/v3/external/oauth2-proxy/oauth2-proxy.yaml @@ -1,92 +1,105 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: oauth2-proxy-confmap -data: - oauth2_proxy.cfg: | - email_domains = ["*"] - redirect_url = "https://___IAM_HOST___/oauth2/callback" - silence_ping_logging = true - skip_provider_button = true - whitelist_domains = 
["___ROOT_DOMAIN___"] - cookie_domains = ["___ROOT_DOMAIN___"] - cookie_name = "_oauth2_proxy____INSTALLATION_NAME___" - alpha.cfg: | - injectRequestHeaders: - - name: X-Forwarded-Groups - values: - - claim: groups - - name: X-Forwarded-User - values: - - claim: user - - name: X-Forwarded-Email - values: - - claim: email - - name: X-Forwarded-Preferred-Username - values: - - claim: preferred_username - - name: Authorization - values: - - claim: id_token - prefix: 'Bearer ' - injectResponseHeaders: - - name: X-Auth-Access-Token - values: - - claim: access_token - - name: X-Auth-Request-User - values: - - claim: user - - name: X-Auth-Request-Email - values: - - claim: email - - name: X-Auth-Request-Preferred-Username - values: - - claim: preferred_username - - name: X-Auth-Request-Groups - values: - - claim: groups - - name: Authorization - values: - - claim: id_token - prefix: 'Bearer ' - providers: - - clientID: "___ISTIO_CLIENT_ID___" - clientSecret: "___ISTIO_CLIENT_SECRET___" - id: keycloak-oidc-istio - provider: keycloak-oidc - oidcConfig: - emailClaim: email - groupsClaim: groups - userIDClaim: email - insecureAllowUnverifiedEmail: true - insecureSkipNonce: true - issuerURL: https://___IAM_HOST___/auth/realms/istio - server: - BindAddress: 0.0.0.0:4180 - upstreamConfig: - upstreams: - - id: static_200 - path: / - static: true - staticCode: 200 ---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: oauth2-proxy-confmap + labels: + app.kubernetes.io/name: oauth2-proxy +data: + oauth2_proxy.cfg: | + email_domains = ["*"] + redirect_url = "https://___IAM_HOST___/oauth2/callback" + silence_ping_logging = true + skip_provider_button = true + whitelist_domains = ["___ROOT_DOMAIN___"] + cookie_domains = ["___ROOT_DOMAIN___"] + cookie_name = "_oauth2_proxy____INSTALLATION_NAME___" + skip_jwt_bearer_tokens = true + alpha.cfg: | + injectRequestHeaders: + - name: X-Forwarded-Groups + values: + - claim: groups + - name: X-Forwarded-User + values: + - claim: user + - name: X-Forwarded-Email + values: + - claim: email + - name: X-Forwarded-Preferred-Username + values: + - claim: preferred_username + - name: Authorization + values: + - claim: id_token + prefix: 'Bearer ' + injectResponseHeaders: + - name: X-Auth-Access-Token + values: + - claim: access_token + - name: X-Auth-Request-User + values: + - claim: user + - name: X-Auth-Request-Email + values: + - claim: email + - name: X-Auth-Request-Preferred-Username + values: + - claim: preferred_username + - name: X-Auth-Request-Groups + values: + - claim: groups + - name: Authorization + values: + - claim: id_token + prefix: 'Bearer ' + providers: + - clientID: "___ISTIO_CLIENT_ID___" + clientSecret: "___ISTIO_CLIENT_SECRET___" + id: keycloak-oidc-istio + provider: keycloak-oidc + oidcConfig: + emailClaim: email + groupsClaim: groups + userIDClaim: email + insecureAllowUnverifiedEmail: true + insecureSkipNonce: true + issuerURL: https://___IAM_HOST___/auth/realms/istio + server: + BindAddress: 0.0.0.0:4180 + upstreamConfig: + upstreams: + - id: static_200 + path: / + static: true + staticCode: 200 +--- +apiVersion: v1 +kind: Secret +metadata: + name: oauth2-proxy-secret + labels: + app.kubernetes.io/name: oauth2-proxy +type: Opaque +data: + OAUTH2_PROXY_COOKIE_SECRET: ___COOKIE_SECRET___ +--- apiVersion: apps/v1 kind: Deployment metadata: name: oauth2-proxy labels: - app: oauth2-proxy + app.kubernetes.io/name: oauth2-proxy spec: replicas: 1 strategy: type: RollingUpdate selector: matchLabels: - app: oauth2-proxy + app.kubernetes.io/name: 
oauth2-proxy template: metadata: labels: - app: oauth2-proxy + app.kubernetes.io/name: oauth2-proxy spec: containers: - name: oauth2-proxy @@ -95,9 +108,9 @@ spec: args: - --config=/bitnami/oauth2-proxy/conf/oauth2_proxy.cfg - --alpha-config=/bitnami/oauth2-proxy/conf/alpha.cfg - env: - - name: OAUTH2_PROXY_COOKIE_SECRET - value: "WFhYWFhYWFhYWFhYWFhYWA==" + envFrom: + - secretRef: + name: oauth2-proxy-secret ports: - containerPort: 4180 name: http @@ -139,11 +152,11 @@ kind: Service metadata: name: oauth2-proxy labels: - app: oauth2-proxy + app.kubernetes.io/name: oauth2-proxy spec: type: ClusterIP selector: - app: oauth2-proxy + app.kubernetes.io/name: oauth2-proxy ports: - name: http port: 80 diff --git a/deployment/v3/external/oauth2-proxy/sample-auth-policy.yaml b/deployment/v3/external/oauth2-proxy/sample-auth-policy.yaml index 856e59ca8..f56f297f5 100644 --- a/deployment/v3/external/oauth2-proxy/sample-auth-policy.yaml +++ b/deployment/v3/external/oauth2-proxy/sample-auth-policy.yaml @@ -1,29 +1,32 @@ -apiVersion: security.istio.io/v1beta1 -kind: AuthorizationPolicy -metadata: - name: sample-httpbin-authn-policy - namespace: istio-system -spec: - selector: - matchLabels: - istio: ingressgateway-internal - action: CUSTOM - provider: - name: oauth2-proxy - rules: - - to: - - operation: - hosts: ["h__mosip-api-internal-host__h"] - paths: ["/httpbin*"] ---- -# # The following AuthorizationPolicy is not supported by istio yet. +# # The following AuthorizationPolicies are not supported by istio yet. # # Hence an envoyfilter is written manually -# # In future once support is added replace the subsequent envoyfilter with this authorizationPolicy (or similar one). +# # In future once support is added, replace the subsequent envoyfilter with these AuthorizationPolicies (or similar ones).
+# apiVersion: security.istio.io/v1beta1 +# kind: AuthorizationPolicy +# metadata: +# name: sample-ext-custom-authn-policy +# namespace: istio-system +# spec: +# selector: +# matchLabels: +# istio: ingressgateway-internal +# action: CUSTOM +# provider: +# name: oauth2-proxy +# rules: +# - to: +# - operation: +# hosts: ["h__mosip-api-internal-host__h"] +# paths: ["/httpbin*"] +# - operation: +# hosts: ["h__mosip-api-internal-host__h"] +# paths: [ "*swagger*" ] +# --- # # apiVersion: security.istio.io/v1beta1 # kind: AuthorizationPolicy # metadata: -# name: sample-httpbin-authz-policy +# name: sample-ext-custom-authz-policy # namespace: istio-system # spec: # selector: @@ -38,11 +41,11 @@ spec: # when: # - key: request.headers[x-auth-request-groups] # notValues: ["*role:httpbin_access*"] ---- +# --- apiVersion: networking.istio.io/v1alpha3 kind: EnvoyFilter metadata: - name: sample-httpbin-authz-filter + name: sample-ext-custom-authz-filter namespace: istio-system spec: workloadSelector: @@ -57,9 +60,182 @@ spec: filterChain: filter: name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" patch: - operation: ADD - filterClass: AUTHZ + operation: INSERT_BEFORE + value: + name: envoy.filters.http.rbac + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC + shadow_rules: + action: DENY + policies: + "istio-ext-authz-ns[istio-system]-policy[sample-ext-custom-authn-policy]-rule[0]": + permissions: + - and_rules: + rules: + - header: + name: ":authority" + string_match: + exact: h__mosip-api-internal-host__h + ignore_case: true + - url_path: + path: + prefix: "/httpbin" + - and_rules: + rules: + - header: + name: ":authority" + string_match: + exact: h__mosip-api-internal-host__h + ignore_case: true + - url_path: + path: + contains: "swagger" + - header: + name: ":authority" + string_match: + exact: h__mosip-kibana-host__h + ignore_case: true + - header: + name: ":authority" + string_match: + exact: h__mosip-kafka-host__h + ignore_case: true + principals: + - any: true + shadow_rules_stat_prefix: istio_ext_authz_ + - applyTo: HTTP_FILTER + match: + context: GATEWAY + listener: + portNumber: 8080 + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: envoy.filters.http.ext_authz + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + http_service: + server_uri: + uri: "http://oauth2-proxy.oauth2-proxy.svc.cluster.local" + cluster: "outbound|80||oauth2-proxy.oauth2-proxy.svc.cluster.local" + timeout: "600s" + authorization_request: + allowed_headers: + patterns: + - exact: authorization + ignore_case: true + - exact: cookie + ignore_case: true + headers_to_add: + - key: X-Auth-Request-Redirect + value: "https://%REQ(:authority)%%REQ(:path)%" + authorization_response: + allowed_upstream_headers: + patterns: + - exact: authorization + ignore_case: true + - exact: path + ignore_case: true + - exact: x-auth-access-token + ignore_case: true + - exact: x-auth-request-user + ignore_case: true + - exact: x-auth-request-email + ignore_case: true + - exact: x-auth-request-preferred-username + ignore_case: true + - exact: x-auth-request-groups + ignore_case: true + allowed_client_headers: + patterns: + - exact: content-type + ignore_case: true + - exact: set-cookie + ignore_case: true + transport_api_version: V3 + filter_enabled_metadata: + filter: 
envoy.filters.http.rbac + path: + - key: istio_ext_authz_shadow_effective_policy_id + value: + string_match: + prefix: istio-ext-authz + - applyTo: HTTP_FILTER + match: + context: GATEWAY + listener: + portNumber: 8080 + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: envoy.filters.http.jwt_authn + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication + providers: + oauth2-istio-0: + issuer: "https://h__mosip-iam-external-host__h/auth/realms/istio" + audiences: + - istio-auth-client + local_jwks: + inline_string: 'rjwks__https://h__mosip-iam-external-host__h/auth/realms/istio/protocol/openid-connect/certs__rjwks' + payload_in_metadata: istio_jwt_payload + forward: true + rules: + - match: + headers: + - name: ":authority" + exact_match: h__mosip-api-internal-host__h + prefix: "/httpbin" + requires: + provider_name: oauth2-istio-0 + - match: + headers: + - name: ":authority" + exact_match: h__mosip-api-internal-host__h + - name: ":path" + contains_match: "swagger" + prefix: "/" + requires: + provider_name: oauth2-istio-0 + - match: + headers: + - name: ":authority" + exact_match: h__mosip-kibana-host__h + prefix: "/" + requires: + provider_name: oauth2-istio-0 + - match: + headers: + - name: ":authority" + exact_match: h__mosip-kafka-host__h + prefix: "/" + requires: + provider_name: oauth2-istio-0 + - applyTo: HTTP_FILTER + match: + context: GATEWAY + listener: + portNumber: 8080 + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE value: name: envoy.filters.http.rbac.mydeny typed_config: @@ -71,25 +247,101 @@ spec: permissions: - and_rules: rules: - - or_rules: - rules: - - header: - name: ":authority" - exact_match: h__mosip-api-internal-host__h - - or_rules: - rules: - - url_path: - path: - prefix: "/httpbin" - # - url_path: - # path: - # prefix: "/hmmm" + - header: + name: ":authority" + string_match: + exact: h__mosip-api-internal-host__h + ignore_case: true + - url_path: + path: + prefix: "/httpbin" + principals: + - and_ids: + ids: + - not_id: + or_ids: + ids: + - metadata: + filter: envoy.filters.http.jwt_authn + path: + - key: istio_jwt_payload + - key: realm_access_roles + value: + list_match: + one_of: + string_match: + exact: httpbin_access + "swaggerPolicy": + permissions: + - and_rules: + rules: + - header: + name: ":authority" + string_match: + exact: h__mosip-api-internal-host__h + ignore_case: true + - url_path: + path: + contains: swagger + principals: + - and_ids: + ids: + - not_id: + or_ids: + ids: + - metadata: + filter: envoy.filters.http.jwt_authn + path: + - key: istio_jwt_payload + - key: realm_access_roles + value: + list_match: + one_of: + string_match: + exact: swagger_access + "kibanaPolicy": + permissions: + - header: + name: ":authority" + string_match: + exact: h__mosip-kibana-host__h + ignore_case: true + principals: + - and_ids: + ids: + - not_id: + or_ids: + ids: + - metadata: + filter: envoy.filters.http.jwt_authn + path: + - key: istio_jwt_payload + - key: realm_access_roles + value: + list_match: + one_of: + string_match: + exact: kibana_access + "kafkaUiPolicy": + permissions: + - header: + name: ":authority" + string_match: + exact: h__mosip-kafka-host__h + ignore_case: true principals: - and_ids: ids: - not_id: or_ids: ids: - - header: - name: 
x-auth-request-groups - contains_match: 'role:httpbin_access' + - metadata: + filter: envoy.filters.http.jwt_authn + path: + - key: istio_jwt_payload + - key: realm_access_roles + value: + list_match: + one_of: + string_match: + exact: kafka_ui_access diff --git a/deployment/v3/external/object-store/cred.sh b/deployment/v3/external/object-store/cred.sh index f0dd0dcd4..dc2bc4a03 100755 --- a/deployment/v3/external/object-store/cred.sh +++ b/deployment/v3/external/object-store/cred.sh @@ -16,9 +16,9 @@ function installing_Cred() { echo Istio label kubectl label ns $NS istio-injection=enabled --overwrite - echo Plesae select the type of object-store to be used: - echo 1: for minio native using helm charts - echo 2: for s3 object store + echo Select the type of object-store to be used: + echo 1: For minio native using helm charts + echo 2: For any other s3 object store like AWS while read -p "Please choose the correct option as mentioned above(1/2)" choice do if [ $choice = "1" ] @@ -33,11 +33,12 @@ function installing_Cred() { break elif [ $choice = "2" ] then - read -p "Please enter the S3 user key " USER - read -p "Please enter the S3 secret key" PASS - read -p "Please enter the S3 region" REGION + read -p "Enter the S3 user key " USER + read -p "Enter the S3 secret key" PASS + read -p "Enter the S3 region" REGION read -p "Please provide pretext value : " PRETEXT_VALUE kubectl -n s3 create configmap s3 --from-literal=s3-user-key=$USER --from-literal=s3-region=$REGION --dry-run=client -o yaml | kubectl apply -f - + kubectl -n s3 create secret generic s3 --from-literal=s3-user-secret=$PASS --dry-run=client -o yaml | kubectl apply -f - kubectl -n s3 create secret generic s3 --from-literal=s3-user-secret=$PASS --from-literal=s3-pretext-value=$PRETEXT_VALUE --dry-run=client -o yaml | kubectl apply -f - echo object-store secret and config map is set now. 
break @@ -56,4 +57,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -installing_Cred # calling function \ No newline at end of file +installing_Cred # calling function diff --git a/deployment/v3/external/object-store/minio/delete.sh b/deployment/v3/external/object-store/minio/delete.sh index 69c100dff..a8176bf47 100755 --- a/deployment/v3/external/object-store/minio/delete.sh +++ b/deployment/v3/external/object-store/minio/delete.sh @@ -28,4 +28,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_Minio # calling function \ No newline at end of file +deleting_Minio # calling function diff --git a/deployment/v3/external/object-store/minio/images/minio-dashboard.png b/deployment/v3/external/object-store/minio/images/minio-dashboard.png new file mode 100644 index 000000000..d55566ff1 Binary files /dev/null and b/deployment/v3/external/object-store/minio/images/minio-dashboard.png differ diff --git a/deployment/v3/external/object-store/minio/install.sh b/deployment/v3/external/object-store/minio/install.sh index bf3f677eb..e0db36b6c 100755 --- a/deployment/v3/external/object-store/minio/install.sh +++ b/deployment/v3/external/object-store/minio/install.sh @@ -14,7 +14,7 @@ kubectl label ns $NS istio-injection=enabled --overwrite function installing_minio() { echo Installing minio - helm -n minio install minio mosip/minio --version 10.1.6 + helm -n minio install minio mosip/minio -f values.yaml --version 10.1.6 echo Installing gateways and virtualservice EXTERNAL_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-minio-host}) diff --git a/deployment/v3/external/object-store/minio/values.yaml b/deployment/v3/external/object-store/minio/values.yaml new file mode 100755 index 000000000..7dbf1a22c --- /dev/null +++ b/deployment/v3/external/object-store/minio/values.yaml @@ -0,0 +1,9 @@ +metrics: + serviceMonitor: + enabled: true + +extraEnvVars: + - name: MINIO_PROMETHEUS_URL + value: "http://rancher-monitoring-prometheus.cattle-monitoring-system:9090" + - name: MINIO_PROMETHEUS_JOB_ID + value: "minio" diff --git a/deployment/v3/external/postgres/README.md b/deployment/v3/external/postgres/README.md index 602e96d2d..27b425db1 100644 --- a/deployment/v3/external/postgres/README.md +++ b/deployment/v3/external/postgres/README.md @@ -40,7 +40,6 @@ To initialized a specific db disable init of all others in `init_values.yaml` by ``` psql -h -p -U -f .dump ``` - ## DB Commons secret and postgres-postgresql secret creation * Base64 Encoding and YAML Creation Script: @@ -98,4 +97,3 @@ helm delete postgres-upgrade -n postgres ``` sed -i 's/LOCALE/LC_COLLATE/g' .dump ``` - diff --git a/deployment/v3/external/postgres/init_db.sh b/deployment/v3/external/postgres/init_db.sh index c234c8472..f052a1c3a 100755 --- a/deployment/v3/external/postgres/init_db.sh +++ b/deployment/v3/external/postgres/init_db.sh @@ -8,7 +8,7 @@ fi function initialize_db() { NS=postgres - CHART_VERSION=12.0.1-develop + CHART_VERSION=0.0.1-develop helm repo update while true; do read -p "CAUTION: all existing data will be lost. 
Are you sure?(Y/n)" yn diff --git a/deployment/v3/external/postgres/init_values.yaml b/deployment/v3/external/postgres/init_values.yaml index 48bc0a89e..90ba43e62 100644 --- a/deployment/v3/external/postgres/init_values.yaml +++ b/deployment/v3/external/postgres/init_values.yaml @@ -4,52 +4,60 @@ dbUserPasswords: databases: mosip_master: enabled: true - branch: v1.2.0.1 + branch: develop mosip_audit: enabled: true - branch: v1.2.0.1 + branch: develop mosip_keymgr: enabled: true - branch: v1.2.0.1 + branch: develop mosip_kernel: enabled: true - branch: v1.2.0.1 + branch: develop mosip_idmap: enabled: true - branch: v1.2.0.1 + branch: develop mosip_prereg: enabled: true - branch: v1.2.0.1 + branch: develop mosip_idrepo: enabled: true - branch: v1.2.0.1 + branch: develop mosip_ida: enabled: true - branch: v1.2.0.1 + branch: develop mosip_credential: enabled: true - branch: v1.2.0.1 + branch: develop mosip_regprc: enabled: true - branch: v1.2.0.1 + branch: develop mosip_pms: enabled: true - branch: v1.2.0.1 + branch: develop mosip_hotlist: enabled: true - branch: v1.2.0.1 + branch: develop mosip_resident: enabled: true - branch: v1.2.0.1 + branch: develop + + mosip_otp: + enabled: true + branch: develop + + mosip_digitalcard: + enabled: true + branch: develop diff --git a/deployment/v3/external/postgres/install.sh b/deployment/v3/external/postgres/install.sh index 960578d03..9366c44ec 100755 --- a/deployment/v3/external/postgres/install.sh +++ b/deployment/v3/external/postgres/install.sh @@ -15,9 +15,8 @@ kubectl label ns $NS istio-injection=enabled --overwrite function installing_postgres() { echo Installing Postgres - helm -n $NS install postgres bitnami/postgresql --version 12.11.1 -f values.yaml --wait + helm -n $NS install postgres bitnami/postgresql --version 13.1.5 -f values.yaml --wait echo Installed Postgres - echo Installing gateways and virtual services POSTGRES_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-postgres-host}) helm -n $NS install istio-addons chart/istio-addons --set postgresHost=$POSTGRES_HOST --wait diff --git a/deployment/v3/external/postgres/values.yaml b/deployment/v3/external/postgres/values.yaml index bbeb1d66f..a38f1c3a1 100644 --- a/deployment/v3/external/postgres/values.yaml +++ b/deployment/v3/external/postgres/values.yaml @@ -11,7 +11,6 @@ primary: requests: cpu: 3000m memory: 3000Mi - audit: logHostname: true logConnections: true diff --git a/deployment/v3/mosip/README.md b/deployment/v3/mosip/README.md index ce8fd7ad8..d1351dcb9 100644 --- a/deployment/v3/mosip/README.md +++ b/deployment/v3/mosip/README.md @@ -12,12 +12,16 @@ The steps here install all MOSIP provided services - core and reference implemen ## Install Install in the following order: -* [Config Server Secrets](conf-secrets/README.md) +* [Landing page](landing-page/README.md) +* [Docker secrets](docker-secrets/README.md) +* [Prereg captcha](captcha/README.md) * [Config Server](config-server/README.md) +* [captcha](../mosip/captcha/README.md) * [Artifactory](artifactory/README.md) * [Key Manager](keymanager/README.md) * [WebSub](websub/README.md) -* [Mock-smtp](mock-smtp/README.md) +* [Masterdata-loader](masterdata-loader/README.md) +* [Kernel](kernel/README.md) * [Masterdata-loader](masterdata-loader/) * [Kernel](kernel/README.md) * [Mock Biosdk](biosdk/README.md) @@ -36,7 +40,7 @@ Install in the following order: * [Mosip File Server](mosip-file-server/README.md) * [Resident Services](resident/README.md) * [Registration Client](regclient/README.md) -## Install +* [Restart 
Cron](https://github.com/mosip/mosip-infra/tree/develop/deployment/v3/mosip/restart-cron) The same can be achieved by running `all/install-all.sh`. ``` cd all diff --git a/deployment/v3/mosip/admin/README.md b/deployment/v3/mosip/admin/README.md index 67984445f..23d6e4aa6 100644 --- a/deployment/v3/mosip/admin/README.md +++ b/deployment/v3/mosip/admin/README.md @@ -4,7 +4,6 @@ ``` ./install.sh ``` - ## Admin proxy Admin service accesses other services like Materdata and Keymanager and currently there is only one URL that is used to connect to both these services. This will get fixed in future versions, but as a an interim solution, Admin Proxy docker has been created, which is basically an Nginx proxy connecting to the above services with these URLs: ``` diff --git a/deployment/v3/mosip/admin/delete.sh b/deployment/v3/mosip/admin/delete.sh index 0e4225aaa..bacc21ffd 100755 --- a/deployment/v3/mosip/admin/delete.sh +++ b/deployment/v3/mosip/admin/delete.sh @@ -12,7 +12,7 @@ function deleting_admin() { read -p "Are you sure you want to delete ALL Admin helm charts?(Y/n) " yn if [ $yn = "Y" ] then - kubectl delete -n $NS -f admin-proxy.yaml + kubectl delete -n $NS -f admin-proxy.yaml helm -n $NS delete admin-hotlist helm -n $NS delete admin-service helm -n $NS delete admin-ui @@ -30,4 +30,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_admin # calling function \ No newline at end of file +deleting_admin # calling function diff --git a/deployment/v3/mosip/all/delete-all.sh b/deployment/v3/mosip/all/delete-all.sh index 5efffb8dc..de5cc20c0 100755 --- a/deployment/v3/mosip/all/delete-all.sh +++ b/deployment/v3/mosip/all/delete-all.sh @@ -29,8 +29,11 @@ function Deleting_all() { "artifactory" "websub" "biosdk" + "partner-onboarder" + "restart-cron" "mock-smtp" ) + echo Deleting MOSIP services. 
for i in "${module[@]}" diff --git a/deployment/v3/mosip/all/install-all.sh b/deployment/v3/mosip/all/install-all.sh index a9bc06c1f..3f9c7001f 100755 --- a/deployment/v3/mosip/all/install-all.sh +++ b/deployment/v3/mosip/all/install-all.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then export KUBECONFIG=$1 fi -function installing_all() { +function Installing_all() { ROOT_DIR=`pwd`/../ declare -a module=("conf-secrets" @@ -30,10 +30,13 @@ function installing_all() { "admin" "ida" "print" + "mosip-file-server" + "resident" "partner-onboarder" "mosip-file-server" "resident" "regclient" + "restart-cron" ) echo Installing MOSIP services @@ -55,4 +58,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -installing_all # calling function +Installing_all # calling function diff --git a/deployment/v3/mosip/artifactory/delete.sh b/deployment/v3/mosip/artifactory/delete.sh index 9c29d09c6..ffd65f8cd 100755 --- a/deployment/v3/mosip/artifactory/delete.sh +++ b/deployment/v3/mosip/artifactory/delete.sh @@ -27,4 +27,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_artifactory # calling function \ No newline at end of file +deleting_artifactory # calling function diff --git a/deployment/v3/mosip/artifactory/install.sh b/deployment/v3/mosip/artifactory/install.sh index 34768435d..9c41154a1 100755 --- a/deployment/v3/mosip/artifactory/install.sh +++ b/deployment/v3/mosip/artifactory/install.sh @@ -7,10 +7,10 @@ if [ $# -ge 1 ] ; then fi NS=artifactory -CHART_VERSION=12.0.2 +CHART_VERSION=0.0.1-develop echo Create $NS namespace -kubectl create ns $NS +kubectl create ns $NS function installing_artifactory() { echo Istio label diff --git a/deployment/v3/mosip/artifactory/restart.sh b/deployment/v3/mosip/artifactory/restart.sh index e3d57332d..b82a3d039 100755 --- a/deployment/v3/mosip/artifactory/restart.sh +++ b/deployment/v3/mosip/artifactory/restart.sh @@ -12,7 +12,7 @@ function Restarting_artifactory() { kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status - echo Restarted artifactory services + echo Restarted Artifactory services return 0 } @@ -22,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_artifactory # calling function \ No newline at end of file +Restarting_artifactory # calling function diff --git a/deployment/v3/mosip/biosdk/delete.sh b/deployment/v3/mosip/biosdk/delete.sh index dd3edf41a..a0e1e9df1 100755 --- a/deployment/v3/mosip/biosdk/delete.sh +++ b/deployment/v3/mosip/biosdk/delete.sh @@ -27,4 +27,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_biosdk # calling function \ No newline at end of file +deleting_biosdk 
# calling function diff --git a/deployment/v3/mosip/biosdk/install.sh b/deployment/v3/mosip/biosdk/install.sh index 64f30e65f..b245d88ea 100755 --- a/deployment/v3/mosip/biosdk/install.sh +++ b/deployment/v3/mosip/biosdk/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=biosdk -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS @@ -22,7 +22,8 @@ function installing_biosdk() { ./copy_cm.sh echo Installing Biosdk server - helm -n $NS install biosdk-service mosip/biosdk-service -f values.yaml --version $CHART_VERSION + helm -n $NS install biosdk-service mosip/biosdk-service -f values.yaml --version $CHART_VERSION + echo Biosdk service installed successfully. return 0 } diff --git a/deployment/v3/mosip/bqatsdk/install.sh b/deployment/v3/mosip/bqatsdk/install.sh index 78885a9ed..0ea8a9918 100755 --- a/deployment/v3/mosip/bqatsdk/install.sh +++ b/deployment/v3/mosip/bqatsdk/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=bqatsdk -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS @@ -23,7 +23,7 @@ function installing_bqatsdk() { echo Installing Bqatsdk server helm -n $NS install bqatsdk-service mosip/biosdk-service \ - --set extraEnvVars[0].name="server_servlet_context_env" \ + --set extraEnvVars[0].name="server_servlet_context_path_env" \ --set extraEnvVars[0].value="/bqatsdk-service" \ --set extraEnvVars[1].name="spring_application_name_env" \ --set extraEnvVars[1].value="bqat-sdk" \ diff --git a/deployment/v3/mosip/captcha/README.md b/deployment/v3/mosip/captcha/README.md new file mode 100644 index 000000000..1105308a8 --- /dev/null +++ b/deployment/v3/mosip/captcha/README.md @@ -0,0 +1,11 @@ +# Pre-registration Portal Captcha + +## Pre-requisites +* Create a Google reCAPTCHA v2 ("I am not a Robot") key from the Google [Recaptcha Admin](https://www.google.com/recaptcha) console. +* Give the domain name as your PreReg domain name, e.g. "prereg.sandbox.xyz.net". + +## Install +Create the configmap and secret in the `captcha` namespace by running the following: +```sh +./install.sh [kubeconfig] +``` diff --git a/deployment/v3/mosip/captcha/copy_cm.sh b/deployment/v3/mosip/captcha/copy_cm.sh new file mode 100755 index 000000000..be231e2f2 --- /dev/null +++ b/deployment/v3/mosip/captcha/copy_cm.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copy configmaps from other namespaces +# DST_NS: Destination namespace + +function copying_cm() { + COPY_UTIL=./copy_cm_func.sh + DST_NS=captcha + + $COPY_UTIL configmap global default $DST_NS + $COPY_UTIL configmap artifactory-share artifactory $DST_NS + $COPY_UTIL configmap config-server-share config-server $DST_NS + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +copying_cm # calling function \ No newline at end of file diff --git a/deployment/v3/mosip/captcha/copy_cm_func.sh b/deployment/v3/mosip/captcha/copy_cm_func.sh new file mode 100755 index 000000000..1bbad7126 --- /dev/null +++ b/deployment/v3/mosip/captcha/copy_cm_func.sh @@ -0,0 +1,32 @@ +#!/bin/sh +# Copy configmap and secret from one namespace to another. +# ./copy_cm_func.sh <configmap|secret> <name> <source-namespace> <destination-namespace> [name] +# Parameters: +# resource: configmap|secret +# name: Optional new name of the configmap or secret in destination namespace. This may be needed if there is a
This may be needed if there is +# clash of names + +if [ $1 = "configmap" ] +then + RESOURCE=configmap +elif [ $1 = "secret" ] +then + RESOURCE=secret +else + echo "Incorrect resource $1. Exiting.." + exit 1 +fi + + +if [ $# -ge 5 ] +then + kubectl -n $4 delete --ignore-not-found=true $RESOURCE $5 + kubectl -n $3 get $RESOURCE $2 -o yaml | sed "s/namespace: $3/namespace: $4/g" | sed "s/name: $2/name: $5/g" | kubectl -n $4 create -f - +else + kubectl -n $4 delete --ignore-not-found=true $RESOURCE $2 + kubectl -n $3 get $RESOURCE $2 -o yaml | sed "s/namespace: $3/namespace: $4/g" | kubectl -n $4 create -f - +fi + + + + diff --git a/deployment/v3/mosip/captcha/delete.sh b/deployment/v3/mosip/captcha/delete.sh new file mode 100755 index 000000000..544b1555b --- /dev/null +++ b/deployment/v3/mosip/captcha/delete.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Uninstalls all captcha helm charts +function deleting_captcha() { + while true; do + read -p "Are you sure you want to delete all captcha helm charts?(Y/n) " yn + if [ $yn = "Y" ] + then + helm -n captcha delete captcha + break + else + break + fi + done + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +deleting_captcha # calling function \ No newline at end of file diff --git a/deployment/v3/mosip/captcha/get_logs.sh b/deployment/v3/mosip/captcha/get_logs.sh new file mode 100755 index 000000000..4a1697e21 --- /dev/null +++ b/deployment/v3/mosip/captcha/get_logs.sh @@ -0,0 +1,3 @@ +#!/bin/sh +# pod name +kubectl -n captcha logs -f $1 | grep -v "/v1/captcha/actuator/health" | grep -v "/v1/captcha/actuator/prometheus" \ No newline at end of file diff --git a/deployment/v3/mosip/captcha/install.sh b/deployment/v3/mosip/captcha/install.sh new file mode 100755 index 000000000..72f60f520 --- /dev/null +++ b/deployment/v3/mosip/captcha/install.sh @@ -0,0 +1,63 @@ +#!/bin/bash +# Creates configmap and secrets for Prereg Captcha +# Creates configmap and secrets for resident Captcha +## Usage: ./install.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + +NS=captcha +CHART_VERSION=0.0.1-develop + +PREREG_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-prereg-host}) +RESIDENT_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-resident-host}) +ESIGNET_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-esignet-host}) + +echo Create $NS namespace +kubectl create ns $NS + +function Prereg_Captcha() { + echo Please enter the recaptcha admin site key for domain $PREREG_HOST + read SITE_KEY + echo Please enter the recaptcha admin secret key for domain $PREREG_HOST + read SECRET_KEY + echo Please enter the recaptcha admin site key for domain $RESIDENT_HOST + read RSITE_KEY + echo Please enter the recaptcha admin secret key for domain $RESIDENT_HOST + read RSECRET_KEY + echo Please enter the recaptcha admin site key for domain $ESIGNET_HOST + read ESITE_KEY + echo Please enter the recaptcha admin secret key for domain $ESIGNET_HOST + read ESECRET_KEY + + echo Setting up captcha secrets + kubectl -n $NS create secret generic mosip-captcha --from-literal=prereg-captcha-site-key=$SITE_KEY --from-literal=prereg-captcha-secret-key=$SECRET_KEY --from-literal=resident-captcha-site-key=$RSITE_KEY 
--from-literal=resident-captcha-secret-key=$RSECRET_KEY --from-literal=esignet-captcha-site-key=$ESITE_KEY --from-literal=esignet-captcha-secret-key=$ESECRET_KEY --dry-run=client -o yaml | kubectl apply -f - +} + +function installing_captcha() { + echo Istio label + + kubectl label ns $NS istio-injection=disabled --overwrite + helm repo update + + echo Copy configmaps + sed -i 's/\r$//' copy_cm.sh + ./copy_cm.sh + + echo Installing captcha + helm -n $NS install captcha mosip/captcha --version $CHART_VERSION + + echo Installed captcha service + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes + +Prereg_Captcha # calling function +installing_captcha # calling second function diff --git a/deployment/v3/mosip/captcha/restart.sh b/deployment/v3/mosip/captcha/restart.sh new file mode 100755 index 000000000..eaf4bf056 --- /dev/null +++ b/deployment/v3/mosip/captcha/restart.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# Restart the captcha services +## Usage: ./restart.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + +function Restarting_captcha() { + NS=captcha + kubectl -n $NS rollout restart deploy + + kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status + + echo Restarted captcha services + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +Restarting_captcha # calling function \ No newline at end of file diff --git a/deployment/v3/mosip/conf-secrets/install.sh b/deployment/v3/mosip/conf-secrets/install.sh index 3ff2cdd51..3c1c2e2bf 100755 --- a/deployment/v3/mosip/conf-secrets/install.sh +++ b/deployment/v3/mosip/conf-secrets/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=conf-secrets -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS diff --git a/deployment/v3/mosip/config-server/README.md b/deployment/v3/mosip/config-server/README.md index 1d8899cf7..87b23a7b5 100644 --- a/deployment/v3/mosip/config-server/README.md +++ b/deployment/v3/mosip/config-server/README.md @@ -13,6 +13,19 @@ Config server serves all properties required by MOSIP modules. This must be inst ./install.sh ``` +## Delete +* To delete config-server: +```sh +./delete.sh +``` + +## Enable config-server to pull configurations from a local git repository +Enable Config-server to pull configurations from a local repository: +* While running the install script, the user will be prompted to decide whether the config-server should pull configurations from a local repository (NFS). +* If the user chooses to use a local git repository, they will be asked to provide the NFS path (the directory where the local repository is cloned) and the NFS server IP. +* If the user chooses not to pull configurations from a local repository (NFS), the configurations will be pulled from the remote repository defined in the values.yaml file.
+Note: +* Before choosing to pull configurations from a local repository (NFS), the user must manually clone the config-server repository onto the NFS server where the configurations will be maintained. +* Then check out the specific branch from which the configurations need to be taken. diff --git a/deployment/v3/mosip/config-server/copy_cm.sh b/deployment/v3/mosip/config-server/copy_cm.sh index ae5c122c4..42b611e24 100755 --- a/deployment/v3/mosip/config-server/copy_cm.sh +++ b/deployment/v3/mosip/config-server/copy_cm.sh @@ -1,6 +1,6 @@ #!/bin/bash # Copy configmaps from other namespaces - + function copying_cm() { COPY_UTIL=../../utils/copy_cm_func.sh DST_NS=config-server # DST_NS: Destination namespace @@ -20,4 +20,3 @@ set -o nounset ## set -u : exit the script if you try to use an uninitialised set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes copying_cm # calling function - diff --git a/deployment/v3/mosip/config-server/install.sh b/deployment/v3/mosip/config-server/install.sh index 2fe714933..73e1903c2 100755 --- a/deployment/v3/mosip/config-server/install.sh +++ b/deployment/v3/mosip/config-server/install.sh @@ -7,10 +7,29 @@ if [ $# -ge 1 ] ; then fi NS=config-server -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop -read -p "Is conf-secrets module installed?(Y/n) " yn -if [ $yn = "Y" ]; then read -p "Is values.yaml for config-server chart set correctly as part of Pre-requisites?(Y/n) " yn; fi +read -p "Is conf-secrets module installed?(Y/n) " conf_installed +read -p "Do you want to enable config-server to pull configurations from local repository?(Y/n)( Default: n )" repo_enabled + +if [[ -z $repo_enabled ]]; then + repo_enabled=n +fi + +if [ "$repo_enabled" = "Y" ]; then + LOCALREPO="true" + read -p "Provide the NFS path where the local repository is cloned/maintained: " path + NFS_PATH="$path" + + read -p "Provide the NFS IP address of the server where the local repository is cloned: " ip + NFS_SERVER="$ip" +else + LOCALREPO="false" + NFS_PATH="" + NFS_SERVER="" +fi + +if [ $conf_installed = "Y" ]; then read -p "Is values.yaml for config-server chart set correctly as part of Pre-requisites?(Y/n) " yn; fi if [ $yn = "Y" ] then echo Create $NS namespace @@ -35,9 +54,14 @@ if [ $yn = "Y" ] sed -i 's/\r$//' copy_secrets.sh ./copy_secrets.sh - echo Installing config-server - helm -n $NS install config-server mosip/config-server -f values.yaml --wait --version $CHART_VERSION - echo Installed Config-server. + echo "Installing config-server" + helm -n $NS install config-server mosip/config-server \ + --set localRepo.enabled="$LOCALREPO" \ + --set volume.nfs.path="$NFS_PATH" \ + --set volume.nfs.server="$NFS_SERVER" \ + -f values.yaml \ + --wait --version $CHART_VERSION + echo "Installed Config-server". else echo Exiting the MOSIP installation. Please meet the pre-requisites and than start again.
kill -9 `ps --pid $$ -oppid=`; exit diff --git a/deployment/v3/mosip/config-server/restart.sh b/deployment/v3/mosip/config-server/restart.sh index db27be57e..ad2e5f04e 100755 --- a/deployment/v3/mosip/config-server/restart.sh +++ b/deployment/v3/mosip/config-server/restart.sh @@ -6,7 +6,6 @@ if [ $# -ge 1 ] ; then export KUBECONFIG=$1 fi - function config_server() { NS=config-server kubectl -n $NS rollout restart deploy @@ -23,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -config_server # calling function \ No newline at end of file +config_server # calling function diff --git a/deployment/v3/mosip/config-server/values.yaml b/deployment/v3/mosip/config-server/values.yaml index ee64d32f6..73cc1bb9f 100644 --- a/deployment/v3/mosip/config-server/values.yaml +++ b/deployment/v3/mosip/config-server/values.yaml @@ -7,3 +7,12 @@ gitRepo: ## User name of user who has access to the private repo. Ignore for public repo username: "" token: "" + +localRepo: + enabled: false # Set this to "true" inorder to Enable config-server to pull configurations from local git repo. + spring_profiles_active: "native" + spring_cloud_config_server_native_search_locations: "file:///var/lib/config_repo" + spring_cloud_config_server_accept_empty: true + spring_cloud_config_server_git_force_pull: false + spring_cloud_config_server_git_refreshRate: 0 + spring_cloud_config_server_git_cloneOnStart: false diff --git a/deployment/v3/mosip/credential-feeder/.gitignore b/deployment/v3/mosip/credential-feeder/.gitignore new file mode 100644 index 000000000..ee3892e87 --- /dev/null +++ b/deployment/v3/mosip/credential-feeder/.gitignore @@ -0,0 +1 @@ +charts/ diff --git a/deployment/v3/mosip/credential-feeder/.helmignore b/deployment/v3/mosip/credential-feeder/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/deployment/v3/mosip/credential-feeder/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/deployment/v3/mosip/credential-feeder/README.md b/deployment/v3/mosip/credential-feeder/README.md new file mode 100644 index 000000000..2e9532251 --- /dev/null +++ b/deployment/v3/mosip/credential-feeder/README.md @@ -0,0 +1,6 @@ +# Credentialfeeder + +## Install +```sh +./install.sh +``` diff --git a/deployment/v3/mosip/credential-feeder/copy_cm.sh b/deployment/v3/mosip/credential-feeder/copy_cm.sh new file mode 100755 index 000000000..7c226fb79 --- /dev/null +++ b/deployment/v3/mosip/credential-feeder/copy_cm.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copy configmaps from other namespaces +# DST_NS: Destination namespace + +function copying_cm() { + COPY_UTIL=../../utils/copy_cm_func.sh + DST_NS=credentialfeeder + + $COPY_UTIL configmap global default $DST_NS + $COPY_UTIL configmap artifactory-share artifactory $DST_NS + $COPY_UTIL configmap config-server-share config-server $DST_NS + return 0 +} + +# set commands for error handling. 
+set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +copying_cm # calling function diff --git a/deployment/v3/mosip/credential-feeder/delete.sh b/deployment/v3/mosip/credential-feeder/delete.sh new file mode 100755 index 000000000..fbcbedbce --- /dev/null +++ b/deployment/v3/mosip/credential-feeder/delete.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Uninstalls idrepo services +## Usage: ./delete.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + +function deleting_credentialfeeder() { + NS=credentialfeeder + while true; do + read -p "Are you sure you want to delete idrepo helm chart?(Y/n) " yn + if [ $yn = "Y" ] + then + helm -n $NS delete credentialfeeder + break + else + break + fi + done + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +deleting_credentialfeeder # calling function diff --git a/deployment/v3/mosip/credential-feeder/install.sh b/deployment/v3/mosip/credential-feeder/install.sh new file mode 100755 index 000000000..703da39e7 --- /dev/null +++ b/deployment/v3/mosip/credential-feeder/install.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# Installs idrepo +## Usage: ./install.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + +NS=credentialfeeder +CHART_VERSION=1.0.0 + +echo Create $NS namespace +kubectl create ns $NS + +function installing_credentialfeeder() { + echo Istio label + kubectl label ns $NS istio-injection=enabled --overwrite + helm repo add mosip https://mosip.github.io/mosip-helm + helm repo update + + echo Copy configmaps + sed -i 's/\r$//' copy_cm.sh + ./copy_cm.sh + + echo Running credentialfeeder job + helm -n $NS install credentialfeeder mosip/credentialfeeder --version $CHART_VERSION --wait --wait-for-jobs + + echo Installed credentialfeeder + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +installing_credentialfeeder # calling function diff --git a/deployment/v3/mosip/databreachdetector/copy_cm.sh b/deployment/v3/mosip/databreachdetector/copy_cm.sh new file mode 100755 index 000000000..487ea261a --- /dev/null +++ b/deployment/v3/mosip/databreachdetector/copy_cm.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copy configmaps from other namespaces +# DST_NS: Destination (current) namespace + +function copying_cm() { + COPY_UTIL=../../utils/copy_cm_func.sh + DST_NS=databreachdetector + + $COPY_UTIL configmap global default $DST_NS + return 0 +} + +# set commands for error handling. 
+set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +copying_cm # calling function + + + diff --git a/deployment/v3/mosip/databreachdetector/copy_secrets.sh b/deployment/v3/mosip/databreachdetector/copy_secrets.sh new file mode 100755 index 000000000..0616cf183 --- /dev/null +++ b/deployment/v3/mosip/databreachdetector/copy_secrets.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copy secrets from other namespaces +# DST_NS: Destination namespace + +function copying_secrets() { + COPY_UTIL=../../utils/copy_cm_func.sh + DST_NS=databreachdetector + $COPY_UTIL secret s3 s3 $DST_NS + $COPY_UTIL secret postgres-postgresql postgres $DST_NS + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +copying_secrets # calling function \ No newline at end of file diff --git a/deployment/v3/mosip/databreachdetector/delete.sh b/deployment/v3/mosip/databreachdetector/delete.sh new file mode 100755 index 000000000..b76972568 --- /dev/null +++ b/deployment/v3/mosip/databreachdetector/delete.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Uninstalls print service +## Usage: ./delete.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + +function deleting_databreachdetector() { + NS=databreachdetector + while true; do + read -p "Are you sure you want to delete print helm chart?(Y/n) " yn + if [ $yn = "Y" ] + then + helm -n $NS delete databreachdetector + break + else + break + fi + done + return 0 +} + +# set commands for error handling. 
+set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +deleting_databreachdetector # calling function diff --git a/deployment/v3/mosip/databreachdetector/install.sh b/deployment/v3/mosip/databreachdetector/install.sh new file mode 100755 index 000000000..30f7f2e34 --- /dev/null +++ b/deployment/v3/mosip/databreachdetector/install.sh @@ -0,0 +1,50 @@ +#!/bin/bash +# Installs sample print service +## Usage: ./restart.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + + +NS=databreachdetector +CHART_VERSION=0.0.1-develop + +echo Create $NS namespace +kubectl create ns $NS + +function installing_databreachdetector() { + echo Istio label + kubectl label ns $NS istio-injection=disabled --overwrite + helm repo update + + echo Copy configmaps + sed -i 's/\r$//' copy_cm.sh + kubectl -n $NS delete --ignore-not-found=true cm s3 + ./copy_cm.sh + + echo Copy secrets + sed -i 's/\r$//' copy_secrets.sh + ./copy_secrets.sh + + DB_HOST=$( kubectl -n default get cm global -o json |jq -r '.data."mosip-postgres-host"' ) + S3_USER_KEY=$( kubectl -n s3 get cm s3 -o json |jq -r '.data."s3-user-key"' ) + S3_REGION=$( kubectl -n s3 get cm s3 -o json |jq -r '.data."s3-region"' ) + + echo Installing databreachdetector + helm -n $NS install databreachdetector mosip/databreachdetector --wait --version $CHART_VERSION \ + --set databreachdetector.configmaps.db.db-server="$DB_HOST" \ + --set databreachdetector.configmaps.s3.s3-bucket-name='secure-datarig' \ + --set databreachdetector.configmaps.s3.s3-region="$S3_REGION" \ + --set databreachdetector.configmaps.s3.s3-host='minio.minio:9000' \ + --set databreachdetector.configmaps.s3.s3-user-key="$S3_USER_KEY" + return 0 +} + +# set commands for error handling. 
+set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +installing_databreachdetector # calling function diff --git a/deployment/v3/mosip/datashare/delete.sh b/deployment/v3/mosip/datashare/delete.sh index abfbb918a..636038d4c 100755 --- a/deployment/v3/mosip/datashare/delete.sh +++ b/deployment/v3/mosip/datashare/delete.sh @@ -27,4 +27,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_datashare # calling function \ No newline at end of file +deleting_datashare # calling function diff --git a/deployment/v3/mosip/datashare/install.sh b/deployment/v3/mosip/datashare/install.sh index 3fde89ff8..c475724ab 100755 --- a/deployment/v3/mosip/datashare/install.sh +++ b/deployment/v3/mosip/datashare/install.sh @@ -8,7 +8,6 @@ fi NS=datashare CHART_VERSION=12.0.1 - echo Create $NS namespace kubectl create ns $NS diff --git a/deployment/v3/mosip/datashare/restart.sh b/deployment/v3/mosip/datashare/restart.sh index 649141dbd..f38dbcb75 100755 --- a/deployment/v3/mosip/datashare/restart.sh +++ b/deployment/v3/mosip/datashare/restart.sh @@ -11,6 +11,7 @@ function Restarting_datashare() { kubectl -n $NS rollout restart deploy kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status + echo Restarted datashare services return 0 } @@ -21,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_datashare # calling function \ No newline at end of file +Restarting_datashare # calling function diff --git a/deployment/v3/mosip/ida/delete.sh b/deployment/v3/mosip/ida/delete.sh index ce306ad49..4a400a2bd 100755 --- a/deployment/v3/mosip/ida/delete.sh +++ b/deployment/v3/mosip/ida/delete.sh @@ -30,4 +30,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_ida # calling function \ No newline at end of file +deleting_ida # calling function diff --git a/deployment/v3/mosip/ida/install.sh b/deployment/v3/mosip/ida/install.sh index a3856f774..bea94cfab 100755 --- a/deployment/v3/mosip/ida/install.sh +++ b/deployment/v3/mosip/ida/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=ida -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS @@ -35,20 +35,55 @@ function installing_ida() { ENABLE_INSECURE='--set enable_insecure=true'; fi + default_enable_volume=false + read -p "Would you like to enable volume (true/false) : [ default : false ] : " enable_volume + enable_volume=${enable_volume:-$default_enable_volume} + + IDA_KEYGEN_HELM_ARGS='--set springConfigNameEnv="id-authentication" --set softHsmCM="softhsm-ida-share"' + IDA_HELM_ARGS='' + if [[ $enable_volume == 'true' ]]; then + + 
default_volume_size=100M + read -p "Provide the size for volume [ default : 100M ]" volume_size + volume_size=${volume_size:-$default_volume_size} + + default_volume_mount_path='/home/mosip/config/' + read -p "Provide the mount path for volume [ default : '/home/mosip/config/' ] : " volume_mount_path + volume_mount_path=${volume_mount_path:-$default_volume_mount_path} + + PVC_CLAIM_NAME='ida-keygen-keymanager' + IDA_KEYGEN_HELM_ARGS="--set persistence.enabled=true \ + --set volumePermissions.enabled=true \ + --set persistence.size=$volume_size \ + --set persistence.mountDir=\"$volume_mount_path\" \ + --set springConfigNameEnv='id-authentication' \ + --set persistence.pvc_claim_name=\"$PVC_CLAIM_NAME\" \ + " + IDA_HELM_ARGS="--set persistence.enabled=true \ + --set volumePermissions.enabled=true \ + --set persistence.mountDir=\"$volume_mount_path\" \ + --set persistence.existingClaim=\"$PVC_CLAIM_NAME\" \ + --set extraEnvVarsCM={'global','config-server-share','artifactory-share'} \ + " + fi + echo "IDA KEYGEN HELM ARGS $IDA_KEYGEN_HELM_ARGS" + echo "IDA HELM ARGS $IDA_HELM_ARGS" + echo Running ida keygen - helm -n $NS install ida-keygen mosip/keygen --wait --wait-for-jobs --version $CHART_VERSION -f keygen_values.yaml + helm -n $NS install ida-keygen mosip/keygen $IDA_KEYGEN_HELM_ARGS --wait --wait-for-jobs --version $CHART_VERSION echo Installing ida auth - helm -n $NS install ida-auth mosip/ida-auth --version $CHART_VERSION $ENABLE_INSECURE + helm -n $NS install ida-auth mosip/ida-auth $IDA_HELM_ARGS --version $CHART_VERSION $ENABLE_INSECURE echo Installing ida internal - helm -n $NS install ida-internal mosip/ida-internal --version $CHART_VERSION $ENABLE_INSECURE + helm -n $NS install ida-internal mosip/ida-internal $IDA_HELM_ARGS --version $CHART_VERSION $ENABLE_INSECURE echo Installing ida otp - helm -n $NS install ida-otp mosip/ida-otp --version $CHART_VERSION $ENABLE_INSECURE + helm -n $NS install ida-otp mosip/ida-otp $IDA_HELM_ARGS --version $CHART_VERSION $ENABLE_INSECURE kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status - echo Intalled ida services + + echo Installed ida services return 0 } diff --git a/deployment/v3/mosip/ida/restart.sh b/deployment/v3/mosip/ida/restart.sh index c619b1c28..5887e2a04 100755 --- a/deployment/v3/mosip/ida/restart.sh +++ b/deployment/v3/mosip/ida/restart.sh @@ -22,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_ida # calling function \ No newline at end of file +Restarting_ida # calling function diff --git a/deployment/v3/mosip/idrepo/copy_cm.sh b/deployment/v3/mosip/idrepo/copy_cm.sh index 623d11108..63432ab60 100755 --- a/deployment/v3/mosip/idrepo/copy_cm.sh +++ b/deployment/v3/mosip/idrepo/copy_cm.sh @@ -18,4 +18,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -copying_cm # calling function \ No newline at end of file +copying_cm # calling function diff --git a/deployment/v3/mosip/idrepo/delete.sh b/deployment/v3/mosip/idrepo/delete.sh index 3ed3fe3f0..0becc82c0 100755 --- a/deployment/v3/mosip/idrepo/delete.sh +++ 
b/deployment/v3/mosip/idrepo/delete.sh @@ -31,4 +31,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_idrepo # calling function \ No newline at end of file +deleting_idrepo # calling function diff --git a/deployment/v3/mosip/idrepo/install.sh b/deployment/v3/mosip/idrepo/install.sh index 6f9901f64..7b87e9b93 100755 --- a/deployment/v3/mosip/idrepo/install.sh +++ b/deployment/v3/mosip/idrepo/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=idrepo -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS @@ -37,6 +37,7 @@ function installing_idrepo() { helm -n $NS install vid mosip/vid --version $CHART_VERSION kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status + echo Installed idrepo services return 0 } diff --git a/deployment/v3/mosip/image-compressor/install.sh b/deployment/v3/mosip/image-compressor/install.sh index 4b6fcd302..79d685b89 100644 --- a/deployment/v3/mosip/image-compressor/install.sh +++ b/deployment/v3/mosip/image-compressor/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=image-compressor -CHART_VERSION=12.0.1-B3 +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS @@ -23,7 +23,7 @@ function installing_imagecompressor() { echo Installing imagecompressor server helm -n $NS install image-compressor mosip/biosdk-service \ - --set extraEnvVars[0].name="server_servlet_context_env" \ + --set extraEnvVars[0].name="server_servlet_context_path_env" \ --set extraEnvVars[0].value="/image-compressor" \ --set extraEnvVars[1].name="spring_application_name_env" \ --set extraEnvVars[1].value="image-compressor" \ diff --git a/deployment/v3/mosip/kernel/copy_cm.sh b/deployment/v3/mosip/kernel/copy_cm.sh index c0a53de79..c152cf88b 100755 --- a/deployment/v3/mosip/kernel/copy_cm.sh +++ b/deployment/v3/mosip/kernel/copy_cm.sh @@ -18,4 +18,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -copying_cm # calling function \ No newline at end of file +copying_cm # calling function diff --git a/deployment/v3/mosip/kernel/delete.sh b/deployment/v3/mosip/kernel/delete.sh index 973a85695..e803e59ff 100755 --- a/deployment/v3/mosip/kernel/delete.sh +++ b/deployment/v3/mosip/kernel/delete.sh @@ -6,7 +6,6 @@ if [ $# -ge 1 ] ; then export KUBECONFIG=$1 fi - function deleting_kernel() { NS=kernel while true; do @@ -36,4 +35,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_kernel # calling function \ No newline at end of file +deleting_kernel # calling function diff --git a/deployment/v3/mosip/kernel/install.sh b/deployment/v3/mosip/kernel/install.sh index be9a99069..203d7672b 100755 --- a/deployment/v3/mosip/kernel/install.sh +++ b/deployment/v3/mosip/kernel/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=kernel -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop echo Create $NS namespace 
kubectl create ns $NS diff --git a/deployment/v3/mosip/kernel/restart.sh b/deployment/v3/mosip/kernel/restart.sh index 73d6d8b18..b286217c1 100755 --- a/deployment/v3/mosip/kernel/restart.sh +++ b/deployment/v3/mosip/kernel/restart.sh @@ -21,4 +21,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_kernel # calling function \ No newline at end of file +Restarting_kernel # calling function diff --git a/deployment/v3/mosip/key-migration-utility/README.md b/deployment/v3/mosip/key-migration-utility/README.md new file mode 100644 index 000000000..de2cd886c --- /dev/null +++ b/deployment/v3/mosip/key-migration-utility/README.md @@ -0,0 +1,9 @@ +# HSM Key Migrator + +## Install +The HSM Key migrator is done by running the below script: +``` +./install.sh +``` + + diff --git a/deployment/v3/mosip/key-migration-utility/copy_cm.sh b/deployment/v3/mosip/key-migration-utility/copy_cm.sh new file mode 100755 index 000000000..bc227055b --- /dev/null +++ b/deployment/v3/mosip/key-migration-utility/copy_cm.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# Copy configmaps from other namespaces +# DST_NS: Destination (current) namespace + +function copying_cm() { + COPY_UTIL=../../utils/copy_cm_func.sh + DST_NS=key-migration-utility + + module=$1 + $COPY_UTIL configmap global default $DST_NS + $COPY_UTIL configmap artifactory-share artifactory $DST_NS + $COPY_UTIL configmap config-server-share config-server $DST_NS + $COPY_UTIL configmap softhsm-$module-share softhsm $DST_NS + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes + +if [[ $# -lt 1 ]]; then + echo "SoftHsm module name not passed; EXITING;" + exit 0; +fi +copying_cm $1 # calling function + + + diff --git a/deployment/v3/mosip/key-migration-utility/delete.sh b/deployment/v3/mosip/key-migration-utility/delete.sh new file mode 100755 index 000000000..ab5a9fa30 --- /dev/null +++ b/deployment/v3/mosip/key-migration-utility/delete.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# Uninstalls key-migration-utility +## Usage: ./delete.sh [kubeconfig] + +if [ $# -ge 2 ] ; then + export KUBECONFIG=$2 +fi + +function deleting_hsm_key_migrator() { + NS=key-migration-utility + module=$1 + while true; do + read -p "Are you sure you want to delete key-migration-utility helm chart?(Y/n) " yn + if [ $yn = "Y" ] + then + helm -n $NS list + helm -n $NS delete "key-migration-utility-$module" + break + else + break + fi + done + return 0 +} + +# set commands for error handling. 
+set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +if [[ $# -lt 1 ]]; then + echo "SoftHsm module name not passed; EXITING;" + exit 0; +fi +deleting_hsm_key_migrator $1 # calling function diff --git a/deployment/v3/mosip/key-migration-utility/install.sh b/deployment/v3/mosip/key-migration-utility/install.sh new file mode 100755 index 000000000..3a38764be --- /dev/null +++ b/deployment/v3/mosip/key-migration-utility/install.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# Installs key-migration-utility +## Usage: ./install.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + +NS=key-migration-utility +CHART_VERSION=0.0.1-develop + +echo Creating $NS namespace +kubectl create ns $NS + +function installing_key_migration_utility() { + + helm repo update + + read -p "please provide module name for migration (ex: kernel, ida, esignet, etc.) : " module + + if [[ -z $module ]]; then + echo "module is empty; EXITING;" + exit 0 + fi + + read -p "please provide properties file name (ex: migration ) : " config_prop + + if [[ -z $config_prop ]]; then + echo "config properties is empty; EXITING;" + exit 0 + fi + + echo Copy configmaps + sed -i 's/\r$//' copy_cm.sh + ./copy_cm.sh $module + + echo Installing key-migration-utility + helm -n $NS install key-migration-utility-$module mosip/key-migration-utility \ + --set softHsmCM=softhsm-$module-share \ + --set springConfigNameEnv=$config_prop \ + --wait --wait-for-jobs \ + --version $CHART_VERSION + + echo Installed key-migration-utility services + return 0 +} + +# set commands for error handling. 
+set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +installing_key_migration_utility # calling function diff --git a/deployment/v3/mosip/keymanager/delete.sh b/deployment/v3/mosip/keymanager/delete.sh index 189d302fa..f548b3870 100755 --- a/deployment/v3/mosip/keymanager/delete.sh +++ b/deployment/v3/mosip/keymanager/delete.sh @@ -28,4 +28,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_keymanager # calling function \ No newline at end of file +deleting_keymanager # calling function diff --git a/deployment/v3/mosip/keymanager/idle_timeout_envoyfilter.yaml b/deployment/v3/mosip/keymanager/idle_timeout_envoyfilter.yaml index fd45a91d7..9a8fd4992 100644 --- a/deployment/v3/mosip/keymanager/idle_timeout_envoyfilter.yaml +++ b/deployment/v3/mosip/keymanager/idle_timeout_envoyfilter.yaml @@ -16,5 +16,5 @@ spec: value: name: envoy.filters.network.tcp_proxy typed_config: - '@type': type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + '@type': type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy idle_timeout: 0s diff --git a/deployment/v3/mosip/keymanager/install.sh b/deployment/v3/mosip/keymanager/install.sh index b682c7ddb..6321b5848 100755 --- a/deployment/v3/mosip/keymanager/install.sh +++ b/deployment/v3/mosip/keymanager/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=keymanager -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop echo Creating $NS namespace kubectl create ns $NS @@ -22,11 +22,45 @@ function installing_keymanager() { sed -i 's/\r$//' copy_cm.sh ./copy_cm.sh + default_enable_volume=false + read -p "Would you like to enable volume (true/false) : [ default : false ] : " enable_volume + enable_volume=${enable_volume:-$default_enable_volume} + + KERNEL_KEYGEN_HELM_ARGS='--set springConfigNameEnv="kernel" --set softHsmCM="softhsm-kernel-share"' + KERNEL_HELM_ARGS='' + if [[ $enable_volume == 'true' ]]; then + + default_volume_size=100M + read -p "Provide the size for volume [ default : 100M ]" volume_size + volume_size=${volume_size:-$default_volume_size} + + default_volume_mount_path='/home/mosip/config/' + read -p "Provide the mount path for volume [ default : '/home/mosip/config/' ] : " volume_mount_path + volume_mount_path=${volume_mount_path:-$default_volume_mount_path} + + PVC_CLAIM_NAME='kernel-keygen-keymanager' + KERNEL_KEYGEN_HELM_ARGS="--set persistence.enabled=true \ + --set volumePermissions.enabled=true \ + --set persistence.size=$volume_size \ + --set persistence.mountDir=\"$volume_mount_path\" \ + --set springConfigNameEnv='kernel' \ + --set persistence.pvc_claim_name=\"$PVC_CLAIM_NAME\" \ + " + KERNEL_HELM_ARGS="--set persistence.enabled=true \ + --set volumePermissions.enabled=true \ + --set persistence.mountDir=\"$volume_mount_path\" \ + --set persistence.existingClaim=\"$PVC_CLAIM_NAME\" \ + --set extraEnvVarsCM={'global','config-server-share','artifactory-share'} \ + " + fi + echo "KERNEL KEYGEN HELM ARGS $KERNEL_KEYGEN_HELM_ARGS" + echo "KERNEL HELM ARGS $KERNEL_HELM_ARGS" + echo Running keygenerator. 
This may take a few minutes.. - helm -n $NS install kernel-keygen mosip/keygen --wait --wait-for-jobs --version $CHART_VERSION -f keygen_values.yaml + helm -n $NS install kernel-keygen mosip/keygen $KERNEL_KEYGEN_HELM_ARGS --wait --wait-for-jobs --version $CHART_VERSION echo Installing keymanager - helm -n $NS install keymanager mosip/keymanager --version $CHART_VERSION + helm -n $NS install keymanager mosip/keymanager $KERNEL_HELM_ARGS --wait --version $CHART_VERSION kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status echo Installed keymanager services diff --git a/deployment/v3/mosip/keymanager/restart.sh b/deployment/v3/mosip/keymanager/restart.sh index f56665af8..1199194b6 100755 --- a/deployment/v3/mosip/keymanager/restart.sh +++ b/deployment/v3/mosip/keymanager/restart.sh @@ -22,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_keymanager # calling function \ No newline at end of file +Restarting_keymanager # calling function diff --git a/deployment/v3/mosip/masterdata-loader/README.md b/deployment/v3/mosip/masterdata-loader/README.md new file mode 100644 index 000000000..02c133167 --- /dev/null +++ b/deployment/v3/mosip/masterdata-loader/README.md @@ -0,0 +1,7 @@ +# Masterdataloader + +## Install +* To incorporate your own master data, modify the `install.sh` with your `GithubBranch`, `GithubRepo` and `XlsfolderPath: /home/mosip/`. +``` +./install.sh +``` diff --git a/deployment/v3/mosip/masterdata-loader/copy_secrets.sh b/deployment/v3/mosip/masterdata-loader/copy_secrets.sh index 1014f12a4..be615ff5f 100755 --- a/deployment/v3/mosip/masterdata-loader/copy_secrets.sh +++ b/deployment/v3/mosip/masterdata-loader/copy_secrets.sh @@ -15,4 +15,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -copying_secrets # calling function \ No newline at end of file +copying_secrets # calling function diff --git a/deployment/v3/mosip/masterdata-loader/install.sh b/deployment/v3/mosip/masterdata-loader/install.sh index d90826249..c67a84df8 100755 --- a/deployment/v3/mosip/masterdata-loader/install.sh +++ b/deployment/v3/mosip/masterdata-loader/install.sh @@ -6,13 +6,13 @@ if [ $# -ge 1 ] ; then export KUBECONFIG=$1 fi +NS=masterdata-loader +CHART_VERSION=0.0.1-develop echo WARNING: This need to be executed only once at the begining for masterdata deployment. If reexecuted in a working env this will reset the whole master_data DB tables resulting in data loss. echo Please skip this if masterdata is already uploaded. 
read -p "CAUTION: Do you still want to continue(Y/n)" yn if [ $yn = "Y" ] then - NS=masterdata-loader - CHART_VERSION=12.0.1 helm delete masterdata-loader -n $NS echo Create $NS namespace kubectl create ns $NS @@ -33,7 +33,11 @@ if [ $yn = "Y" ] ./copy_secrets.sh echo Loading masterdata - helm -n $NS install masterdata-loader mosip/masterdata-loader --set mosipDataGithubBranch=v1.2.0.1 --version $CHART_VERSION --wait + helm -n $NS install masterdata-loader mosip/masterdata-loader \ + --set mosipDataGithubBranch="develop" \ + --set mosipDataGithubRepo="https://github.com/mosip/mosip-data" \ + --set mosipDataXlsfolderPath="\/home/mosip/mosip-data/mosip_master/xlsx" \ + --version $CHART_VERSION --wait --wait-for-jobs else break diff --git a/deployment/v3/mosip/mock-abis/delete.sh b/deployment/v3/mosip/mock-abis/delete.sh index 2a5e9b227..c7b839ce3 100755 --- a/deployment/v3/mosip/mock-abis/delete.sh +++ b/deployment/v3/mosip/mock-abis/delete.sh @@ -22,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_abis # calling function \ No newline at end of file +deleting_abis # calling function diff --git a/deployment/v3/mosip/mock-abis/install.sh b/deployment/v3/mosip/mock-abis/install.sh index 8348ad66d..b2cb95ed7 100755 --- a/deployment/v3/mosip/mock-abis/install.sh +++ b/deployment/v3/mosip/mock-abis/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=abis -CHART_VERSION=12.0.2 +CHART_VERSION=12.0.x-develop echo Create $NS namespace kubectl create ns $NS diff --git a/deployment/v3/mosip/mock-abis/restart.sh b/deployment/v3/mosip/mock-abis/restart.sh index 16ccc1b84..14391f7bf 100755 --- a/deployment/v3/mosip/mock-abis/restart.sh +++ b/deployment/v3/mosip/mock-abis/restart.sh @@ -8,7 +8,7 @@ fi function Restarting_abis() { NS=abis - kubectl -n $NS rollout restart deploy + kubectl -n $NS rollout restart deploy kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status @@ -22,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_abis # calling function \ No newline at end of file +Restarting_abis # calling function diff --git a/deployment/v3/mosip/mock-mv/delete.sh b/deployment/v3/mosip/mock-mv/delete.sh index 02a2dca32..fb6001cd5 100755 --- a/deployment/v3/mosip/mock-mv/delete.sh +++ b/deployment/v3/mosip/mock-mv/delete.sh @@ -1,6 +1,5 @@ #!/bin/bash # Uninstalls mock mv - function deleting_mockmv() { NS=abis while true; do @@ -22,4 +21,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_mockmv # calling function \ No newline at end of file +deleting_mockmv # calling function diff --git a/deployment/v3/mosip/mock-mv/install.sh b/deployment/v3/mosip/mock-mv/install.sh index d71dde3cb..cef945e5a 100755 --- a/deployment/v3/mosip/mock-mv/install.sh +++ b/deployment/v3/mosip/mock-mv/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=abis 
-CHART_VERSION=12.0.2 +CHART_VERSION=12.0.x-develop echo Create $NS namespace kubectl create ns $NS @@ -25,7 +25,9 @@ function installing_mockmv() { helm -n $NS install mock-mv mosip/mock-mv --version $CHART_VERSION kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status - echo Intalled mock-mv services + + echo Installed mock-mv services + return 0 } diff --git a/deployment/v3/mosip/mock-mv/restart.sh b/deployment/v3/mosip/mock-mv/restart.sh index 8b1a10814..f872b8f78 100755 --- a/deployment/v3/mosip/mock-mv/restart.sh +++ b/deployment/v3/mosip/mock-mv/restart.sh @@ -22,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_mockmv # calling function \ No newline at end of file +Restarting_mockmv # calling function diff --git a/deployment/v3/mosip/mock-smtp/README.md b/deployment/v3/mosip/mock-smtp/README.md index decd1f4b8..c210d5f8f 100644 --- a/deployment/v3/mosip/mock-smtp/README.md +++ b/deployment/v3/mosip/mock-smtp/README.md @@ -29,4 +29,4 @@ The chart here installs a Mock SMTP and Mock SMS accessed over an https URL. * Install ```sh ./install.sh -``` \ No newline at end of file +``` diff --git a/deployment/v3/mosip/mock-smtp/delete.sh b/deployment/v3/mosip/mock-smtp/delete.sh index e9b540753..9a05c7a5a 100755 --- a/deployment/v3/mosip/mock-smtp/delete.sh +++ b/deployment/v3/mosip/mock-smtp/delete.sh @@ -22,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -mock_smtp # calling function \ No newline at end of file +mock_smtp # calling function diff --git a/deployment/v3/mosip/mock-smtp/install.sh b/deployment/v3/mosip/mock-smtp/install.sh index a36cd32f2..0c6fdb796 100755 --- a/deployment/v3/mosip/mock-smtp/install.sh +++ b/deployment/v3/mosip/mock-smtp/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=mock-smtp -CHART_VERSION=1.0.0 +CHART_VERSION=12.0.x-develop echo Create $NS namespace kubectl create ns $NS @@ -15,7 +15,7 @@ kubectl create ns $NS function mock_smtp() { echo Istio label kubectl label ns $NS istio-injection=enabled --overwrite - # helm repo update + helm repo update echo "Copy configmaps" sed -i 's/\r$//' copy_cm.sh diff --git a/deployment/v3/mosip/mock-smtp/restart.sh b/deployment/v3/mosip/mock-smtp/restart.sh index 108bd68d4..1334aee6d 100755 --- a/deployment/v3/mosip/mock-smtp/restart.sh +++ b/deployment/v3/mosip/mock-smtp/restart.sh @@ -11,6 +11,7 @@ function Restarting_smtp() { kubectl -n $NS rollout restart deploy kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status + echo Restarted mock-smtp services return 0 } @@ -21,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_smtp # calling function \ No newline at end of file +Restarting_smtp # calling function diff --git a/deployment/v3/mosip/mosip-file-server/delete.sh b/deployment/v3/mosip/mosip-file-server/delete.sh index 
aec896f91..0a566d441 100755 --- a/deployment/v3/mosip/mosip-file-server/delete.sh +++ b/deployment/v3/mosip/mosip-file-server/delete.sh @@ -27,4 +27,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_mfs # calling function \ No newline at end of file +deleting_mfs # calling function diff --git a/deployment/v3/mosip/mosip-file-server/install.sh b/deployment/v3/mosip/mosip-file-server/install.sh index faf2dc255..0120c8754 100755 --- a/deployment/v3/mosip/mosip-file-server/install.sh +++ b/deployment/v3/mosip/mosip-file-server/install.sh @@ -8,7 +8,7 @@ fi NS=mosip-file-server -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS diff --git a/deployment/v3/mosip/mosip-file-server/restart.sh b/deployment/v3/mosip/mosip-file-server/restart.sh index 45d34083d..5e04e509d 100755 --- a/deployment/v3/mosip/mosip-file-server/restart.sh +++ b/deployment/v3/mosip/mosip-file-server/restart.sh @@ -22,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_mfs # calling function \ No newline at end of file +Restarting_mfs # calling function diff --git a/deployment/v3/mosip/mosip-file-server/values.yaml b/deployment/v3/mosip/mosip-file-server/values.yaml new file mode 100644 index 000000000..165e37423 --- /dev/null +++ b/deployment/v3/mosip/mosip-file-server/values.yaml @@ -0,0 +1,24 @@ +image: + registry: docker.io + repository: mosipdev/mosip-file-server + tag: develop + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +regclient: + version: 1.2.1-SNAPSHOT + mountDir: /home/mosip/build_files/ + ## Currently this is hardcoded. Will change in the future + cryptoKey: bBQX230Wskq6XpoZ1c+Ep1D+znxfT89NxLQ7P4KFkc4= + upgradeServerUrl: https://regclient.sandbox.xzy.net + ## Here we check the health of syncdata service. The service must be accessible over internal channel. + healthCheckUrl: http://api-internal.sandbox.v3box1.net/v1/syncdata/actuator/health diff --git a/deployment/v3/mosip/mosipcertmanager/README.md b/deployment/v3/mosip/mosipcertmanager/README.md new file mode 100644 index 000000000..d35d89e33 --- /dev/null +++ b/deployment/v3/mosip/mosipcertmanager/README.md @@ -0,0 +1,17 @@ +# mosipcertmanager +Helm chart for installing mosipcertmanager + +## Introduction +It's a cronjob that checks DBs for partner certificate expiry dates and renews the certificates if expired. 
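If you want to verify the renewal job without waiting for its schedule, a run can be triggered manually once the chart is installed. This is a minimal sketch using standard kubectl commands; it assumes the chart creates a CronJob object named `mosipcertmanager` in the `mosipcertmanager` namespace (the actual object name may differ per chart version).
```sh
# List the CronJob and its schedule in the namespace used by install.sh
kubectl -n mosipcertmanager get cronjobs

# Trigger an ad-hoc run from the CronJob template and follow its logs;
# "mosipcertmanager" as the CronJob name is an assumption
kubectl -n mosipcertmanager create job cert-check-manual --from=cronjob/mosipcertmanager
kubectl -n mosipcertmanager logs -f job/cert-check-manual
```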
+ +## Install +RUN Install script +``` +./install.sh +``` + +# TL;DR +```console +$ helm repo add mosip https://mosip.github.io +$ helm install my-release mosip/mosipcertmanager +``` \ No newline at end of file diff --git a/deployment/v3/mosip/mosipcertmanager/copy_cm.sh b/deployment/v3/mosip/mosipcertmanager/copy_cm.sh new file mode 100755 index 000000000..a551f6844 --- /dev/null +++ b/deployment/v3/mosip/mosipcertmanager/copy_cm.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copy configmaps from other namespaces +# DST_NS: Destination (current) namespace + +function copying_cm() { + COPY_UTIL=../../utils/copy_cm_func.sh + DST_NS=mosipcertmanager + + $COPY_UTIL configmap global default $DST_NS + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +copying_cm # calling function + + + diff --git a/deployment/v3/mosip/mosipcertmanager/copy_secrets.sh b/deployment/v3/mosip/mosipcertmanager/copy_secrets.sh new file mode 100755 index 000000000..3b3654353 --- /dev/null +++ b/deployment/v3/mosip/mosipcertmanager/copy_secrets.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copy secrets from other namespaces +# DST_NS: Destination namespace + +function copying_secrets() { + COPY_UTIL=../../utils/copy_cm_func.sh + DST_NS=mosipcertmanager + $COPY_UTIL secret s3 s3 $DST_NS + $COPY_UTIL secret postgres-postgresql postgres $DST_NS + $COPY_UTIL secret keycloak-client-secrets keycloak $DST_NS + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +copying_secrets # calling function \ No newline at end of file diff --git a/deployment/v3/mosip/mosipcertmanager/delete.sh b/deployment/v3/mosip/mosipcertmanager/delete.sh new file mode 100755 index 000000000..b089ab29b --- /dev/null +++ b/deployment/v3/mosip/mosipcertmanager/delete.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Uninstalls print service +## Usage: ./delete.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + +function deleting_mosipcertmanager() { + NS=mosipcertmanager + while true; do + read -p "Are you sure you want to delete print helm chart?(Y/n) " yn + if [ $yn = "Y" ] + then + helm -n $NS delete mosipcertmanager + break + else + break + fi + done + return 0 +} + +# set commands for error handling. 
+set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +deleting_mosipcertmanager # calling function diff --git a/deployment/v3/mosip/mosipcertmanager/install.sh b/deployment/v3/mosip/mosipcertmanager/install.sh new file mode 100755 index 000000000..321116361 --- /dev/null +++ b/deployment/v3/mosip/mosipcertmanager/install.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# Installs sample print service +## Usage: ./restart.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + + +NS=mosipcertmanager +CHART_VERSION=0.0.1-develop + +echo Create $NS namespace +kubectl create ns $NS + +function installing_mosipcertmanager() { + echo Istio label + kubectl label ns $NS istio-injection=disabled --overwrite + helm repo update + + echo Copy configmaps + sed -i 's/\r$//' copy_cm.sh + ./copy_cm.sh + + echo Copy secrets + sed -i 's/\r$//' copy_secrets.sh + ./copy_secrets.sh + + echo Installing mosipcertmanager + helm -n $NS install mosipcertmanager mosip/mosipcertmanager --wait --version $CHART_VERSION + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +installing_mosipcertmanager # calling function diff --git a/deployment/v3/mosip/packetmanager/delete.sh b/deployment/v3/mosip/packetmanager/delete.sh index af4c734da..825abecf3 100755 --- a/deployment/v3/mosip/packetmanager/delete.sh +++ b/deployment/v3/mosip/packetmanager/delete.sh @@ -27,4 +27,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_packetmanager # calling function \ No newline at end of file +deleting_packetmanager # calling function diff --git a/deployment/v3/mosip/packetmanager/restart.sh b/deployment/v3/mosip/packetmanager/restart.sh index 550dff7de..c5068b29a 100755 --- a/deployment/v3/mosip/packetmanager/restart.sh +++ b/deployment/v3/mosip/packetmanager/restart.sh @@ -11,6 +11,7 @@ function Restarting_packetmanager() { kubectl -n $NS rollout restart deploy kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status + echo Restarted packetmanager services return 0 } @@ -21,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_packetmanager # calling function \ No newline at end of file +Restarting_packetmanager # calling function diff --git a/deployment/v3/mosip/partner-onboarder/README.md b/deployment/v3/mosip/partner-onboarder/README.md index 3c2105d40..b5712d727 100644 --- a/deployment/v3/mosip/partner-onboarder/README.md +++ b/deployment/v3/mosip/partner-onboarder/README.md @@ -39,6 +39,3 @@ Loads certs for default partners for sandbox. Refer [mosip-onboarding repo](http 3. 
Upload of certificate will not be allowed to update other domain certificate Resolution: This is expected when you try to upload `ida-cred` certificate twice. It should only run once and if you see this error while uploading a second time it can be ignored as the cert is already present. - - - diff --git a/deployment/v3/mosip/partner-onboarder/copy_cm.sh b/deployment/v3/mosip/partner-onboarder/copy_cm.sh index f7fd8e571..95c00f665 100755 --- a/deployment/v3/mosip/partner-onboarder/copy_cm.sh +++ b/deployment/v3/mosip/partner-onboarder/copy_cm.sh @@ -18,4 +18,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -copying_cm # calling function \ No newline at end of file +copying_cm # calling function diff --git a/deployment/v3/mosip/partner-onboarder/copy_secrets.sh b/deployment/v3/mosip/partner-onboarder/copy_secrets.sh index 2ae4ff548..ea9000590 100755 --- a/deployment/v3/mosip/partner-onboarder/copy_secrets.sh +++ b/deployment/v3/mosip/partner-onboarder/copy_secrets.sh @@ -18,4 +18,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -copying_secrets # calling function \ No newline at end of file +copying_secrets # calling function diff --git a/deployment/v3/mosip/partner-onboarder/install.sh b/deployment/v3/mosip/partner-onboarder/install.sh index 0b3306ab1..bc799ae3d 100755 --- a/deployment/v3/mosip/partner-onboarder/install.sh +++ b/deployment/v3/mosip/partner-onboarder/install.sh @@ -21,7 +21,21 @@ if [ "$flag" = "n" ]; then fi NS=onboarder -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop + +echo "Do you have public domain & valid SSL? (Y/n) " +echo "Y: if you have public domain & valid ssl certificate" +echo "n: If you don't have a public domain and a valid SSL certificate. Note: It is recommended to use this option only in development environments." 
+read -p "" flag +if [ -z "$flag" ]; then + echo "'flag' was not provided; EXITING;" + exit 1; +fi +ENABLE_INSECURE='' +if [ "$flag" = "n" ]; then + ENABLE_INSECURE='--set onboarding.configmaps.onboarding.ENABLE_INSECURE=true'; +fi echo Create $NS namespace kubectl create ns $NS diff --git a/deployment/v3/mosip/pms-migration-utility/copy_cm.sh b/deployment/v3/mosip/pms-migration-utility/copy_cm.sh index 478f2bbcb..5e00f883f 100755 --- a/deployment/v3/mosip/pms-migration-utility/copy_cm.sh +++ b/deployment/v3/mosip/pms-migration-utility/copy_cm.sh @@ -18,4 +18,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -copying_cm # calling function \ No newline at end of file +copying_cm # calling function diff --git a/deployment/v3/mosip/pms/copy_cm.sh b/deployment/v3/mosip/pms/copy_cm.sh index 0d3e9be49..9fbd381ff 100755 --- a/deployment/v3/mosip/pms/copy_cm.sh +++ b/deployment/v3/mosip/pms/copy_cm.sh @@ -18,4 +18,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -copying_cm # calling function \ No newline at end of file +copying_cm # calling function diff --git a/deployment/v3/mosip/pms/delete.sh b/deployment/v3/mosip/pms/delete.sh index 6d480a977..1db0145b7 100755 --- a/deployment/v3/mosip/pms/delete.sh +++ b/deployment/v3/mosip/pms/delete.sh @@ -15,6 +15,7 @@ function deleting_pms() { helm -n $NS delete pms-partner helm -n $NS delete pms-policy helm -n $NS delete pmp-ui + helm -n $NS delete pmp-reactjs-ui break else break @@ -29,4 +30,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_pms # calling function \ No newline at end of file +deleting_pms # calling function diff --git a/deployment/v3/mosip/pms/install.sh b/deployment/v3/mosip/pms/install.sh index 4213e2ed1..a20b848f6 100755 --- a/deployment/v3/mosip/pms/install.sh +++ b/deployment/v3/mosip/pms/install.sh @@ -7,11 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=pms -CHART_VERSION=12.0.1 -PMP_UI_CHART_VERSION=12.0.2 - -API_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-api-internal-host}) -PMP_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-pmp-host}) +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS @@ -27,21 +23,39 @@ function installing_pms() { INTERNAL_API_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-api-internal-host}) PMP_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-pmp-host}) + PMP_NEW_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-pmp-reactjs-ui-new-host}) + + PARTNER_MANAGER_SERVICE_NAME="pms-partner" + POLICY_MANAGER_SERVICE_NAME="pms-policy" echo Installing partner manager - helm -n $NS install pms-partner mosip/pms-partner --set istio.corsPolicy.allowOrigins\[0\].prefix=https://$PMP_HOST --version $CHART_VERSION + helm -n $NS install $PARTNER_MANAGER_SERVICE_NAME mosip/pms-partner \ + --set istio.corsPolicy.allowOrigins\[0\].prefix=https://$PMP_HOST \ + --set 
istio.corsPolicy.allowOrigins\[1\].prefix=https://$PMP_NEW_HOST \ + --version $CHART_VERSION echo Installing policy manager - helm -n $NS install pms-policy mosip/pms-policy --set istio.corsPolicy.allowOrigins\[0\].prefix=https://$PMP_HOST --version $CHART_VERSION + helm -n $NS install $POLICY_MANAGER_SERVICE_NAME mosip/pms-policy \ + --set istio.corsPolicy.allowOrigins\[0\].prefix=https://$PMP_HOST \ + --set istio.corsPolicy.allowOrigins\[1\].prefix=https://$PMP_NEW_HOST \ + --version $CHART_VERSION echo Installing pmp-ui - helm -n $NS install pmp-ui mosip/pmp-ui --set pmp.apiUrl=https://$INTERNAL_API_HOST/ --set istio.hosts=["$PMP_HOST"] --version $PMP_UI_CHART_VERSION + helm -n $NS install pmp-ui mosip/pmp-ui --set pmp.apiUrl=https://$INTERNAL_API_HOST/ --set istio.hosts=["$PMP_HOST"] --version $CHART_VERSION + + echo Installing pmp-reactjs-ui-new + helm -n $NS install pmp-reactjs-ui mosip/pmp-reactjs-ui \ + --set pmp_new.react_app_partner_manager_api_base_url="https://$INTERNAL_API_HOST/v1/partnermanager" \ + --set pmp_new.react_app_policy_manager_api_base_url="https://$INTERNAL_API_HOST/v1/policymanager" \ + --set pmp_new.pms_partner_manager_internal_service_url="http://$PARTNER_MANAGER_SERVICE_NAME.$NS/v1/partnermanager" \ + --set pmp_new.pms_policy_manager_internal_service_url="http://$POLICY_MANAGER_SERVICE_NAME.$NS/v1/policymanager" \ + --set istio.hosts=["$PMP_NEW_HOST"] --version $CHART_VERSION kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status echo Installed pms services - echo "Admin portal URL: https://$PMP_HOST/pmp-ui/" + echo "Partner management portal URL: https://$PMP_HOST/pmp-ui/" return 0 } diff --git a/deployment/v3/mosip/pms/restart.sh b/deployment/v3/mosip/pms/restart.sh index 7f83a7c9f..8d11b9581 100755 --- a/deployment/v3/mosip/pms/restart.sh +++ b/deployment/v3/mosip/pms/restart.sh @@ -22,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_pms # calling function \ No newline at end of file +Restarting_pms # calling function diff --git a/deployment/v3/mosip/prereg/delete.sh b/deployment/v3/mosip/prereg/delete.sh index c84d782c7..dc0714790 100755 --- a/deployment/v3/mosip/prereg/delete.sh +++ b/deployment/v3/mosip/prereg/delete.sh @@ -1,6 +1,5 @@ #!/bin/bash # Uninstalls all prereg helm charts - function deleting_prereg() { while true; do read -p "Are you sure you want to delete all prereg helm charts?(Y/n) " yn @@ -8,7 +7,6 @@ function deleting_prereg() { then kubectl -n prereg delete -f rate-control-envoyfilter.yaml helm -n prereg delete prereg-gateway - helm -n prereg delete prereg-captcha helm -n prereg delete prereg-application helm -n prereg delete prereg-batchjob helm -n prereg delete prereg-booking @@ -28,4 +26,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_prereg # calling function \ No newline at end of file +deleting_prereg # calling function diff --git a/deployment/v3/mosip/prereg/install.sh b/deployment/v3/mosip/prereg/install.sh index 79276563c..6d7072be7 100755 --- a/deployment/v3/mosip/prereg/install.sh +++ 
b/deployment/v3/mosip/prereg/install.sh @@ -29,9 +29,6 @@ function installing_prereg() { echo Install prereg-gateway helm -n $NS install prereg-gateway mosip/prereg-gateway --set istio.hosts[0]=$PREREG_HOST --version $CHART_VERSION - echo Installing prereg-captcha - helm -n $NS install prereg-captcha mosip/prereg-captcha --version $CHART_VERSION - echo Installing prereg-application helm -n $NS install prereg-application mosip/prereg-application --version $CHART_VERSION diff --git a/deployment/v3/mosip/prereg/restart.sh b/deployment/v3/mosip/prereg/restart.sh index a8df25e79..1f6f9fab2 100755 --- a/deployment/v3/mosip/prereg/restart.sh +++ b/deployment/v3/mosip/prereg/restart.sh @@ -6,7 +6,6 @@ if [ $# -ge 1 ] ; then export KUBECONFIG=$1 fi - function Restarting_prereg() { NS=prereg kubectl -n $NS rollout restart deploy @@ -23,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_prereg # calling function \ No newline at end of file +Restarting_prereg # calling function diff --git a/deployment/v3/mosip/print/delete.sh b/deployment/v3/mosip/print/delete.sh index ab0022578..3fa35effc 100755 --- a/deployment/v3/mosip/print/delete.sh +++ b/deployment/v3/mosip/print/delete.sh @@ -27,4 +27,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_print # calling function \ No newline at end of file +deleting_print # calling function diff --git a/deployment/v3/mosip/print/install.sh b/deployment/v3/mosip/print/install.sh index 00c4e120d..7b64b9092 100755 --- a/deployment/v3/mosip/print/install.sh +++ b/deployment/v3/mosip/print/install.sh @@ -9,7 +9,6 @@ fi NS=print CHART_VERSION=12.0.1 - echo Create $NS namespace kubectl create ns $NS diff --git a/deployment/v3/mosip/print/restart.sh b/deployment/v3/mosip/print/restart.sh index d83382d15..76fd74423 100755 --- a/deployment/v3/mosip/print/restart.sh +++ b/deployment/v3/mosip/print/restart.sh @@ -6,12 +6,12 @@ if [ $# -ge 1 ] ; then export KUBECONFIG=$1 fi - function Restarting_print() { NS=print kubectl -n $NS rollout restart deploy kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status + echo Restarted print services return 0 } diff --git a/deployment/v3/mosip/regclient/README.md b/deployment/v3/mosip/regclient/README.md index 2464481b1..4bec9fd41 100644 --- a/deployment/v3/mosip/regclient/README.md +++ b/deployment/v3/mosip/regclient/README.md @@ -10,6 +10,9 @@ The chart here installs a regclient downloader accessed over an http URL. ```sh ./install.sh ``` +## Download +The download URL will be available at `https://your-regclient-host`. Example: `https://regclient.sandbox.xyz.net`. + ## Customization If you want to add extra environment variables to the regclient docker do follow the below mentioned steps. 1. Add the variables in extraEnvVars section of the sample 'values.yaml.sample' file given.
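For context, a minimal sketch of wiring the customized values file into the chart install is shown below; since the excerpt does not show the helm command inside `install.sh`, the `mosip/regclient` chart name and the `-f values.yaml` invocation here are assumptions rather than the script's actual flow.
```sh
# Start from the sample, add your extraEnvVars, then pass the file to the chart
cp values.yaml.sample values.yaml
helm -n regclient upgrade --install regclient mosip/regclient \
  -f values.yaml --version 0.0.1-develop   # version taken from install.sh in this change
```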
diff --git a/deployment/v3/mosip/regclient/install.sh b/deployment/v3/mosip/regclient/install.sh index a9d899f75..a0b7e2794 100755 --- a/deployment/v3/mosip/regclient/install.sh +++ b/deployment/v3/mosip/regclient/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=regclient -CHART_VERSION=12.0.2 +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS diff --git a/deployment/v3/mosip/regclient/values.yaml.sample b/deployment/v3/mosip/regclient/values.yaml.sample new file mode 100644 index 000000000..417a33cca --- /dev/null +++ b/deployment/v3/mosip/regclient/values.yaml.sample @@ -0,0 +1,3 @@ +extraEnvVars: + - name: client_certificate_env + value: mosip_cer.cer diff --git a/deployment/v3/mosip/regproc/delete.sh b/deployment/v3/mosip/regproc/delete.sh index 02a382d1d..ad4824e93 100755 --- a/deployment/v3/mosip/regproc/delete.sh +++ b/deployment/v3/mosip/regproc/delete.sh @@ -1,6 +1,5 @@ #!/bin/bash # Uninstalls all regproc helm charts - function deleting_regproc() { NS=regproc while true; do @@ -22,6 +21,7 @@ function deleting_regproc() { helm -n $NS delete regproc-notifier helm -n $NS delete regproc-trans helm -n $NS delete regproc-reprocess + helm -n $NS delete regproc-landingzone break else break @@ -36,4 +36,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_regproc # calling function \ No newline at end of file +deleting_regproc # calling function diff --git a/deployment/v3/mosip/regproc/install.sh b/deployment/v3/mosip/regproc/install.sh index 806c628fe..2e08dd181 100755 --- a/deployment/v3/mosip/regproc/install.sh +++ b/deployment/v3/mosip/regproc/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=regproc -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS @@ -70,7 +70,8 @@ function installing_regproc() { helm -n $NS install regproc-landingzone mosip/regproc-landingzone --version $CHART_VERSION kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status - echo Intalled regproc services + + echo Installed regproc services return 0 } diff --git a/deployment/v3/mosip/regproc/restart.sh b/deployment/v3/mosip/regproc/restart.sh index 84ae32390..cf622fa0b 100755 --- a/deployment/v3/mosip/regproc/restart.sh +++ b/deployment/v3/mosip/regproc/restart.sh @@ -6,7 +6,6 @@ if [ $# -ge 1 ] ; then export KUBECONFIG=$1 fi - function Restarting_regproc() { NS=regproc kubectl -n $NS rollout restart deploy @@ -23,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_regproc # calling function \ No newline at end of file +Restarting_regproc # calling function diff --git a/deployment/v3/mosip/regproc/topic/create_topics.sh b/deployment/v3/mosip/regproc/topic/create_topics.sh new file mode 100755 index 000000000..a495c6857 --- /dev/null +++ b/deployment/v3/mosip/regproc/topic/create_topics.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# + +function create_topics() { + read -p "Enter IAM username: " iam_user + + # This username is hardcoded in sql scripts + DB_PWD=$(kubectl get secret --namespace postgres db-common-secrets -o 
jsonpath={.data.db-dbuser-password} | base64 --decode) + DB_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-api-internal-host}) + DB_PORT=5432 + + echo Creating topics + cd lib + python3 create_topics.py $DB_HOST $DB_PWD $iam_user ../topics.xlsx +return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +create_topics # calling function diff --git a/deployment/v3/mosip/resident/copy_cm.sh b/deployment/v3/mosip/resident/copy_cm.sh index 8fdbecbfb..bcb63d042 100755 --- a/deployment/v3/mosip/resident/copy_cm.sh +++ b/deployment/v3/mosip/resident/copy_cm.sh @@ -18,4 +18,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -copying_cm # calling function \ No newline at end of file +copying_cm # calling function diff --git a/deployment/v3/mosip/resident/delete.sh b/deployment/v3/mosip/resident/delete.sh index 8661168a4..6be4efb9d 100755 --- a/deployment/v3/mosip/resident/delete.sh +++ b/deployment/v3/mosip/resident/delete.sh @@ -1,6 +1,5 @@ #!/bin/bash # Uninstalls resident - function deleting_resident() { NS=resident while true; do @@ -9,6 +8,7 @@ function deleting_resident() { then helm -n $NS delete resident helm -n $NS delete resident-ui +# kubectl delete -n $NS -f resident-ui break else break diff --git a/deployment/v3/mosip/resident/install.sh b/deployment/v3/mosip/resident/install.sh index ded8fe9ff..f8c819a75 100755 --- a/deployment/v3/mosip/resident/install.sh +++ b/deployment/v3/mosip/resident/install.sh @@ -7,8 +7,8 @@ if [ $# -ge 1 ] ; then fi NS=resident -CHART_VERSION=12.0.1 -RESIDENT_UI_CHART_VERSION=0.0.1 +CHART_VERSION=0.0.1-develop +RESIDENT_UI_CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS @@ -25,14 +25,12 @@ function installing_resident() { echo Copy secrets sed -i 's/\r$//' copy_secrets.sh ./copy_secrets.sh - echo Setting up dummy values for Resident OIDC Client ID kubectl create secret generic resident-oidc-onboarder-key -n $NS --from-literal=resident-oidc-clientid='' --dry-run=client -o yaml | kubectl apply -f - ./copy_cm_func.sh secret resident-oidc-onboarder-key resident config-server kubectl -n config-server set env --keys=resident-oidc-clientid --from secret/resident-oidc-onboarder-key deployment/config-server --prefix=SPRING_CLOUD_CONFIG_SERVER_OVERRIDES_ kubectl -n config-server get deploy -o name | xargs -n1 -t kubectl -n config-server rollout status - echo "Do you have public domain & valid SSL? (Y/n) " echo "Y: if you have public domain & valid ssl certificate" echo "n: If you don't have a public domain and a valid SSL certificate. Note: It is recommended to use this option only in development environments." 
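Once a real OIDC client is onboarded, the placeholder client ID seeded above can be replaced and propagated to config-server through the same secret. A sketch, assuming the environment wiring created by `install.sh` (the `SPRING_CLOUD_CONFIG_SERVER_OVERRIDES_` reference to the secret) is already in place:
```sh
# Overwrite the dummy secret with the actual client ID (placeholder value shown)
kubectl -n resident create secret generic resident-oidc-onboarder-key \
  --from-literal=resident-oidc-clientid='<actual-oidc-client-id>' \
  --dry-run=client -o yaml | kubectl apply -f -

# Copy the updated secret to the config-server namespace and restart it to pick up the value
./copy_cm_func.sh secret resident-oidc-onboarder-key resident config-server
kubectl -n config-server rollout restart deployment/config-server
```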
@@ -50,6 +48,7 @@ function installing_resident() { API_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-api-internal-host}) RESIDENT_HOST=$(kubectl get cm global -o jsonpath={.data.mosip-resident-host}) + echo Installing Resident helm -n $NS install resident mosip/resident --set istio.corsPolicy.allowOrigins\[0\].prefix=https://$RESIDENT_HOST --version $CHART_VERSION $ENABLE_INSECURE diff --git a/deployment/v3/mosip/resident/restart.sh b/deployment/v3/mosip/resident/restart.sh index 971281dbb..ba0e7d86f 100755 --- a/deployment/v3/mosip/resident/restart.sh +++ b/deployment/v3/mosip/resident/restart.sh @@ -6,12 +6,12 @@ if [ $# -ge 1 ] ; then export KUBECONFIG=$1 fi - function Restarting_resident() { NS=resident kubectl -n $NS rollout restart deploy kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status + echo Restarted resident services return 0 } @@ -22,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_resident # calling function \ No newline at end of file +Restarting_resident # calling function diff --git a/deployment/v3/mosip/restart-cron/README.md b/deployment/v3/mosip/restart-cron/README.md index fb6751faf..0ce99d996 100644 --- a/deployment/v3/mosip/restart-cron/README.md +++ b/deployment/v3/mosip/restart-cron/README.md @@ -3,11 +3,13 @@ ## Introduction RESTART_CRON chart deploys a CronJob that runs on a schedule specified in the values.yaml file. The CronJob restarts deployments in the specified namespaces using the kubectl rollout restart command and waits for them to reach the desired state using the kubectl rollout status command.. -For now this cronjob is being used to restart packetcreator and authdemo service in a cluster, It can be used to restart other services aswell. +For now this cronjob is used to restart the idgenerator service in a cluster; it can also be used to restart other services such as packetcreator and authdemo. + +The idgenerator service is restarted every four hours, per the schedule set in install.sh. ## Prerequisites -* Auth demo, Packetcreator and DSLRIG to be running in the same cluster. -* If Auth demo and Packetcreator service is not running in the same cluster then update the values.yaml file by enabling the only service which is present or which you want to restart. +* Auth demo, Packetcreator, Idgenerator and DSLRIG should be running in the same cluster. +* If the Auth demo, Idgenerator or Packetcreator service is not running in the same cluster, update the values.yaml file to enable only the services which are present or which you want to restart. * Set `values.yaml` to run cronjob for restarting specific services. * run `./install.sh`. diff --git a/deployment/v3/mosip/restart-cron/install.sh b/deployment/v3/mosip/restart-cron/install.sh index 92e94493b..0ec9f3950 100755 --- a/deployment/v3/mosip/restart-cron/install.sh +++ b/deployment/v3/mosip/restart-cron/install.sh @@ -13,7 +13,7 @@ echo Create $NS namespace kubectl create ns $NS function installing_restart-cron() { - echo "This script installs cronjob which restarts packetcreator and authdemo service and other services based on configurations in value.yaml file, Do you want to install? 
(Y/n) " + echo "This script installs cronjob which restarts packetcreator, Idgenerator and authdemo service and other services based on configurations in value.yaml file, Do you want to install? (Y/n) " echo "Y: if you wish to install this cronjob in your cluster" echo "n: if you don't want to install this cronjob in your cluster" read -p "" flag @@ -26,27 +26,13 @@ function installing_restart-cron() { read -p "Is values.yaml for restart-cron chart set correctly as part of Pre-requisites?(Y/n) " yn; if [ $yn = "Y" ]; then - read -p "Please enter the time(hr) to run the cronjob every day (time: 0-23) : " time - if [ -z "$time" ]; then - echo "ERROT: Time cannot be empty; EXITING;"; - exit 1; - fi - if ! [ $time -eq $time ] 2>/dev/null; then - echo "ERROR: Time $time is not a number; EXITING;"; - exit 1; - fi - if [ $time -gt 23 ] || [ $time -lt 0 ] ; then - echo "ERROR: Time should be in range ( 0-23 ); EXITING;"; - exit 1; - fi - echo Istio label kubectl label ns $NS istio-injection=disabled --overwrite helm repo update - echo Installing restart-cron + echo Installing restart-cron idgenerator helm -n $NS install restart-cron mosip/restart-cron \ - --set schedule.crontime="0 $time * * *" \ + --set schedule.crontime="0 */4 * * 1-5" \ -f values.yaml \ --version $CHART_VERSION echo Installed restart-cron. diff --git a/deployment/v3/mosip/restart-cron/values.yaml b/deployment/v3/mosip/restart-cron/values.yaml index 730c7114f..a9c9f9027 100644 --- a/deployment/v3/mosip/restart-cron/values.yaml +++ b/deployment/v3/mosip/restart-cron/values.yaml @@ -1,7 +1,10 @@ namespaces: - name: packetcreator - enabled: true + enabled: false deploymentName: packetcreator - name: authdemo + enabled: false + deploymentName: authdemo + - name: kernel enabled: true - deploymentName: authdemo \ No newline at end of file + deploymentName: idgenerator diff --git a/deployment/v3/mosip/tusd/copy_cm.sh b/deployment/v3/mosip/tusd/copy_cm.sh index 25f50c00a..4ff8b236f 100755 --- a/deployment/v3/mosip/tusd/copy_cm.sh +++ b/deployment/v3/mosip/tusd/copy_cm.sh @@ -19,6 +19,3 @@ set -o nounset ## set -u : exit the script if you try to use an uninitialised set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes copying_cm # calling function - - - diff --git a/deployment/v3/mosip/tusd/install.sh b/deployment/v3/mosip/tusd/install.sh index 265376ea2..0156b0f0e 100755 --- a/deployment/v3/mosip/tusd/install.sh +++ b/deployment/v3/mosip/tusd/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=tusd -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS diff --git a/deployment/v3/mosip/websub/delete.sh b/deployment/v3/mosip/websub/delete.sh index 9b6d56fe0..ff5108eac 100755 --- a/deployment/v3/mosip/websub/delete.sh +++ b/deployment/v3/mosip/websub/delete.sh @@ -28,4 +28,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_websub # calling function \ No newline at end of file +deleting_websub # calling function diff --git a/deployment/v3/mosip/websub/restart.sh b/deployment/v3/mosip/websub/restart.sh index c7ebb0f4a..f9184180b 100755 --- a/deployment/v3/mosip/websub/restart.sh +++ b/deployment/v3/mosip/websub/restart.sh @@ -6,12 +6,12 @@ if [ $# -ge 1 ] ; then export KUBECONFIG=$1 fi - function 
Restarting_websub() { NS=websub kubectl -n $NS rollout restart deploy kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status + echo Restarted websub services return 0 } @@ -22,4 +22,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -Restarting_websub # calling function \ No newline at end of file +Restarting_websub # calling function diff --git a/deployment/v3/terraform/aws/README.md b/deployment/v3/terraform/aws/README.md new file mode 100644 index 000000000..c014f3b20 --- /dev/null +++ b/deployment/v3/terraform/aws/README.md @@ -0,0 +1,100 @@ +# Fetch variables via ENV variables + +``` +$ export TF_VAR_CLUSTER_NAME=dev +$ export TF_LOG="DEBUG" +$ export TF_LOG_PATH="/tmp/terraform.log" +``` + +* `TF_VAR_` : the prefix Terraform uses to read variables from environment variables +* `CLUSTER_NAME=dev` : the variable name and its value + + +# Terraform Setup for MOSIP Infrastructure + +## Overview +This Terraform configuration sets up the infrastructure for MOSIP (Modular Open Source Identity Platform) on AWS. +The setup includes security groups, an NGINX server, and a Kubernetes (K8S) cluster. + +## Requirements +* Terraform version: `v1.8.4` +* AWS Account +* AWS CLI configured with appropriate credentials + ``` + $ export AWS_ACCESS_KEY_ID= + $ export AWS_SECRET_ACCESS_KEY= + $ export TF_VAR_SSH_PRIVATE_KEY= + ``` + +## Files +* `main.tf`: Main Terraform script that defines providers, resources, and output values. +* `variables.tf`: Defines variables used in the Terraform scripts. +* `outputs.tf`: Provides the output values. +* `locals.tf`: Defines a local variable `SECURITY_GROUP` containing configuration parameters required for setting up security groups for Nginx and Kubernetes cluster nodes. +* `env.tfvars`: The tfvars file used to set the actual values of the variables. + +## Setup +* Initialize Terraform. + ``` + terraform init + ``` +* Review and modify variable values: + * Ensure `locals.tf` contains correct values for your setup. + * Update values in `env.tfvars` as per your organization's requirements. +* Validate and plan the Terraform scripts: + ``` + terraform validate + ``` + ``` + terraform plan -var-file="./env.tfvars" + ``` +* Apply the Terraform configuration: + ``` + terraform apply -var-file="./env.tfvars" + ``` + +## Destroy +To destroy AWS resources, follow the steps below: +* Ensure the `terraform.tfstate` file is available. + ``` + terraform destroy -var-file=./env.tfvars + ``` + +## Modules + +#### aws-resource-creation +This module is responsible for creating the AWS resources needed for the MOSIP platform, including security groups, an NGINX server, and Kubernetes cluster nodes. + +* Inputs: + * `CLUSTER_NAME`: The name of the Kubernetes cluster. + * `AWS_PROVIDER_REGION`: The AWS region for resource creation. + * `SSH_KEY_NAME`: The name of the SSH key for accessing instances. + * `K8S_INSTANCE_TYPE`: The instance type for Kubernetes nodes. + * `NGINX_INSTANCE_TYPE`: The instance type for the NGINX server. + * `MOSIP_DOMAIN`: The domain name for the MOSIP platform. + * `ZONE_ID`: The Route 53 hosted zone ID. + * `AMI`: The Amazon Machine Image ID for the instances. + * `SECURITY_GROUP`: Security group configurations. + +#### nginx-setup +This module sets up NGINX and configures it with the provided domain and SSL certificates.
+ +* Inputs: + * `NGINX_PUBLIC_IP`: The public IP address of the NGINX server. + * `MOSIP_DOMAIN`: The domain name for the MOSIP platform. + * `MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST`: List of private IP addresses of the Kubernetes nodes. + * `MOSIP_PUBLIC_DOMAIN_LIST`: List of public domain names. + * `CERTBOT_EMAIL`: The email ID for SSL certificate generation. + * `SSH_KEY_NAME`: SSH private key used for login (i.e., file content of SSH pem key). + +## Outputs +The following outputs are provided: + +* `K8S_CLUSTER_PUBLIC_IPS`: The public IP addresses of the Kubernetes cluster nodes. +* `K8S_CLUSTER_PRIVATE_IPS`: The private IP addresses of the Kubernetes cluster nodes. +* `NGINX_PUBLIC_IP`: The public IP address of the NGINX server. +* `NGINX_PRIVATE_IP`: The private IP address of the NGINX server. +* `MOSIP_NGINX_SG_ID`: The security group ID for the NGINX server. +* `MOSIP_K8S_SG_ID`: The security group ID for the Kubernetes cluster. +* `MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST`: The private IP addresses of the Kubernetes cluster nodes. +* `MOSIP_PUBLIC_DOMAIN_LIST`: The public domain names. diff --git a/deployment/v3/terraform/aws/env.tfvars b/deployment/v3/terraform/aws/env.tfvars new file mode 100644 index 000000000..4942a5b19 --- /dev/null +++ b/deployment/v3/terraform/aws/env.tfvars @@ -0,0 +1,9 @@ +MOSIP_DOMAIN = "tf5.mosip.net" +MOSIP_EMAIL_ID = "syed.salman@technoforte.co.in" +AWS_PROVIDER_REGION = "ap-south-1" +CLUSTER_NAME = "TF5" +K8S_INSTANCE_TYPE = "t2.micro" +NGINX_INSTANCE_TYPE = "t2.micro" +ZONE_ID = "Z090954828SJIEL6P5406" +AMI = "ami-0a7cf821b91bcccbc" +SSH_KEY_NAME = "mosip-aws" diff --git a/deployment/v3/terraform/aws/locals.tf b/deployment/v3/terraform/aws/locals.tf new file mode 100644 index 000000000..5762f0114 --- /dev/null +++ b/deployment/v3/terraform/aws/locals.tf @@ -0,0 +1,80 @@ +locals { + SECURITY_GROUP = { + NGINX_SECURITY_GROUP = [ + { + description : "SSH login port" + from_port : 22, + to_port : 22, + protocol : "TCP", + cidr_blocks = ["0.0.0.0/0"], + ipv6_cidr_blocks = ["::/0"] + }, + { + description : "HTTP port" + from_port : 80, + to_port : 80, + protocol : "TCP", + cidr_blocks = ["0.0.0.0/0"], + ipv6_cidr_blocks = ["::/0"] + }, + { + description : "HTTPS port" + from_port : 443, + to_port : 443, + protocol : "TCP", + cidr_blocks = ["0.0.0.0/0"], + ipv6_cidr_blocks = ["::/0"] + }, + { + description : "Minio console port" + from_port : 9000, + to_port : 9000, + protocol : "TCP", + cidr_blocks = ["0.0.0.0/0"], + ipv6_cidr_blocks = ["::/0"] + }, + { + description : "Postgres port" + from_port : 5432, + to_port : 5432, + protocol : "TCP", + cidr_blocks = ["0.0.0.0/0"], + ipv6_cidr_blocks = ["::/0"] + } + ] + K8S_SECURITY_GROUP = [ + { + description : "K8s port" + from_port : 6443, + to_port : 6443, + protocol : "TCP", + cidr_blocks = ["0.0.0.0/0"], + ipv6_cidr_blocks = ["::/0"] + }, + { + description : "SSH login port" + from_port : 22, + to_port : 22, + protocol : "TCP", + cidr_blocks = ["0.0.0.0/0"], + ipv6_cidr_blocks = ["::/0"] + }, + { + description : "HTTP port" + from_port : 80, + to_port : 80, + protocol : "TCP", + cidr_blocks = ["0.0.0.0/0"], + ipv6_cidr_blocks = ["::/0"] + }, + { + description : "HTTPS port" + from_port : 443, + to_port : 443, + protocol : "TCP", + cidr_blocks = ["0.0.0.0/0"], + ipv6_cidr_blocks = ["::/0"] + } + ] + } +} \ No newline at end of file diff --git a/deployment/v3/terraform/aws/main.tf b/deployment/v3/terraform/aws/main.tf new file mode 100644 index 000000000..0f589668b --- /dev/null +++ 
b/deployment/v3/terraform/aws/main.tf @@ -0,0 +1,42 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.48.0" + } + } +} + +# provider "aws" { +# Profile `default` means it will take credentials AWS_SITE_KEY & AWS_SECRET_EKY from ~/.aws/config under `default` section. +# profile = "default" +# region = "ap-south-1" +# } +provider "aws" { + region = var.AWS_PROVIDER_REGION +} + +module "aws-resource-creation" { + source = "./modules/aws-resource-creation" + CLUSTER_NAME = var.CLUSTER_NAME + AWS_PROVIDER_REGION = var.AWS_PROVIDER_REGION + SSH_KEY_NAME = var.SSH_KEY_NAME + K8S_INSTANCE_TYPE = var.K8S_INSTANCE_TYPE + NGINX_INSTANCE_TYPE = var.NGINX_INSTANCE_TYPE + MOSIP_DOMAIN = var.MOSIP_DOMAIN + ZONE_ID = var.ZONE_ID + AMI = var.AMI + + SECURITY_GROUP = local.SECURITY_GROUP +} + +module "nginx-setup" { + depends_on = [module.aws-resource-creation] + source = "./modules/nginx-setup" + NGINX_PUBLIC_IP = module.aws-resource-creation.NGINX_PUBLIC_IP + MOSIP_DOMAIN = var.MOSIP_DOMAIN + MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST = module.aws-resource-creation.MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST + MOSIP_PUBLIC_DOMAIN_LIST = module.aws-resource-creation.MOSIP_PUBLIC_DOMAIN_LIST + CERTBOT_EMAIL = var.MOSIP_EMAIL_ID + SSH_PRIVATE_KEY = var.SSH_PRIVATE_KEY +} diff --git a/deployment/v3/terraform/aws/modules/aws-resource-creation/README.md b/deployment/v3/terraform/aws/modules/aws-resource-creation/README.md new file mode 100644 index 000000000..477da3903 --- /dev/null +++ b/deployment/v3/terraform/aws/modules/aws-resource-creation/README.md @@ -0,0 +1,98 @@ +# Terraform Script for AWS Infrastructure with Certbot and NGINX + +## Overview +This Terraform script sets up an AWS infrastructure that includes: + +* IAM roles and policies for Certbot to modify Route 53 DNS records. +* Security groups for NGINX and Kubernetes instances. +* EC2 instances for NGINX and Kubernetes. +* Route 53 DNS records for managing domain names. +* Certbot for SSL certificate generation. + +## Requirements + +* Terraform version: `v1.8.4` +* AWS Account +* AWS CLI configured with appropriate credentials + ``` + $ export AWS_ACCESS_KEY_ID= + $ export AWS_SECRET_ACCESS_KEY= + ``` +* Ensure SSH key created for accessing EC2 instances on AWS. + +## Files +* `certbot-ssl-certgen.tf`: Defines IAM roles, policies, and instance profiles for Certbot. +* `aws.tfvars`: Contains variable values for AWS infrastructure configuration. +* `main.tf`: Main Terraform script that defines providers, resources, and output values. +* `variables.tf`: Defines variables used in the Terraform scripts. + +## Setup +* Initialize Terraform + ``` + terraform init + ``` +* Review and modify variable values: + * Ensure `aws.tfvars` contains correct values for your setup. + * Verify `variables.tf` for any additional configuration needs. +* Terraform validate & plan the terraform scripts: + ``` + terraform validate + ``` + ``` + terraform plan -var-file="aws.tfvars" + ``` +* Apply the Terraform configuration: + ``` + terraform apply -var-file="aws.tfvars" + ``` + +## Destroy +To destroy AWS resources, follow the steps below: +* Ensure to have `terraform.tfstate` file. + ``` + terraform destroy + ``` + +## Terraform Scripts + +#### certbot-ssl-certgen.tf +* Defines resources for setting up IAM roles and policies for Certbot: + * `aws_iam_role.certbot_role`: IAM role for Certbot with EC2 assume role policy. + * `aws_iam_policy.certbot_policy`: IAM policy allowing Certbot to modify Route 53 records. 
+ * `aws_iam_role_policy_attachment.certbot_policy_attachment`: Attaches the policy to the role. + * `aws_iam_instance_profile.certbot_profile`: Creates an instance profile for the IAM role. + +#### aws.tfvars +* Contains configuration variables for the AWS infrastructure +* Ensure the AMI ID `ami-xxxxxxxxxxxxxxxxx` is available in your specified region. +* The `user_data` script for the NGINX instance mounts an EBS volume at `/srv/nfs`. +* Modify the security group rules as per your security requirements. + +#### main.tf +* Defines the main resources and provider configuration: + * `Providers`: AWS provider configuration. + * `Security Groups`: aws_security_group.security-group for NGINX and Kubernetes. + * `EC2 Instances`: + * **aws_instance.NGINX_EC2_INSTANCE** for NGINX. + * **aws_instance.K8S_CLUSTER_EC2_INSTANCE** for Kubernetes. + * `Route 53 Records`: + * **aws_route53_record.MAP_DNS_TO_IP** for A records. + * **aws_route53_record.MAP_DNS_TO_CNAME** for CNAME records. + +#### outputs.tf +* Provides useful information after infrastructure creation. + +#### variables.tf +* Defines input variables used across the Terraform scripts + +#### Outputs +The script provides the following output values: + +* `K8S_CLUSTER_PUBLIC_IPS`: Public IPs of Kubernetes cluster nodes. +* `K8S_CLUSTER_PRIVATE_IPS`: Private IPs of Kubernetes cluster nodes. +* `NGINX_PUBLIC_IP`: Public IP of the NGINX instance. +* `NGINX_PRIVATE_IP`: Private IP of the NGINX instance. +* `MOSIP_NGINX_SG_ID`: Security group ID for NGINX. +* `MOSIP_K8S_SG_ID`: Security group ID for Kubernetes. +* `MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST`: Comma-separated list of Kubernetes cluster nodes private IPs which will be used by Nginx terraform scripts. +* `MOSIP_PUBLIC_DOMAIN_LIST`: Comma-separated list of public domains configured in Route 53 which will be used by Nginx terraform scripts. 
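For illustration, the sketch below shows how these outputs are typically consumed from the root configuration; it mirrors the wiring already present in the repository's top-level `main.tf`, and the two comma-separated lists are the values built with `join(",", ...)` in this module's `outputs.tf`:

```
# Root configuration (sketch): pass this module's outputs straight into nginx-setup.
module "aws-resource-creation" {
  source       = "./modules/aws-resource-creation"
  CLUSTER_NAME = var.CLUSTER_NAME
  # ... remaining inputs as listed above ...
}

module "nginx-setup" {
  depends_on = [module.aws-resource-creation]
  source     = "./modules/nginx-setup"

  NGINX_PUBLIC_IP                         = module.aws-resource-creation.NGINX_PUBLIC_IP
  MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST = module.aws-resource-creation.MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST
  MOSIP_PUBLIC_DOMAIN_LIST                = module.aws-resource-creation.MOSIP_PUBLIC_DOMAIN_LIST
  MOSIP_DOMAIN                            = var.MOSIP_DOMAIN
  CERTBOT_EMAIL                           = var.MOSIP_EMAIL_ID
  SSH_PRIVATE_KEY                         = var.SSH_PRIVATE_KEY
}
```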
diff --git a/deployment/v3/terraform/aws/modules/aws-resource-creation/certbot-ssl-certgen.tf b/deployment/v3/terraform/aws/modules/aws-resource-creation/certbot-ssl-certgen.tf new file mode 100644 index 000000000..f6ec412c8 --- /dev/null +++ b/deployment/v3/terraform/aws/modules/aws-resource-creation/certbot-ssl-certgen.tf @@ -0,0 +1,48 @@ +resource "aws_iam_role" "certbot_role" { + name = "certbot-route53-role" + assume_role_policy = < instance.public_ip } +} +output "K8S_CLUSTER_PRIVATE_IPS" { + value = { for key, instance in aws_instance.K8S_CLUSTER_EC2_INSTANCE : "${local.K8S_EC2_NODE.tags.Name}-${key + 1}" => instance.private_ip } +} +output "NGINX_PUBLIC_IP" { + value = aws_instance.NGINX_EC2_INSTANCE.public_ip +} +output "NGINX_PRIVATE_IP" { + value = aws_instance.NGINX_EC2_INSTANCE.private_ip +} +output "MOSIP_NGINX_SG_ID" { + value = aws_security_group.security-group["NGINX_SECURITY_GROUP"].id +} +output "MOSIP_K8S_SG_ID" { + value = aws_security_group.security-group["K8S_SECURITY_GROUP"].id +} +output "MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST" { + value = join(",", aws_instance.K8S_CLUSTER_EC2_INSTANCE[*].private_ip) +} +output "MOSIP_PUBLIC_DOMAIN_LIST" { + value = join(",", concat( + [local.MAP_DNS_TO_IP.API_DNS.name], + [for cname in local.MAP_DNS_TO_CNAME : cname.name if contains([cname.records], local.MAP_DNS_TO_IP.API_DNS.name)] + )) +} diff --git a/deployment/v3/terraform/aws/modules/aws-resource-creation/variables.tf b/deployment/v3/terraform/aws/modules/aws-resource-creation/variables.tf new file mode 100644 index 000000000..11804e52c --- /dev/null +++ b/deployment/v3/terraform/aws/modules/aws-resource-creation/variables.tf @@ -0,0 +1,205 @@ +variable "AWS_PROVIDER_REGION" { type = string } +variable "CLUSTER_NAME" { type = string } +variable "SSH_KEY_NAME" { type = string } +variable "SECURITY_GROUP" { + type = map(list(object({ + description = string + from_port = number + to_port = number + protocol = string + cidr_blocks = list(string) + ipv6_cidr_blocks = list(string) + } + ))) +} +variable "K8S_INSTANCE_TYPE" { + type = string + validation { + condition = can(regex("^[a-z0-9]+\\..*", var.K8S_INSTANCE_TYPE)) + error_message = "Invalid instance type format. Must be in the form 'series.type'." + } +} +variable "AMI" { + type = string + validation { + condition = can(regex("^ami-[a-f0-9]{17}$", var.AMI)) + error_message = "Invalid AMI format. It should be in the format 'ami-xxxxxxxxxxxxxxxxx'" + } +} +variable "NGINX_INSTANCE_TYPE" { + type = string + validation { + condition = can(regex("^[a-z0-9]+\\..*", var.NGINX_INSTANCE_TYPE)) + error_message = "Invalid instance type format. Must be in the form 'series.type'." + } +} +variable "MOSIP_DOMAIN" { type = string } +variable "ZONE_ID" { type = string } + +# NGINX TAG NAME VARIABLE +locals { + TAG_NAME = { + NGINX_TAG_NAME = "${var.CLUSTER_NAME}-NGINX-NODE" + } +} + + +# DNS CONFIGURATION +locals { + MAP_DNS_TO_IP = { + API_DNS = { + name = "api.${var.MOSIP_DOMAIN}" + type = "A" + zone_id = var.ZONE_ID + ttl = 300 + records = aws_instance.NGINX_EC2_INSTANCE.tags.Name == local.TAG_NAME.NGINX_TAG_NAME ? aws_instance.NGINX_EC2_INSTANCE.public_ip : "" + #health_check_id = true + allow_overwrite = true + } + API_INTERNAL_DNS = { + name = "api-internal.${var.MOSIP_DOMAIN}" + type = "A" + zone_id = var.ZONE_ID + ttl = 300 + records = aws_instance.NGINX_EC2_INSTANCE.tags.Name == local.TAG_NAME.NGINX_TAG_NAME ? 
aws_instance.NGINX_EC2_INSTANCE.private_ip : "" + #health_check_id = true + allow_overwrite = true + } + } +} +locals { + MAP_DNS_TO_CNAME = { + MOSIP_HOMEPAGE_DNS = { + name = var.MOSIP_DOMAIN + type = "CNAME" + zone_id = var.ZONE_ID + ttl = 300 + records = local.MAP_DNS_TO_IP.API_INTERNAL_DNS.name + #health_check_id = true + allow_overwrite = true + } + ADMIN_DNS = { + name = "admin.${var.MOSIP_DOMAIN}" + type = "CNAME" + zone_id = var.ZONE_ID + ttl = 300 + records = local.MAP_DNS_TO_IP.API_INTERNAL_DNS.name + #health_check_id = true + allow_overwrite = true + } + PREREG_DNS = { + name = "prereg.${var.MOSIP_DOMAIN}" + type = "CNAME" + zone_id = var.ZONE_ID + ttl = 300 + records = local.MAP_DNS_TO_IP.API_DNS.name + #health_check_id = true + allow_overwrite = true + } + RESIDENT_DNS = { + name = "resident.${var.MOSIP_DOMAIN}" + type = "CNAME" + zone_id = var.ZONE_ID + ttl = 300 + records = local.MAP_DNS_TO_IP.API_DNS.name + #health_check_id = true + allow_overwrite = true + } + ESIGNET_DNS = { + name = "esignet.${var.MOSIP_DOMAIN}" + type = "CNAME" + zone_id = var.ZONE_ID + ttl = 300 + records = local.MAP_DNS_TO_IP.API_DNS.name + #health_check_id = true + allow_overwrite = true + } + } +} + + +# EC2 INSTANCE DATA: NGINX & K8S NODES +locals { + NGINX_INSTANCE = { + ami = var.AMI + instance_type = var.NGINX_INSTANCE_TYPE + associate_public_ip_address = true + key_name = var.SSH_KEY_NAME + user_data =<<-EOF +#!/bin/bash + +# Log file path +echo "[ Set Log File ] : " +LOG_FILE="/tmp/ebs-volume-mount.log" + +# Redirect stdout and stderr to log file +exec > >(tee -a "$LOG_FILE") 2>&1 + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes + +## Mount EBS volume +echo "[ Mount EBS volume to /srv/nfs directory ] : " +file -s /dev/xvdb +mkfs -t xfs /dev/xvdb +mkdir -p /srv/nfs +echo "/dev/xvdb /srv/nfs xfs defaults,nofail 0 2" >> /etc/fstab +mount -a +EOF + tags = { + Name = local.TAG_NAME.NGINX_TAG_NAME + Cluster = var.CLUSTER_NAME + } + security_groups = [ + aws_security_group.security-group["NGINX_SECURITY_GROUP"].id + ] + + root_block_device = { + volume_size = 30 + volume_type = "gp3" + delete_on_termination = true + encrypted = false + tags = { + Name = local.TAG_NAME.NGINX_TAG_NAME + Cluster = var.CLUSTER_NAME + } + } + ebs_block_device = [{ + device_name = "/dev/sdb" + volume_size = 10 + volume_type = "gp3" + delete_on_termination = true + encrypted = false + tags = { + Name = local.TAG_NAME.NGINX_TAG_NAME + Cluster = var.CLUSTER_NAME + } + }] + } + K8S_EC2_NODE = { + ami = var.AMI + instance_type = var.K8S_INSTANCE_TYPE + associate_public_ip_address = true + key_name = var.SSH_KEY_NAME + count = 1 + tags = { + Name = "${var.CLUSTER_NAME}-node" + Cluster = var.CLUSTER_NAME + } + security_groups = [ + aws_security_group.security-group["K8S_SECURITY_GROUP"].id + ] + + root_block_device = { + volume_size = 30 + volume_type = "gp3" + delete_on_termination = true + encrypted = false + + } + } +} diff --git a/deployment/v3/terraform/aws/modules/nginx-setup/README.md b/deployment/v3/terraform/aws/modules/nginx-setup/README.md new file mode 100644 index 000000000..00050940a --- /dev/null +++ b/deployment/v3/terraform/aws/modules/nginx-setup/README.md @@ -0,0 +1,77 @@ +## Terraform & Shell Script for Nginx Setup with 
SSL + +## Overview +This Terraform configuration script sets up an Nginx server with SSL certificates on an AWS EC2 instance. +It fetches SSL certificates using Certbot and integrates with the Kubernetes infrastructure from the specified GitHub repository. + +## Requirements + +* Terraform version: `v1.8.4` +* AWS Account +* AWS CLI configured with appropriate credentials + ``` + $ export AWS_ACCESS_KEY_ID= + $ export AWS_SECRET_ACCESS_KEY= + ``` +* Ensure an SSH key is created for accessing EC2 instances on AWS. +* Ensure you have access to the private SSH key that corresponds to the public key used when launching the EC2 instance. +* Domain and DNS: Ensure that you have a domain and that its DNS is managed by Route 53. +* Git is installed on the EC2 instance. + +## Files +* `main.tf`: Main Terraform script that defines providers, resources, and output values. +* `nginx-setup.sh`: This script installs Nginx and sets up its configuration. + +## Setup +* Initialize Terraform. + ``` + terraform init + ``` +* Validate and plan the Terraform scripts: + ``` + terraform validate + ``` + ``` + terraform plan -var-file="aws.tfvars" + ``` +* Apply the Terraform configuration: + ``` + terraform apply -var-file="aws.tfvars" + ``` + +## Destroy +To destroy AWS resources, follow the steps below: +* Ensure the `terraform.tfstate` file is available. + ``` + terraform destroy + ``` + +## Input Variables +* `NGINX_PUBLIC_IP`: The public IP address of the EC2 instance where Nginx will be set up. +* `MOSIP_DOMAIN`: The domain for which the wildcard SSL certificates will be generated. +* `MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST`: A comma-separated list of the Kubernetes cluster nodes' private IP addresses for the Nginx configuration. +* `MOSIP_PUBLIC_DOMAIN_LIST`: A comma-separated list of public domain names used in the Nginx configuration. +* `CERTBOT_EMAIL`: Email address to be used for SSL certificate registration with Certbot. + +## Local Variables +The script `main.tf` defines a local variable `NGINX_CONFIG` containing the configuration parameters required for setting up Nginx and obtaining SSL certificates. + +## Terraform Scripts + +#### main.tf + +* **null_resource "Nginx-setup"**: This resource performs the following actions: + * `Triggers`: Sets up triggers based on the hash of the Kubernetes cluster nodes' private IP list and the public domain list (see the sketch at the end of this README). + * `Connection`: Defines the SSH connection parameters for the EC2 instance. + * `File Provisioner`: Uploads the nginx-setup.sh script to the EC2 instance. + * `Remote Exec Provisioner`: Executes the necessary commands to: + * Set environment variables. + * Run the nginx-setup.sh script. + +#### nginx-setup.sh +This script performs the following actions: + * Logs the script execution. + * Installs Nginx and SSL dependencies. + * Obtains SSL certificates using Certbot. + * Enables and starts the Nginx service. + * Clones the specified Kubernetes infrastructure repository and runs the Nginx setup script from it.
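A minimal sketch of the trigger pattern mentioned above, with names taken from this module's `main.tf`: hashing the two comma-separated inputs means any change to the cluster node IPs or the public domain list re-runs the file and remote-exec provisioners.

```
# Sketch: re-provision Nginx whenever either input list changes.
resource "null_resource" "Nginx-setup" {
  triggers = {
    node_hash       = md5(var.MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST)
    public_dns_hash = md5(var.MOSIP_PUBLIC_DOMAIN_LIST)
  }
  # connection block and file/remote-exec provisioners as defined in main.tf
}
```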
diff --git a/deployment/v3/terraform/aws/modules/nginx-setup/main.tf b/deployment/v3/terraform/aws/modules/nginx-setup/main.tf new file mode 100644 index 000000000..c1b83b2e9 --- /dev/null +++ b/deployment/v3/terraform/aws/modules/nginx-setup/main.tf @@ -0,0 +1,62 @@ +variable "NGINX_PUBLIC_IP" { type = string } +variable "MOSIP_DOMAIN" { type = string } +variable "MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST" { type = string } +variable "MOSIP_PUBLIC_DOMAIN_LIST" { type = string } +variable "CERTBOT_EMAIL" { type = string } +variable "SSH_PRIVATE_KEY" { type = string } + +locals { + NGINX_CONFIG = { + mosip_domain = var.MOSIP_DOMAIN + env_var_file = "/etc/environment" + cluster_nginx_certs="/etc/letsencrypt/live/${var.MOSIP_DOMAIN}/fullchain.pem" + cluster_nginx_cert_key="/etc/letsencrypt/live/${var.MOSIP_DOMAIN}/privkey.pem" + cluster_node_ips=var.MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST + cluster_public_domains=var.MOSIP_PUBLIC_DOMAIN_LIST + cluster_ingress_public_nodeport="30080" + cluster_ingress_internal_nodeport="31080" + cluster_ingress_postgres_nodeport="31432" + cluster_ingress_minio_nodeport="30900" + cluster_ingress_activemq_nodeport="31616" + certbot_email=var.CERTBOT_EMAIL + k8s_infra_repo_url="https://github.com/mosip/k8s-infra.git" + k8s_infra_branch="main" + working_dir="/home/ubuntu/" + nginx_location="./k8s-infra/mosip/on-prem/nginx" + } + + nginx_env_vars = [ + for key, value in local.NGINX_CONFIG : + "echo 'export ${key}=${value}' | sudo tee -a ${local.NGINX_CONFIG.env_var_file}" + ] +} + +resource "null_resource" "Nginx-setup" { + triggers = { + # node_count_or_hash = module.ec2-resource-creation.node_count + # or if you used hash: + node_hash = md5(var.MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST) + public_dns_hash = md5(var.MOSIP_PUBLIC_DOMAIN_LIST) + } + connection { + type = "ssh" + host = var.NGINX_PUBLIC_IP + user = "ubuntu" # Change based on the AMI used + private_key = var.SSH_PRIVATE_KEY # content of your private key + } + provisioner file { + source = "./modules/nginx-setup/nginx-setup.sh" + destination = "/tmp/nginx-setup.sh" + } + provisioner "remote-exec" { + inline = concat( + local.nginx_env_vars, + [ + "echo \"export cluster_nginx_internal_ip=\"$(curl http://169.254.169.254/latest/meta-data/local-ipv4)\"\" | sudo tee -a ${local.NGINX_CONFIG.env_var_file}", + "echo \"export cluster_nginx_public_ip=\"$(curl http://169.254.169.254/latest/meta-data/local-ipv4)\"\" | sudo tee -a ${local.NGINX_CONFIG.env_var_file}", + "sudo chmod +x /tmp/nginx-setup.sh", + "sudo bash /tmp/nginx-setup.sh" + ] + ) + } +} \ No newline at end of file diff --git a/deployment/v3/terraform/aws/modules/nginx-setup/nginx-setup.sh b/deployment/v3/terraform/aws/modules/nginx-setup/nginx-setup.sh new file mode 100644 index 000000000..c73e2490f --- /dev/null +++ b/deployment/v3/terraform/aws/modules/nginx-setup/nginx-setup.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Log file path +echo "[ Set Log File ] : " +sudo mv /tmp/nginx-setup.log /tmp/nginx-setup.log.old || true +LOG_FILE="/tmp/nginx-setup.log" +ENV_FILE_PATH="/etc/environment" +source $ENV_FILE_PATH +env | grep cluster + +# Redirect stdout and stderr to log file +exec > >(tee -a "$LOG_FILE") 2>&1 + +# set commands for error handling. 
+set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes + +## Install Nginx, ssl dependencies +echo "[ Install nginx & ssl dependencies packages ] : " +sudo apt-get update +sudo apt install -y software-properties-common +sudo add-apt-repository universe +sudo apt update +sudo apt-get install nginx letsencrypt certbot python3-certbot-nginx python3-certbot-dns-route53 -y + +## Get ssl certificate automatically +echo "[ Generate SSL certificates from letsencrypt ] : " +sudo certbot certonly --dns-route53 -d "*.${mosip_domain}" -d "${mosip_domain}" --non-interactive --agree-tos --email "$certbot_email" + +## start and enable Nginx +echo "[ Start & Enable nginx ] : " +sudo systemctl enable nginx +sudo systemctl start nginx + +cd $working_dir +git clone $k8s_infra_repo_url -b $k8s_infra_branch || true # read it from variables +cd $nginx_location +./install.sh + diff --git a/deployment/v3/terraform/aws/outputs.tf b/deployment/v3/terraform/aws/outputs.tf new file mode 100644 index 000000000..df188a8f2 --- /dev/null +++ b/deployment/v3/terraform/aws/outputs.tf @@ -0,0 +1,24 @@ +output "K8S_CLUSTER_PUBLIC_IPS" { + value = module.aws-resource-creation.K8S_CLUSTER_PUBLIC_IPS +} +output "K8S_CLUSTER_PRIVATE_IPS" { + value = module.aws-resource-creation.K8S_CLUSTER_PRIVATE_IPS +} +output "NGINX_PUBLIC_IP" { + value = module.aws-resource-creation.NGINX_PUBLIC_IP +} +output "NGINX_PRIVATE_IP" { + value = module.aws-resource-creation.NGINX_PRIVATE_IP +} +output "MOSIP_NGINX_SG_ID" { + value = module.aws-resource-creation.MOSIP_NGINX_SG_ID +} +output "MOSIP_K8S_SG_ID" { + value = module.aws-resource-creation.MOSIP_K8S_SG_ID +} +output "MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST" { + value = module.aws-resource-creation.MOSIP_K8S_CLUSTER_NODES_PRIVATE_IP_LIST +} +output "MOSIP_PUBLIC_DOMAIN_LIST" { + value = module.aws-resource-creation.MOSIP_PUBLIC_DOMAIN_LIST +} \ No newline at end of file diff --git a/deployment/v3/terraform/aws/variables.tf b/deployment/v3/terraform/aws/variables.tf new file mode 100644 index 000000000..03fbebd34 --- /dev/null +++ b/deployment/v3/terraform/aws/variables.tf @@ -0,0 +1,46 @@ +variable "AWS_PROVIDER_REGION" { type = string } +variable "CLUSTER_NAME" { type = string } +variable "SSH_PRIVATE_KEY" { type = string } +variable "MOSIP_DOMAIN" { + description = "MOSIP DOMAIN : (ex: sandbox.xyz.net)" + type = string + validation { + condition = can(regex("^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])\\.)+[a-zA-Z]{2,}$", var.MOSIP_DOMAIN)) + error_message = "The domain name must be a valid domain name, e.g., sandbox.xyz.net." + } +} + +variable "MOSIP_EMAIL_ID" { + description = "Email ID used by certbot to generate SSL certs for Nginx node" + type = string + validation { + condition = can(regex("^\\S+@\\S+\\.\\S+$", var.MOSIP_EMAIL_ID)) + error_message = "The email address must be a valid email format (e.g., user@example.com)." + } +} + +variable "SSH_KEY_NAME" { type = string } +variable "K8S_INSTANCE_TYPE" { + type = string + validation { + condition = can(regex("^[a-z0-9]+\\..*", var.K8S_INSTANCE_TYPE)) + error_message = "Invalid instance type format. Must be in the form 'series.type'." 
+ } +} + +variable "NGINX_INSTANCE_TYPE" { + type = string + validation { + condition = can(regex("^[a-z0-9]+\\..*", var.NGINX_INSTANCE_TYPE)) + error_message = "Invalid instance type format. Must be in the form 'series.type'." + } +} +variable "AMI" { + type = string + validation { + condition = can(regex("^ami-[a-f0-9]{17}$", var.AMI)) + error_message = "Invalid AMI format. It should be in the format 'ami-xxxxxxxxxxxxxxxxx'" + } +} + +variable "ZONE_ID" { type = string } \ No newline at end of file diff --git a/deployment/v3/testrig/apitestrig/copy_cm.sh b/deployment/v3/testrig/apitestrig/copy_cm.sh index 61c8e5d7b..c7ca53b85 100755 --- a/deployment/v3/testrig/apitestrig/copy_cm.sh +++ b/deployment/v3/testrig/apitestrig/copy_cm.sh @@ -18,4 +18,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -copying_cm # calling function \ No newline at end of file +copying_cm # calling function diff --git a/deployment/v3/testrig/apitestrig/copy_secrets.sh b/deployment/v3/testrig/apitestrig/copy_secrets.sh index 5c4fa0f44..76601da79 100755 --- a/deployment/v3/testrig/apitestrig/copy_secrets.sh +++ b/deployment/v3/testrig/apitestrig/copy_secrets.sh @@ -17,4 +17,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -copying_secrets # calling function \ No newline at end of file +copying_secrets # calling function diff --git a/deployment/v3/testrig/apitestrig/delete.sh b/deployment/v3/testrig/apitestrig/delete.sh index 6a28aa852..1c737f567 100755 --- a/deployment/v3/testrig/apitestrig/delete.sh +++ b/deployment/v3/testrig/apitestrig/delete.sh @@ -27,4 +27,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -deleting_apitestrig # calling function \ No newline at end of file +deleting_apitestrig # calling function diff --git a/deployment/v3/testrig/apitestrig/install.sh b/deployment/v3/testrig/apitestrig/install.sh index 57cccbde0..e39a46f32 100755 --- a/deployment/v3/testrig/apitestrig/install.sh +++ b/deployment/v3/testrig/apitestrig/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=apitestrig -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS @@ -45,7 +45,7 @@ function installing_apitestrig() { echo "ERROR: Time should be in range ( 0-23 ); EXITING;"; exit 1; fi - + echo "Do you have public domain & valid SSL? (Y/n) " echo "Y: if you have public domain & valid ssl certificate" echo "n: If you don't have a public domain and a valid SSL certificate. Note: It is recommended to use this option only in development environments." @@ -60,6 +60,16 @@ function installing_apitestrig() { ENABLE_INSECURE='--set enable_insecure=true'; fi + read -p "Please provide the retention days to remove old reports ( Default: 3 )" reportExpirationInDays + + if [[ -z $reportExpirationInDays ]]; then + reportExpirationInDays=3 + fi + if ! 
[[ $reportExpirationInDays =~ ^[0-9]+$ ]]; then + echo "The variable \"reportExpirationInDays\" should contain only number; EXITING"; + exit 1; + fi + read -p "Please provide slack webhook URL to notify server end issues on your slack channel : " slackWebhookUrl if [ -z $slackWebhookUrl ]; then @@ -67,24 +77,24 @@ function installing_apitestrig() { exit 1; fi - valid_inputs=("yes" "no") - eSignetDeployed="" + valid_inputs=("yes" "no") + eSignetDeployed="" - while [[ ! " ${valid_inputs[@]} " =~ " ${eSignetDeployed} " ]]; do - read -p "Is the eSignet service deployed? (yes/no): " eSignetDeployed - eSignetDeployed=${eSignetDeployed,,} # Convert input to lowercase - done + while [[ ! " ${valid_inputs[@]} " =~ " ${eSignetDeployed} " ]]; do + read -p "Is the eSignet service deployed? (yes/no): " eSignetDeployed + eSignetDeployed=${eSignetDeployed,,} # Convert input to lowercase + done - if [[ $eSignetDeployed == "yes" ]]; then - echo "eSignet service is deployed. Proceeding with installation..." - else - echo "eSignet service is not deployed. hence will be skipping esignet related test-cases..." - fi + if [[ $eSignetDeployed == "yes" ]]; then + echo "eSignet service is deployed. Proceeding with installation..." + else + echo "eSignet service is not deployed. hence will be skipping esignet related test-cases..." + fi echo Installing apitestrig helm -n $NS install apitestrig mosip/apitestrig \ --set crontime="0 $time * * *" \ - -f values.yaml \ + -f values.yaml \ --version $CHART_VERSION \ --set apitestrig.configmaps.s3.s3-host='http://minio.minio:9000' \ --set apitestrig.configmaps.s3.s3-user-key='admin' \ @@ -95,7 +105,8 @@ function installing_apitestrig() { --set apitestrig.configmaps.apitestrig.ENV_USER="$ENV_USER" \ --set apitestrig.configmaps.apitestrig.ENV_ENDPOINT="https://$API_INTERNAL_HOST" \ --set apitestrig.configmaps.apitestrig.ENV_TESTLEVEL="smokeAndRegression" \ - --set apitestrig.secrets.apitestrig.slack-webhook-url="$slackWebhookUrl" \ + --set apitestrig.configmaps.apitestrig.reportExpirationInDays="$reportExpirationInDays" \ + --set apitestrig.configmaps.apitestrig.slack-webhook-url="$slackWebhookUrl" \ --set apitestrig.configmaps.apitestrig.eSignetDeployed="$eSignetDeployed" \ --set apitestrig.configmaps.apitestrig.NS="$NS" \ $ENABLE_INSECURE diff --git a/deployment/v3/testrig/apitestrig/values.yaml b/deployment/v3/testrig/apitestrig/values.yaml index f0af09ca5..dc15566f4 100644 --- a/deployment/v3/testrig/apitestrig/values.yaml +++ b/deployment/v3/testrig/apitestrig/values.yaml @@ -1,18 +1,17 @@ modules: - - name: prereg + prereg: enabled: true - - name: masterdata + masterdata: enabled: true - - name: idrepo + idrepo: enabled: true - - name: partner + partner: enabled: true - - name: resident + resident: enabled: true - - name: auth + auth: enabled: true - - name: esignet + esignet: enabled: true - - name: mimoto - enabled: true - + mimoto: + enabled: true \ No newline at end of file diff --git a/deployment/v3/testrig/dslrig/copy_cm.sh b/deployment/v3/testrig/dslrig/copy_cm.sh index 36e401766..9134d8701 100755 --- a/deployment/v3/testrig/dslrig/copy_cm.sh +++ b/deployment/v3/testrig/dslrig/copy_cm.sh @@ -18,4 +18,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -copying_cm # calling function \ No newline at end of file +copying_cm # calling 
function diff --git a/deployment/v3/testrig/dslrig/copy_secrets.sh b/deployment/v3/testrig/dslrig/copy_secrets.sh index b889b72e5..519462ce7 100755 --- a/deployment/v3/testrig/dslrig/copy_secrets.sh +++ b/deployment/v3/testrig/dslrig/copy_secrets.sh @@ -17,4 +17,4 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true set -o nounset ## set -u : exit the script if you try to use an uninitialised variable set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes -copying_secrets # calling function \ No newline at end of file +copying_secrets # calling function diff --git a/deployment/v3/testrig/dslrig/install.sh b/deployment/v3/testrig/dslrig/install.sh index 17906f448..c0fef9b8a 100755 --- a/deployment/v3/testrig/dslrig/install.sh +++ b/deployment/v3/testrig/dslrig/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=dslrig -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS @@ -15,12 +15,6 @@ kubectl create ns $NS function installing_dslrig() { ENV_NAME=$( kubectl -n default get cm global -o json |jq -r '.data."installation-domain"') - read -p "Please provide NFS host : " NFS_HOST - read -p "Please provide NFS pem file for SSH login : " NFS_PEM_FILE - read -p "Please provide user for SSH login : " NFS_USER - echo -e "[nfs_server]\nnfsserver ansible_user=$NFS_USER ansible_host=$NFS_HOST ansible_ssh_private_key_file=$NFS_PEM_FILE" env_name="$ENV_NAME" > hosts.ini - ansible-playbook -i hosts.ini nfs-server.yaml - read -p "Please enter the time(hr) to run the cronjob every day (time: 0-23) : " time if [ -z "$time" ]; then echo "ERROT: Time cannot be empty; EXITING;"; @@ -98,8 +92,6 @@ function installing_dslrig() { --set dslorchestrator.configmaps.dslorchestrator.USER="$USER" \ --set dslorchestrator.configmaps.dslorchestrator.ENDPOINT="https://$API_INTERNAL_HOST" \ --set dslorchestrator.configmaps.dslorchestrator.packetUtilityBaseUrl="$packetUtilityBaseUrl" \ - --set persistence.nfs.server="$NFS_HOST" \ - --set persistence.nfs.path="/srv/nfs/mosip/dsl-scenarios/$ENV_NAME" \ --set dslorchestrator.configmaps.dslorchestrator.reportExpirationInDays="$reportExpirationInDays" \ --set dslorchestrator.configmaps.dslorchestrator.NS="$NS" \ $ENABLE_INSECURE diff --git a/deployment/v3/testrig/packetcreator/README.md b/deployment/v3/testrig/packetcreator/README.md index 806e388b5..3a121385d 100644 --- a/deployment/v3/testrig/packetcreator/README.md +++ b/deployment/v3/testrig/packetcreator/README.md @@ -9,12 +9,13 @@ Packetcreator will create packets for DSL orchestrator. ```sh ./install.sh ``` +* During the execution of the `install.sh` script, a prompt appears requesting information regarding the presence of a public domain and a valid SSL certificate on the server. +* If the server lacks a public domain and a valid SSL certificate, it is advisable to select the `n` option. Opting it will enable the `init-container` with an `emptyDir` volume and include it in the deployment process. +* The init-container will proceed to download the server's self-signed SSL certificate and mount it to the specified location within the container's Java keystore (i.e., `cacerts`) file. +* This particular functionality caters to scenarios where the script needs to be employed on a server utilizing self-signed SSL certificates. + ## Uninstall * To uninstall Packetcreator, run `delete.sh` script. 
```sh ./delete.sh ``` -* During the execution of the `install.sh` script, a prompt appears requesting information regarding the presence of a public domain and a valid SSL certificate on the server. -* If the server lacks a public domain and a valid SSL certificate, it is advisable to select the `n` option. Opting it will enable the `init-container` with an `emptyDir` volume and include it in the deployment process. -* The init-container will proceed to download the server's self-signed SSL certificate and mount it to the specified location within the container's Java keystore (i.e., `cacerts`) file. -* This particular functionality caters to scenarios where the script needs to be employed on a server utilizing self-signed SSL certificates. diff --git a/deployment/v3/testrig/packetcreator/install.sh b/deployment/v3/testrig/packetcreator/install.sh index 1ce167222..a40dd6903 100755 --- a/deployment/v3/testrig/packetcreator/install.sh +++ b/deployment/v3/testrig/packetcreator/install.sh @@ -7,19 +7,13 @@ if [ $# -ge 1 ] ; then fi NS=packetcreator -CHART_VERSION=12.0.1 +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS function installing_packetcreator() { - read -p "Please provide NFS host : " NFS_HOST - read -p "Please provide NFS pem file for SSH login : " NFS_PEM_FILE - read -p "Please provide user for SSH login : " NFS_USER - echo -e "[nfs_server]\nnfsserver ansible_user=$NFS_USER ansible_host=$NFS_HOST ansible_ssh_private_key_file=$NFS_PEM_FILE" > hosts.ini - ansible-playbook -i hosts.ini nfs-server.yaml - echo "Select the type of Ingress controller to be used (1/2): "; echo "1. Ingress"; echo "2. Istio"; @@ -67,7 +61,6 @@ function installing_packetcreator() { echo Installing packetcreator helm -n $NS install packetcreator mosip/packetcreator \ $( echo $list ) \ - --set persistence.nfs.server="$NFS_HOST" \ --wait --version $CHART_VERSION $ENABLE_INSECURE echo Installed packetcreator. return 0 @@ -80,4 +73,3 @@ set -o nounset ## set -u : exit the script if you try to use an uninitialised set -o errtrace # trace ERR through 'time command' and other functions set -o pipefail # trace ERR through pipes installing_packetcreator # calling function - diff --git a/deployment/v3/testrig/uitestrig/install.sh b/deployment/v3/testrig/uitestrig/install.sh index c0d06c769..b4f07d88e 100755 --- a/deployment/v3/testrig/uitestrig/install.sh +++ b/deployment/v3/testrig/uitestrig/install.sh @@ -7,7 +7,7 @@ if [ $# -ge 1 ] ; then fi NS=uitestrig -CHART_VERSION=12.0.2 +CHART_VERSION=0.0.1-develop echo Create $NS namespace kubectl create ns $NS diff --git a/deployment/v3/utils/bqatsdk_jar_build/README.md b/deployment/v3/utils/bqatsdk_jar_build/README.md new file mode 100644 index 000000000..1bec7acc2 --- /dev/null +++ b/deployment/v3/utils/bqatsdk_jar_build/README.md @@ -0,0 +1,90 @@ +# Build Process for BQAT SDK Jar + +## Overview +The process involves building the BQAT SDK jar locally and then adding it to the Artifactory pod. + +1. **Clone Repository**: + Clone the bqat-sdk repository to your local machine. + ```bash + git clone https://github.com/JanardhanBS-SyncByte/bqat-sdk + ``` + +2. **Switch Branch**: + Navigate to the cloned repository and checkout to the required branch (e.g., develop). + ```bash + cd bqat-sdk/ + git checkout develop + ``` + +3. **Build with Maven**: + Ensure you are in the directory containing the `pom.xml` file, then build it using Maven. + ```bash + mvn clean install -Dgpg.skip + ``` + +4. 
**Zip the Jar**: + Zip the generated jar file with its dependencies. + ```bash + zip bqat-sdk-0.0.2-jar-with-dependencies.zip bqat-sdk-0.0.2-jar-with-dependencies.jar + ``` + +5. **Modify Artifactory Deployment to Provide Root Access for the Pod**: + Update the Artifactory deployment file in the environment to ensure the pod runs with root access. + ```yaml + schedulerName: default-scheduler + securityContext: + runAsUser: 0 + ``` + +6. **Export Jar to Artifactory Pod**: + Copy the built jar to the Artifactory pod in the designated namespace (replace the Artifactory pod name accordingly, e.g. "artifactory-bqatsdk-577459987c-67pht"). + ```bash + kubectl -n bqatsdk cp /path/to/bqat-sdk-0.0.2-jar-with-dependencies.zip artifactory-bqatsdk-577459987c-67pht:/usr/share/nginx/html + ``` + +7. **Verify Jar in Pod**: + Access the pod shell and verify that the jar was copied successfully. + ```bash + ls /usr/share/nginx/html + ``` + +8. **Login to Node**: + Log in to the node where the pod is running. + +9. **Check Docker**: + View active Docker containers. + ```bash + docker ps + ``` + +10. **Docker Login**: + If required, log in to Docker with your credentials. + ```bash + docker login + ``` + +11. **Commit Changes**: + Commit the changes to the Artifactory Docker image. + ```bash + docker commit CONTAINERID IMAGE + ``` + +12. **Push to Docker Repo**: + Push the changes to the Docker repository. + ```bash + docker push IMAGE + ``` +**NOTE: Push the image to the required Docker Hub repository.** + +13. **Deploy Artifactory Image**: + After pushing the changes to Docker Hub, deploy the Artifactory pod in the `bqatsdk` namespace with the newly created image. + +14. **Map the new Artifactory zip_file_path in the bqatsdk-service YAML file**: + ``` + - name: biosdk_zip_file_path + value: >- + http://artifactory-bqatsdk.bqatsdk:80/bqat-sdk-0.0.2-jar-with-dependencies.zip + ``` + + +This standardizes the process for building and deploying the BQAT SDK jar. \ No newline at end of file diff --git a/deployment/v3/utils/info/README.md b/deployment/v3/utils/info/README.md new file mode 100644 index 000000000..ba383e55d --- /dev/null +++ b/deployment/v3/utils/info/README.md @@ -0,0 +1,9 @@ +# info + +Displays the image, imageId and Helm chart versions used for every deployment in the cluster across all namespaces. + +Host: /info +## Install +```sh +./install.sh +``` diff --git a/deployment/v3/utils/info/delete.sh b/deployment/v3/utils/info/delete.sh new file mode 100644 index 000000000..3378c085f --- /dev/null +++ b/deployment/v3/utils/info/delete.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Uninstalls info +# Usage: ./delete.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + +function deleting_info() { + NS=info + while true; do + read -p "Are you sure you want to delete info helm chart?(Y/n) " yn + if [ $yn = "Y" ] + then + helm -n $NS delete info + break + else + break + fi + done + return 0 +} + +# set commands for error handling.
+set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +deleting_info # calling function diff --git a/deployment/v3/utils/info/install.sh b/deployment/v3/utils/info/install.sh new file mode 100644 index 000000000..9b1c9715f --- /dev/null +++ b/deployment/v3/utils/info/install.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# Installs info +## Usage: ./install.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + +NS=info +CHART_VERSION=0.0.1-develop + +echo Create $NS namespace +kubectl create ns $NS + +function installing_info() { + echo Istio label + kubectl label ns $NS istio-injection=enabled --overwrite + helm repo update + + echo Installing info + helm -n $NS install info mosip/info --version $CHART_VERSION + + kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status + + echo Installed info service + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +installing_info # calling function diff --git a/deployment/v3/utils/info/restart.sh b/deployment/v3/utils/info/restart.sh new file mode 100644 index 000000000..8f76351a0 --- /dev/null +++ b/deployment/v3/utils/info/restart.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# Restart the info +## Usage: ./restart.sh [kubeconfig] + +if [ $# -ge 1 ] ; then + export KUBECONFIG=$1 +fi + +function Restarting_info() { + NS=info + kubectl -n $NS rollout restart deploy + + kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status + + echo Restarted info + return 0 +} + +# set commands for error handling. +set -e +set -o errexit ## set -e : exit the script if any statement returns a non-true return value +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable +set -o errtrace # trace ERR through 'time command' and other functions +set -o pipefail # trace ERR through pipes +Restarting_info # calling function diff --git a/deployment/v3/utils/prop_migrator/Overview.md b/deployment/v3/utils/prop_migrator/Overview.md index b1f61f44d..68924cb04 100644 --- a/deployment/v3/utils/prop_migrator/Overview.md +++ b/deployment/v3/utils/prop_migrator/Overview.md @@ -145,5 +145,3 @@ Note: - It is highly recommended to create backups of your property files before running this script, as it modifies the files in place. - The `manual-configuration.csv` file can be used to manually update the property files based on the required configuration actions mentioned in the file. - ----- diff --git a/deployment/v3/utils/prop_migrator/README.md b/deployment/v3/utils/prop_migrator/README.md index 9741154cf..dc6ff5eac 100644 --- a/deployment/v3/utils/prop_migrator/README.md +++ b/deployment/v3/utils/prop_migrator/README.md @@ -147,6 +147,3 @@ Note: - It is highly recommended to create backups of your property files before running this script, as it modifies the files in place. - The `manual-configuration.csv` file can be used to manually update the property files based on the required configuration actions mentioned in the file. 
- ----- - diff --git a/deployment/v3/utils/prop_migrator/file_comparator.py b/deployment/v3/utils/prop_migrator/file_comparator.py index 354443fee..749f391a1 100755 --- a/deployment/v3/utils/prop_migrator/file_comparator.py +++ b/deployment/v3/utils/prop_migrator/file_comparator.py @@ -134,4 +134,4 @@ writer.writerow(item) print("latest_file_only.csv created successfully.") -print("Script completed successfully.") \ No newline at end of file +print("Script completed successfully.") diff --git a/deployment/v3/utils/prop_migrator/knowledge/latest-Value-takes-priority.csv b/deployment/v3/utils/prop_migrator/knowledge/latest-Value-takes-priority.csv index 6d351dbef..8a182dfaf 100644 --- a/deployment/v3/utils/prop_migrator/knowledge/latest-Value-takes-priority.csv +++ b/deployment/v3/utils/prop_migrator/knowledge/latest-Value-takes-priority.csv @@ -1,4 +1,6 @@ Property file name,key +admin-default.properties,mosip.kernel.database.hostname +admin-default.properties,mosip.kernel.database.port admin-default.properties,authmanager.base.url admin-default.properties,mosip.admin.accountmgmt.auth-manager-base-uri admin-default.properties,auth.server.validate.url @@ -24,6 +26,9 @@ application-default.properties,auth-token-generator.rest.issuerUrl application-default.properties,mosip.kernel.keymanager.cert.url application-default.properties,auth.server.admin.validate.url application-default.properties,mosip.kernel.auth.appid-realm-map +application-default.properties,packetmanager.default.priority +application-default.properties,provider.packetreader.mosip +application-default.properties,provider.packetwriter.mosip application-default.properties,hazelcast.config application-default.properties,CRYPTOMANAGER_DECRYPT application-default.properties,CRYPTOMANAGER_ENCRYPT @@ -44,6 +49,7 @@ Data-share-default.properties,data.share.token.request.issuerUrl Data-share-default.properties,mosip.data.share.prependThumbprint kernel-default.properties,mosip.kernel.syncdata.auth-manager-base-uri kernel-default.properties,mosip.kernel.sms.gateway +kernel-default.properties,mosip.kernel.notification.email.from kernel-default.properties,spring.mail.host kernel-default.properties,spring.mail.username kernel-default.properties,spring.mail.password @@ -74,6 +80,7 @@ kernel-default.properties,db_3_DS.keycloak.port kernel-default.properties,db_3_DS.keycloak.username kernel-default.properties,db_3_DS.keycloak.password kernel-default.properties,mosip.keycloak.admin.secret.key +kernel-default.properties,mosip.kernel.prereg.realm-id kernel-default.properties,mosip.kernel.prereg.secret.key kernel-default.properties,kernel.uin.transfer-scheduler-minutes kernel-default.properties,kernel.uin.transfer-scheduler-hours @@ -86,6 +93,8 @@ kernel-default.properties,mosip.kernel.authtoken.OTP.internal.url kernel-default.properties,mosip.kernel.authtoken.REFRESH.internal.url kernel-default.properties,mosip.kernel.auth.sendotp.url kernel-default.properties,syncdata.websub.callback.url.ca-cert +hotlist-default.properties,mosip.hotlist.db.url +hotlist-default.properties,mosip.hotlist.db.port hotlist-default.properties,mosip.hotlist.db.password hotlist-default.properties,mosip.hotlist.allowedIdTypes hotlist-default.properties,mosip.hotlist.topic-to-publish @@ -95,6 +104,8 @@ hotlist-default.properties,mosip.idrepo.audit.rest.uri hotlist-default.properties,mosip.hotlist.encryptor.rest.uri hotlist-default.properties,mosip.hotlist.decryptor.rest.uri hotlist-default.properties,mosip.hotlist.audit.rest.uri 
+id-authentication-default.properties,mosip.ida.database.hostname +id-authentication-default.properties,mosip.ida.database.port id-authentication-default.properties,mosip.ida.database.password id-authentication-default.properties,mosip.ida.auth.clientId id-authentication-default.properties,mosip.ida.auth.secretKey @@ -117,6 +128,8 @@ id-authentication-default.properties,ida-websub-partner-service-callback-secret id-authentication-default.properties,ida-websub-hotlist-callback-secret id-authentication-default.properties,mosip.kernel.tokenid.uin.salt id-authentication-default.properties,mosip.kernel.tokenid.partnercode.salt +id-repository-default.properties,mosip.idrepo.db.url +id-repository-default.properties,mosip.idrepo.db.port id-repository-default.properties,mosip.idrepo.db.identity.password id-repository-default.properties,mosip.idrepo.db.vid.password id-repository-default.properties,kernel.retry.retryable.exceptions @@ -132,6 +145,8 @@ id-repository-default.properties,mosip.idrepo.credential.request.rest.uri id-repository-default.properties,mosip.idrepo.credential.request.rest.timeout id-repository-default.properties,mosip.idrepo.retrieve-by-uin.rest.uri id-repository-default.properties,mosip.idrepo.vid-generator.rest.uri +id-repository-default.properties,mosip.credential.service.database.hostname +id-repository-default.properties,mosip.credential.service.database.port id-repository-default.properties,mosip.credential.service.jdbc.password id-repository-default.properties,credential.request.token.request.appid id-repository-default.properties,credential.request.token.request.clientId @@ -151,6 +166,7 @@ id-repository-default.properties,PARTNER_EXTRACTION_POLICY id-repository-default.properties,credential.service.token.request.issuerUrl id-repository-default.properties,object.store.s3.accesskey id-repository-default.properties,object.store.s3.secretkey +id-repository-default.properties,object.store.s3.url id-repository-default.properties,object.store.s3.region mimoto-default.properties,public.url mimoto-default.properties,mosip.resident.base.url @@ -158,14 +174,20 @@ mimoto-default.properties,mosip.event.hubUrl mimoto-default.properties,token.request.issuerUrl mimoto-default.properties,AUDIT mimoto-default.properties,MASTER +mimoto-default.properties,mosip.optional-languages mimoto-default.properties,CREATEDATASHARE mimoto-default.properties,DECRYPTPINBASSED mimoto-default.properties,mosip.iam.adapter.clientsecret mimoto-default.properties,auth.server.admin.issuer.uri mimoto-default.properties,mosip.iam.adapter.issuerURL +mock-abis-default.properties,abis.return.duplicate +partner-management-default.properties,mosip.pmp.database.hostname +partner-management-default.properties,mosip.pmp.database.port partner-management-default.properties,mosip.pmp.database.password partner-management-default.properties,mosip.authdevice.database.password partner-management-default.properties,mosip.regdevice.database.password +partner-management-default.properties,hibernate.show_sql +partner-management-default.properties,hibernate.format_sql partner-management-default.properties,mosip.pmp.auth.secretKey partner-management-default.properties,pms.cert.service.token.request.issuerUrl partner-management-default.properties,pmp.ca.certificaticate.upload.rest.uri @@ -176,6 +198,11 @@ partner-management-default.properties,pmp.allowed.credential.types partner-management-default.properties,mosip.iam.module.login_flow.scope partner-management-default.properties,PASSWORDBASEDTOKENAPI 
pre-registration-default.properties,mosip.database.ip +partner-management-default.properties,mosip.iam.module.clientID +partner-management-default.properties,mosip.iam.module.login_flow.scope +partner-management-default.properties,PASSWORDBASEDTOKENAPI +pre-registration-default.properties,mosip.database.ip +pre-registration-default.properties,mosip.database.port pre-registration-default.properties,logging.level.com.zaxxer.hikari pre-registration-default.properties,mosip.adult.age pre-registration-default.properties,demographic.service.env @@ -183,6 +210,7 @@ pre-registration-default.properties,document.service.env pre-registration-default.properties,booking.service.env pre-registration-default.properties,preregistration.timespan.rebook pre-registration-default.properties,preregistration.timespan.cancel +pre-registration-default.properties,batch.service.env pre-registration-default.properties,mosip.batch.token.authmanager.password pre-registration-default.properties,batch.appointment.cancel pre-registration-default.properties,masterdata.service.env @@ -190,6 +218,7 @@ pre-registration-default.properties,holiday.exceptional.url pre-registration-default.properties,working.day.url pre-registration-default.properties,mosip.kernel.masterdata.validdoc.rest.uri pre-registration-default.properties,notification.service.env +pre-registration-default.properties,notification.url pre-registration-default.properties,email.service.env pre-registration-default.properties,sms.service.env pre-registration-default.properties,audit.service.env @@ -202,20 +231,26 @@ pre-registration-default.properties,mosip.base.url pre-registration-default.properties,ui.config.params pre-registration-default.properties,preregistration.ui.version pre-registration-default.properties,mosip.preregistration.captcha.secretkey +pre-registration-default.properties,mosip.preregistration.captcha.resourse.url +print-default.properties,mosip.event.callBackUrl print-default.properties,mosip.event.secret print-default.properties,PDFSIGN print-default.properties,registration.processor.identityjson print-default.properties,mosip.print.prependThumbprint +registration-default.properties,mosip.registration.consent_ara registration-default.properties,mosip.registration.consent_fra registration-default.properties,mosip.mdm.enabled registration-default.properties,mosip.kernel.transliteration.arabic-language-code +registration-default.properties,mosip.kernel.transliteration.franch-language-code registration-default.properties,mosip.registration.reset_password_url +registration-processor-default.properties,mosip.registration.processor.database.hostname registration-processor-default.properties,registration.processor.zone registration-processor-default.properties,cluster.manager.file.name registration-processor-default.properties,camel.secure.active.flows.file.names registration-processor-default.properties,mosip.regproc.workflow.complete.topic registration-processor-default.properties,token.request.secretKey registration-processor-default.properties,INTERNALAUTH +registration-processor-default.properties,REVERSEDATASYNC registration-processor-default.properties,DEVICEVALIDATEHISTORY registration-processor-default.properties,IDREPOSITORY registration-processor-default.properties,IDREPOGETIDBYUIN @@ -229,16 +264,21 @@ registration-processor-default.properties,GETVIDSBYUIN registration-processor-default.properties,ENCRYPTURL registration-processor-default.properties,ENCRYPTIONSERVICE registration-processor-default.properties,DIGITALSIGNATURE 
+registration-processor-default.properties,NGINXDMZURL registration-processor-default.properties,CRYPTOMANAGERDECRYPT registration-processor-default.properties,SMSNOTIFIER registration-processor-default.properties,EMAILNOTIFIER registration-processor-default.properties,PMS +registration-processor-default.properties,registration.processor.queue.username registration-processor-default.properties,registration.processor.queue.password +registration-processor-default.properties,registration.processor.queue.url +registration-processor-default.properties,registration.processor.reprocess.minutes registration-processor-default.properties,IDAUTHENCRYPTION registration-processor-default.properties,IDAUTHPUBLICKEY registration-processor-default.properties,IDAUTHCERTIFICATE registration-processor-default.properties,ida-internal-auth-uri registration-processor-default.properties,ida-internal-get-certificate-uri +registration-processor-default.properties,packetmanager.base.url registration-processor-default.properties,DATASHARECREATEURL registration-processor-default.properties,DATASHAREGETEURL registration-processor-default.properties,mosip.regproc.eventbus.kafka.bootstrap.servers @@ -263,10 +303,12 @@ registration-processor-default.properties,mosip.regproc.message.sender.eventbus. registration-processor-default.properties,mosip.regproc.printing.eventbus.kafka.max.poll.records registration-processor-default.properties,mosip.regproc.packet.classifier.tag-generators registration-processor-default.properties,mosip.regproc.packet.classifier.tagging.metainfo.operationsdata.tag-labels +registration-processor-default.properties,registration.processor.queue.manual.adjudication.request resident-default.properties,mosip.resident.service.status.check.id resident-default.properties,DECRYPT_API_URL resident-default.properties,resident.secretKey resident-default.properties,KERNELAUTHMANAGER +resident-default.properties,REGPROCPRINT resident-default.properties,INTERNALAUTHTRANSACTIONS resident-default.properties,KERNELENCRYPTIONSERVICE resident-default.properties,IDAUTHCREATEVID @@ -275,7 +317,9 @@ resident-default.properties,IDREPOGETIDBYRID resident-default.properties,RIDGENERATION resident-default.properties,MIDSCHEMAURL resident-default.properties,SYNCSERVICE +resident-default.properties,PACKETRECEIVER resident-default.properties,AUTHTYPESTATUSUPDATE +resident-default.properties,REGISTRATIONSTATUSSEARCH resident-default.properties,POLICY_REQ_URL resident-default.properties,OTP_GEN_URL resident-default.properties,CREDENTIAL_STATUS_URL @@ -285,4 +329,82 @@ resident-default.properties,CREDENTIAL_TYPES_URL resident-default.properties,PARTNER_API_URL resident-default.properties,resident.identityjson Syncdata-default.properties,licensekeymanager_database_password +registration-processor-default.properties,registration.processor.reprocess.minutes: +registration-processor-default.properties,mosip.kernel.virus-scanner.port: +registration-processor-default.properties,registration.processor.queue.username: +registration-default.properties,mosip.registration.num_of_fingerprint_retries +registration-default.properties,mosip.registration.num_of_iris_retries +registration-default.properties,mosip.registration.num_of_face_retries +registration-default.properties,mosip.registration.leftslap_fingerprint_threshold +registration-default.properties,mosip.registration.rightslap_fingerprint_threshold +registration-default.properties,mosip.registration.thumbs_fingerprint_threshold 
+registration-default.properties,mosip.registration.iris_threshold +registration-default.properties,mosip.registration.document_size +registration-default.properties,mosip.registration.pre_reg_no_of_days_limit +registration-default.properties,mosip.registration.capture_time_out +registration-default.properties,mosip.registration.reg_pak_max_cnt_apprv_limit +registration-default.properties,mosip.registration.reg_pak_max_time_apprv_limit +registration-default.properties,mosip.registration.audit_log_deletion_configured_days +registration-default.properties,mosip.registration.reg_deletion_configured_days +registration-default.properties,mosip.registration.pre_reg_deletion_configured_days +registration-default.properties,mosip.registration.sync_transaction_no_of_days_limit +registration-default.properties,mosip.registration.last_export_registration_config_time +registration-default.properties,mosip.registration.registration_packet_store_location +registration-default.properties,mosip.registration.registration_pre_reg_packet_location +registration-default.properties,mosip.registration.mds.deduplication.enable.flag +registration-default.properties,mosip.registration.mdm.portRangeFrom +registration-default.properties,mosip.registration.mdm.portRangeTo +registration-default.properties,mosip.registration.server_profile +application-default.properties,mosip.right_to_left_orientation +application-default.properties,mosip.kernel.keygenerator.symmetric-key-length +application-default.properties,mosip.kernel.keygenerator.asymmetric-key-length +application-default.properties,mosip.kernel.keygenerator.asymmetric-algorithm-name +application-default.properties,mosip.kernel.keygenerator.symmetric-algorithm-name +application-default.properties,mosip.kernel.crypto.symmetric-algorithm-name +application-default.properties,mosip.kernel.crypto.asymmetric-algorithm-name +application-default.properties,mosip.kernel.crypto.gcm-tag-length +application-default.properties,mosip.kernel.crypto.hash-symmetric-key-length +application-default.properties,mosip.kernel.crypto.hash-algorithm-name +application-default.properties,mosip.kernel.crypto.sign-algorithm-name +application-default.properties,mosip.kernel.crypto.hash-iteration +application-default.properties,mosip.kernel.data-key-splitter +application-default.properties,mosip.kernel.signature.signature-request-id +application-default.properties,mosip.kernel.signature.signature-version-id +application-default.properties,mosip.kernel.prid.restricted-numbers +application-default.properties,mosip.kernel.prid.length +application-default.properties,mosip.kernel.prid.sequence-limit +application-default.properties,mosip.kernel.prid.repeating-block-limit +application-default.properties,mosip.kernel.prid.repeating-limit +application-default.properties,mosip.kernel.prid.not-start-with +application-default.properties,mosip.kernel.uin.length +application-default.properties,mosip.kernel.uin.restricted-numbers +application-default.properties,mosip.kernel.uin.length.repeating-block-limit +application-default.properties,mosip.kernel.uin.length.sequence-limit +application-default.properties,mosip.kernel.uin.length.repeating-limit +application-default.properties,mosip.kernel.uin.length.conjugative-even-digits-limit +application-default.properties,mosip.kernel.uin.length.reverse-digits-limit +application-default.properties,mosip.kernel.uin.length.digits-limit +application-default.properties,mosip.kernel.vid.restricted-numbers +application-default.properties,mosip.kernel.vid.not-start-with 
+application-default.properties,mosip.kernel.vid.length.repeating-limit +application-default.properties,mosip.kernel.vid.length.repeating-block-limit +application-default.properties,mosip.kernel.vid.length.sequence-limit +application-default.properties,mosip.kernel.vid.length +application-default.properties,mosip.kernel.registrationcenterid.length +application-default.properties,mosip.kernel.machineid.length +application-default.properties,mosip.kernel.rid.length +application-default.properties,mosip.kernel.rid.timestamp-length +application-default.properties,mosip.kernel.rid.sequence-length +application-default.properties,mosip.kernel.otp.expiry-time +application-default.properties,mosip.primary-language +application-default.properties,mosip.secondary-language +application-default.properties,mosip.kernel.applicant.type.age.limit +application-default.properties,mosip.idrepo.identity.allowedBioAttributes +application-default.properties,mosip.idrepo.identity.bioAttributes application-default.properties,mosip.supported-languages +application-default.properties,mosip.stage.environment +application-default.properties,mosip.keycloak.max-no-of-users +application-default.properties,mosip.recommended.centers.locCode +application-default.properties,mosip.country.code +application-default.properties,mosip.notification.timezone +application-default.properties,mosip.kernel.filtervalue.max_columns diff --git a/deployment/v3/utils/prop_migrator/knowledge/new-property-with-decent-default-value.csv b/deployment/v3/utils/prop_migrator/knowledge/new-property-with-decent-default-value.csv index c0b8bc8cb..288a891ae 100644 --- a/deployment/v3/utils/prop_migrator/knowledge/new-property-with-decent-default-value.csv +++ b/deployment/v3/utils/prop_migrator/knowledge/new-property-with-decent-default-value.csv @@ -62,6 +62,45 @@ admin-default.properties,mosip.iam.end-session-endpoint-path application-default.properties,mosip.recommended.centers.locCode application-default.properties,mosipbox.public.url application-default.properties,mosip.api.internal.url +<<<<<<< HEAD +======= +application-default.properties,mosip.kernel.authmanager.url +application-default.properties,mosip.kernel.masterdata.url +application-default.properties,mosip.kernel.keymanager.url +application-default.properties,mosip.kernel.auditmanager.url +application-default.properties,mosip.kernel.notification.url +application-default.properties,mosip.kernel.idgenerator.url +application-default.properties,mosip.kernel.otpmanager.url +application-default.properties,mosip.kernel.syncdata.url +application-default.properties,mosip.kernel.pridgenerator.url +application-default.properties,mosip.kernel.ridgenerator.url +application-default.properties,mosip.idrepo.identity.url +application-default.properties,mosip.idrepo.vid.url +application-default.properties,mosip.admin.hotlist.url +application-default.properties,mosip.admin.service.url +application-default.properties,mosip.admin.ui.url +application-default.properties,mosip.pms.policymanager.url +application-default.properties,mosip.pms.partnermanager.url +application-default.properties,mosip.pms.ui.url +application-default.properties,mosip.idrepo.credrequest.generator.url +application-default.properties,mosip.idrepo.credential.service.url +application-default.properties,mosip.datashare.url +application-default.properties,mosip.mock.biosdk.url +application-default.properties,mosip.regproc.biosdk.url +application-default.properties,mosip.idrepo.biosdk.url 
+application-default.properties,mosip.regproc.workflow.url +application-default.properties,mosip.regproc.status.service.url +application-default.properties,mosip.regproc.transaction.service.url +application-default.properties,mosip.packet.receiver.url +application-default.properties,mosip.websub.url +application-default.properties,mosip.consolidator.url +application-default.properties,mosip.file.server.url +application-default.properties,mosip.ida.internal.url +application-default.properties,mosip.ida.auth.url +application-default.properties,mosip.ida.otp.url +application-default.properties,mosip.resident.url +application-default.properties,mosip.artifactory.url +>>>>>>> [DSD-2904] added property migrator application-default.properties,config.server.file.storage.uri application-default.properties,mosip.idobjectvalidator.masterdata.rest.uri application-default.properties,mosip.kernel.idobjectvalidator.identity.id-schema-version-path @@ -75,6 +114,10 @@ application-default.properties,auth.server.admin.issuer.uri application-default.properties,mosip.kernel.auth.appids.realm.map application-default.properties,mosip.kernel.keymanager-service-CsSign-url application-default.properties,mosip.kernel.transliteration.english-language-code +<<<<<<< HEAD +======= +application-default.properties,mosip.kernel.transliteration.french-language-code +>>>>>>> [DSD-2904] added property migrator application-default.properties,mosip.kernel.syncdata-service-get-tpm-publicKey-url application-default.properties,mosip.kernel.keymanager-service-csverifysign-url application-default.properties,packetmanager.packet.signature.disable-verification @@ -96,6 +139,10 @@ data-share-default.properties,auth.server.admin.allowed.audience data-share-default.properties,mosip.auth.filter_disable data-share-default.properties,object.store.s3.accesskey data-share-default.properties,object.store.s3.secretkey +<<<<<<< HEAD +======= +data-share-default.properties,object.store.s3.url +>>>>>>> [DSD-2904] added property migrator data-share-default.properties,object.store.s3.region data-share-default.properties,object.store.s3.readlimit kernel-default.properties,softhsm.kernel.pin @@ -418,9 +465,52 @@ kernel-default.properties,auth.allowed.urls application-default.properties,mosip.recommended.centers.locCode application-default.properties,mosipbox.public.url application-default.properties,mosip.api.internal.url +<<<<<<< HEAD application-default.properties,config.server.file.storage.uri application-default.properties,mosip.idobjectvalidator.masterdata.rest.uri application-default.properties,mosip.kernel.idobjectvalidator.identity.id-schema-version-path +======= +application-default.properties,mosip.kernel.authmanager.url +application-default.properties,mosip.kernel.masterdata.url +application-default.properties,mosip.kernel.keymanager.url +application-default.properties,mosip.kernel.auditmanager.url +application-default.properties,mosip.kernel.notification.url +application-default.properties,mosip.kernel.idgenerator.url +application-default.properties,mosip.kernel.otpmanager.url +application-default.properties,mosip.kernel.syncdata.url +application-default.properties,mosip.kernel.pridgenerator.url +application-default.properties,mosip.kernel.ridgenerator.url +application-default.properties,mosip.idrepo.identity.url +application-default.properties,mosip.idrepo.vid.url +application-default.properties,mosip.admin.hotlist.url +application-default.properties,mosip.admin.service.url +application-default.properties,mosip.admin.ui.url 
+application-default.properties,mosip.pms.policymanager.url +application-default.properties,mosip.pms.partnermanager.url +application-default.properties,mosip.pms.ui.url +application-default.properties,mosip.idrepo.credrequest.generator.url +application-default.properties,mosip.idrepo.credential.service.url +application-default.properties,mosip.datashare.url +application-default.properties,mosip.mock.biosdk.url +application-default.properties,mosip.regproc.biosdk.url +application-default.properties,mosip.idrepo.biosdk.url +application-default.properties,mosip.regproc.workflow.url +application-default.properties,mosip.regproc.status.service.url +application-default.properties,mosip.regproc.transaction.service.url +application-default.properties,mosip.packet.receiver.url +application-default.properties,mosip.websub.url +application-default.properties,mosip.consolidator.url +application-default.properties,mosip.file.server.url +application-default.properties,mosip.ida.internal.url +application-default.properties,mosip.ida.auth.url +application-default.properties,mosip.ida.otp.url +application-default.properties,mosip.resident.url +application-default.properties,mosip.artifactory.url +application-default.properties,config.server.file.storage.uri +application-default.properties,mosip.idobjectvalidator.masterdata.rest.uri +application-default.properties,mosip.kernel.idobjectvalidator.identity.id-schema-version-path +application-default.properties,mosip.kernel.idobjectvalidator.identity.dob-path +>>>>>>> [DSD-2904] added property migrator application-default.properties,mosip.idobjectvalidator.refresh-cache-on-unknown-value application-default.properties,mosip.kernel.idobjectvalidator.mandatory-attributes.reg-processor.biometric_correction application-default.properties,mosip.kernel.idobjectvalidator.mandatory-attributes.reg-processor.opencrvs_new @@ -430,16 +520,32 @@ application-default.properties,auth.server.admin.issuer.uri application-default.properties,mosip.kernel.auth.appids.realm.map application-default.properties,mosip.kernel.keymanager-service-CsSign-url application-default.properties,mosip.kernel.transliteration.english-language-code +<<<<<<< HEAD +======= +application-default.properties,mosip.kernel.transliteration.french-language-code +>>>>>>> [DSD-2904] added property migrator application-default.properties,mosip.kernel.syncdata-service-get-tpm-publicKey-url application-default.properties,mosip.kernel.keymanager-service-csverifysign-url application-default.properties,packetmanager.packet.signature.disable-verification application-default.properties,provider.packetreader.opencrvs application-default.properties,provider.packetwriter.opencrvs +<<<<<<< HEAD +======= +application-default.properties,logging.level.io.mosip.registration.processor.status +>>>>>>> [DSD-2904] added property migrator application-default.properties,mosip.mandatory-languages application-default.properties,mosip.optional-languages application-default.properties,mosip.min-languages.count application-default.properties,mosip.max-languages.count +<<<<<<< HEAD application-default.properties,mosip.identity.mapping-file +======= +application-default.properties,mosip.default.template-languages +application-default.properties,mosip.default.user-preferred-language-attribute +application-default.properties,mosip.identity.mapping-file +application-default.properties,mosip.notification.timezone +application-default.properties,mosip.centertypecode.validate.regex +>>>>>>> [DSD-2904] added property migrator 
application-default.properties,openapi.service.servers[0].url application-default.properties,openapi.service.servers[0].description application-default.properties,mosip.auth.filter_disable @@ -546,6 +652,14 @@ id-authentication-default.properties,mosip.role.keymanager.postvalidate id-authentication-default.properties,mosip.role.keymanager.postpdfsign id-authentication-default.properties,mosip.role.keymanager.postjwtsign id-authentication-default.properties,mosip.role.keymanager.postjwtverify +<<<<<<< HEAD +id-authentication-default.properties,ida-topic-pmp-oidc-client-updated +id-authentication-default.properties,idp.amr-acr.mapping.json.filename +id-authentication-default.properties,ida.api.id.kycauth +id-authentication-default.properties,ida-topic-pmp-oidc-client-created +id-authentication-default.properties,ida-topic-pmp-ca-certificate-uploaded +======= +>>>>>>> [DSD-2904] added property migrator id-repository-default.properties,mosip.idrepo.auth.client-id id-repository-default.properties,mosip.idrepo.auth.secret-key id-repository-default.properties,mosip.idrepo.auth.app-id @@ -562,6 +676,10 @@ id-repository-default.properties,mosip.idrepo.credential.cancel-request.rest.htt id-repository-default.properties,mosip.idrepo.credential.cancel-request.rest.headers.mediaType id-repository-default.properties,mosip.idrepo.credential.cancel-request.rest.timeout id-repository-default.properties,mosip.idrepo.credential-status-update-job.fixed-delay-in-ms +<<<<<<< HEAD +======= +id-repository-default.properties,idrepo-dummy-online-verification-partner-id +>>>>>>> [DSD-2904] added property migrator id-repository-default.properties,mosip.idrepo.websub.vid-credential-update.callback-url id-repository-default.properties,mosip.idrepo.websub.vid-credential-update.topic id-repository-default.properties,mosip.idrepo.websub.vid-credential-update.secret @@ -626,7 +744,17 @@ id-repository-default.properties,mosip.role.idrepo.vid.postvidregenerate id-repository-default.properties,mosip.role.idrepo.vid.postviddeactivate id-repository-default.properties,mosip.role.idrepo.vid.postvidreactivate id-repository-default.properties,mosip.role.idrepo.vid.postdraftvid +<<<<<<< HEAD +id-repository-default.properties,credential.batch.status +id-repository-default.properties,mosip.role.idrepo.identity.getRidByIndividualId +mock-abis-default.properties,secret_url +======= mock-abis-default.properties,secret_url +mock-abis-default.properties,secret_url.clientnId +mock-abis-default.properties,secret_url.id +mock-abis-default.properties,secret_url.secretKey +mock-abis-default.properties,secret_url.appId +>>>>>>> [DSD-2904] added property migrator partner-management-default.properties,auth.allowed.urls partner-management-default.properties,pmp.partner.mobileNumbe.max.length partner-management-default.properties,mosip.iam.adapter.clientid @@ -660,8 +788,16 @@ partner-management-default.properties,pms.notifications-schedule.fixed-rate partner-management-default.properties,partner.register.as.user.in.iam.enable partner-management-default.properties,mosip.iam.post-logout-uri-param-key partner-management-default.properties,mosip.iam.end-session-endpoint-path +<<<<<<< HEAD pre-registration-default.properties,mosip.preregistration.sync.sign.appid pre-registration-default.properties,mosip.preregistration.sync.sign.refid +======= +pre-registration-default.properties,mosip.prereg.application.url +pre-registration-default.properties,mosip.prereg.booking.url +pre-registration-default.properties,mosip.preregistration.sync.sign.appid 
+pre-registration-default.properties,mosip.preregistration.sync.sign.refid +pre-registration-default.properties,preregistration.job.schedule.cron.purgeExpiredRegCenterSlots +>>>>>>> [DSD-2904] added property migrator pre-registration-default.properties,mosip.kernel.masterdata.day.codes.map pre-registration-default.properties,cancel.appointment.email.subject pre-registration-default.properties,auth.server.admin.allowed.audience @@ -679,7 +815,13 @@ pre-registration-default.properties,mosip.lang.traslate.adapter.impl.basepackage pre-registration-default.properties,mosip.id.validation.identity.email pre-registration-default.properties,mosip.id.validation.identity.phone pre-registration-default.properties,mosip.preregistration.captcha.sitekey +<<<<<<< HEAD pre-registration-default.properties,object.store.s3.use.account.as.bucketname +======= +pre-registration-default.properties,"mosip.security.origins:localhost:8080,localhost:4200,${mosip.api.internal.url}" +pre-registration-default.properties,object.store.s3.use.account.as.bucketname +pre-registration-default.properties,spring.cache.type +>>>>>>> [DSD-2904] added property migrator pre-registration-default.properties,mosip.preregistration.appointment.getavailablity.url pre-registration-default.properties,mosip.preregistration.appointment.book.url pre-registration-default.properties,mosip.preregistration.appointment.multi.book.url @@ -698,6 +840,10 @@ pre-registration-default.properties,mosip.security.authentication.provider.beans pre-registration-default.properties,mosip.security.authentication.provider.beans.list.pre-registration-booking-service pre-registration-default.properties,object.store.s3.accesskey pre-registration-default.properties,object.store.s3.secretkey +<<<<<<< HEAD +======= +pre-registration-default.properties,object.store.s3.url +>>>>>>> [DSD-2904] added property migrator pre-registration-default.properties,object.store.s3.region pre-registration-default.properties,object.store.s3.readlimit pre-registration-default.properties,mosip.role.prereg.postapplications @@ -742,6 +888,10 @@ pre-registration-default.properties,mosip.role.prereg.getappointmentpreregistrat pre-registration-default.properties,mosip.role.prereg.getappointmentregistrationcenterid print-default.properties,mosip.event.delay-millisecs print-default.properties,print-websub-resubscription-delay-millisecs +<<<<<<< HEAD +======= +print-default.properties,mosip.template-language +>>>>>>> [DSD-2904] added property migrator print-default.properties,mosip.optional-languages print-default.properties,mosip.mandatory-languages print-default.properties,mosip.iam.adapter.clientid @@ -759,6 +909,15 @@ registration-default.properties,mosip.registration.reviewer_authentication_confi registration-default.properties,mosip.registration.supervisor_approval_config_flag registration-default.properties,mosip.registration.idle_time registration-default.properties,mosip.kernel.transliteration.english-language-code +<<<<<<< HEAD +======= +registration-default.properties,mosip.registration.onboard_yourself_url +registration-default.properties,mosip.registration.registering_individual_url +registration-default.properties,mosip.registration.sync_data_url +registration-default.properties,mosip.registration.mapping_devices_url +registration-default.properties,mosip.registration.uploading_data_url +registration-default.properties,mosip.registration.updating_biometrics_url +>>>>>>> [DSD-2904] added property migrator registration-default.properties,mosip.registration.mdm.validate.trust 
registration-default.properties,mosip.registration.mdm.connection.timeout registration-default.properties,mosip.registration.mdm.RCAPTURE.connection.timeout @@ -814,6 +973,16 @@ registration-default.properties,mosip.kernel.rid.sequence-length registration-default.properties,mosip.kernel.virus-scanner.host registration-default.properties,mosip.kernel.virus-scanner.port registration-default.properties,mosip.kernel.otp.expiry-time +<<<<<<< HEAD +======= +registration-default.properties,mosip.primary-language +registration-default.properties,mosip.secondary-language +registration-default.properties,mosip.kernel.applicant.type.age.limit +registration-processor-default.properties,mosip.regproc.notification.url +registration-processor-default.properties,mosip.preferred-language.enabled +registration-processor-default.properties,registration.processor.main-processes +registration-processor-default.properties,registration.processor.sub-processes +>>>>>>> [DSD-2904] added property migrator registration-processor-default.properties,IDAINTERNAL registration-processor-default.properties,LANGUAGE registration-processor-default.properties,IDENTITY @@ -824,6 +993,10 @@ registration-processor-default.properties,IDREPOUPDATEDRAFT registration-processor-default.properties,IDREPOPUBLISHDRAFT registration-processor-default.properties,IDREPOEXTRACTBIOMETRICS registration-processor-default.properties,KEYMANAGER +<<<<<<< HEAD +======= +registration-processor-default.properties,registration.processor.demodedupe.manual.adjudication.status +>>>>>>> [DSD-2904] added property migrator registration-processor-default.properties,DEVICEHOTLIST registration-processor-default.properties,JWTVERIFY registration-processor-default.properties,NOTIFIER @@ -837,6 +1010,11 @@ registration-processor-default.properties,mosip.registration.processor.packet.st registration-processor-default.properties,mosip.registration.processor.packet.status.transactiontypecodes-time-based-resend-required registration-processor-default.properties,mosip.registration.processor.registration.status.external-statuses-to-consider-processed registration-processor-default.properties,mosip.registration.processor.postalcode.req.url +<<<<<<< HEAD +======= +registration-processor-default.properties,mosip.identity.auth.internal.env +registration-processor-default.properties,mosip.registration.processor.lostrid.registrationdate.pattern +>>>>>>> [DSD-2904] added property migrator registration-processor-default.properties,mosip.regproc.packet.receiver.eventbus.kafka.commit.type registration-processor-default.properties,mosip.regproc.packet.receiver.eventbus.kafka.max.poll.records registration-processor-default.properties,mosip.regproc.packet.receiver.eventbus.kafka.poll.frequency @@ -845,13 +1023,25 @@ registration-processor-default.properties,mosip.regproc.packet.receiver.server.s registration-processor-default.properties,mosip.regproc.packet.receiver.server.port registration-processor-default.properties,mosip.regproc.packet.receiver.eventbus.port registration-processor-default.properties,mosip.regproc.packet.receiver.message.tag.loading.disable +<<<<<<< HEAD +registration-processor-default.properties,registration.processor.notification_service_subscriber_callback_url +======= +registration-processor-default.properties,mosip.regproc.virusscanner.provider +registration-processor-default.properties,registration.processor.notification_service_subscriber_secret registration-processor-default.properties,registration.processor.notification_service_subscriber_callback_url 
+registration-processor-default.properties,registration.processor.notification_service_pausedforadditonalinfo_subscriber_secret +>>>>>>> [DSD-2904] added property migrator registration-processor-default.properties,mosip.regproc.workflow.pausedforadditionalinfo.topic registration-processor-default.properties,registration.processor.notification_service_pausedforadditonalinfo_subscriber_callback_url registration-processor-default.properties,mosip.regproc.notification_service.biometric_correction.email registration-processor-default.properties,mosip.regproc.notification_service.biometric_correction.sms registration-processor-default.properties,mosip.regproc.notification_service.biometric_correction.subject registration-processor-default.properties,registration.processor.queue.connection.retry.count +<<<<<<< HEAD +======= +registration-processor-default.properties,registration.processor.queue.manualverification.request +registration-processor-default.properties,registration.processor.queue.manualverification.response +>>>>>>> [DSD-2904] added property migrator registration-processor-default.properties,registration.processor.reprocess.limit registration-processor-default.properties,registration.processor.pause.packets.for.backpressure registration-processor-default.properties,mosip.regproc.verification.eventbus.kafka.commit.type @@ -860,8 +1050,16 @@ registration-processor-default.properties,mosip.regproc.verification.eventbus.ka registration-processor-default.properties,mosip.regproc.verification.eventbus.kafka.group.id registration-processor-default.properties,mosip.regproc.verification.message.expiry-time-limit registration-processor-default.properties,registration.processor.verification.queue.typeOfQueue +<<<<<<< HEAD registration-processor-default.properties,registration.processor.verification.policy.id registration-processor-default.properties,registration.processor.verification.subscriber.id +======= +registration-processor-default.properties,registration.processor.verification.queue.response +registration-processor-default.properties,registration.processor.queue.verification.request +registration-processor-default.properties,registration.processor.verification.policy.id +registration-processor-default.properties,registration.processor.verification.subscriber.id +registration-processor-default.properties,registration.processor.queue.verification.request.messageTTL +>>>>>>> [DSD-2904] added property migrator registration-processor-default.properties,mosip.regproc.verification.eventbus.port registration-processor-default.properties,mosip.regproc.verification.server.port registration-processor-default.properties,mosip.regproc.verification.server.servlet.path @@ -879,6 +1077,12 @@ registration-processor-default.properties,mosip.registration.processor.manual.ad registration-processor-default.properties,mosip.registration.processor.manual.adjudication.biometric.id registration-processor-default.properties,mosip.registration.processor.manual.adjudication.demographic.id registration-processor-default.properties,mosip.registration.processor.manual.adjudication.packetinfo.id +<<<<<<< HEAD +======= +registration-processor-default.properties,registration.processor.manual.adjudication.queue.response +registration-processor-default.properties,registration.processor.queue.manual.adjudication.request.messageTTL +registration-processor-default.properties,registration.processor.manual.adjudication.reprocess.buffer.time +>>>>>>> [DSD-2904] added property migrator 
registration-processor-default.properties,mosip.regproc.workflow.complete.topic registration-processor-default.properties,mosip.regproc.workflow.action.job.server.port registration-processor-default.properties,mosip.regproc.workflow.action.job.eventbus.port @@ -910,6 +1114,10 @@ registration-processor-default.properties,mosip.regproc.workflow.manager.message registration-processor-default.properties,PACKETMANAGER_DELETE_TAGS registration-processor-default.properties,PACKETMANAGER_GET_TAGS registration-processor-default.properties,mosip.regproc.data.share.internal.domain.name +<<<<<<< HEAD +======= +registration-processor-default.properties,mosip.regproc.data.share.protocol +>>>>>>> [DSD-2904] added property migrator registration-processor-default.properties,mosip.biosdk.default.service.url registration-processor-default.properties,mosip.biometric.sdk.providers.finger.mosip-ref-impl-sdk-client.classname registration-processor-default.properties,mosip.biometric.sdk.providers.finger.mosip-ref-impl-sdk-client.version @@ -924,6 +1132,10 @@ registration-processor-default.properties,mosip.biometric.sdk.providers.face.mos registration-processor-default.properties,mosip.regproc.mosip-stage-executor.stage-beans-base-packages.default registration-processor-default.properties,mosip.anonymous.profile.eventbus.address registration-processor-default.properties,mosip.regproc.camelbridge.endpoint-prefix +<<<<<<< HEAD +======= +registration-processor-default.properties,mosip.regproc.camelbridge.pause-settings +>>>>>>> [DSD-2904] added property migrator registration-processor-default.properties,mosip.regproc.securezone.notification.server.port registration-processor-default.properties,mosip.regproc.securezone.notification.server.servlet.path registration-processor-default.properties,mosip.regproc.securezone.notification.eventbus.port @@ -937,6 +1149,10 @@ registration-processor-default.properties,mosip.regproc.packet.validator.server. 
registration-processor-default.properties,mosip.regproc.packet.validator.eventbus.port registration-processor-default.properties,mosip.regproc.packet.validator.server.servlet.path registration-processor-default.properties,mosip.regproc.packet.validator.validate-applicant-document +<<<<<<< HEAD +======= +registration-processor-default.properties,mosip.regproc.packet.validator.validate-applicant-document.processes +>>>>>>> [DSD-2904] added property migrator registration-processor-default.properties,mosip.regproc.operator-validator.eventbus.kafka.commit.type registration-processor-default.properties,mosip.regproc.operator-validator.eventbus.kafka.max.poll.records registration-processor-default.properties,mosip.regproc.operator-validator.eventbus.kafka.poll.frequency @@ -959,6 +1175,10 @@ registration-processor-default.properties,mosip.regproc.cmd-validator.device-val registration-processor-default.properties,mosip.regproc.cmd-validator.working-hour-validation-required registration-processor-default.properties,mosip.regproc.cmd-validator.device.disable-trust-validation registration-processor-default.properties,mosip.regproc.cmd-validator.device.allowed-digital-id-timestamp-variation +<<<<<<< HEAD +======= +registration-processor-default.properties,mosip.regproc.cmd-validator.device.digital-id-timestamp-format +>>>>>>> [DSD-2904] added property migrator registration-processor-default.properties,mosip.regproc.packet.classifier.server.port registration-processor-default.properties,mosip.regproc.packet.classifier.eventbus.port registration-processor-default.properties,mosip.regproc.packet.classifier.server.servlet.path @@ -970,8 +1190,15 @@ registration-processor-default.properties,mosip.regproc.quality.classifier.messa registration-processor-default.properties,mosip.regproc.quality.classifier.server.port registration-processor-default.properties,mosip.regproc.quality.classifier.eventbus.port registration-processor-default.properties,mosip.regproc.quality.classifier.server.servlet.path +<<<<<<< HEAD +registration-processor-default.properties,mosip.regproc.quality.classifier.tagging.quality.prefix +registration-processor-default.properties,mosip.regproc.quality.classifier.tagging.quality.biometric-not-available-tag-value +======= +registration-processor-default.properties,mosip.regproc.quality.classifier.tagging.quality.ranges registration-processor-default.properties,mosip.regproc.quality.classifier.tagging.quality.prefix registration-processor-default.properties,mosip.regproc.quality.classifier.tagging.quality.biometric-not-available-tag-value +registration-processor-default.properties,mosip.regproc.quality.classifier.tagging.quality.modalities +>>>>>>> [DSD-2904] added property migrator registration-processor-default.properties,mosip.regproc.introducer-validator.eventbus.kafka.commit.type registration-processor-default.properties,mosip.regproc.introducer-validator.eventbus.kafka.max.poll.records registration-processor-default.properties,mosip.regproc.introducer-validator.eventbus.kafka.poll.frequency @@ -986,6 +1213,13 @@ registration-processor-default.properties,mosip.regproc.demo.dedupe.server.servl registration-processor-default.properties,mosip.regproc.abis.handler.server.port registration-processor-default.properties,mosip.regproc.abis.handler.eventbus.port registration-processor-default.properties,mosip.regproc.abis.handler.server.servlet.path +<<<<<<< HEAD +======= +registration-processor-default.properties,mosip.regproc.abis.handler.biometric-modalities-segments-mapping.INFANT 
+registration-processor-default.properties,mosip.regproc.abis.handler.biometric-modalities-segments-mapping.MINOR +registration-processor-default.properties,mosip.regproc.abis.handler.biometric-modalities-segments-mapping.ADULT +registration-processor-default.properties,mosip.regproc.abis.handler.biometric-segments-exceptions-mapping +>>>>>>> [DSD-2904] added property migrator registration-processor-default.properties,mosip.regproc.bio.dedupe.server.port registration-processor-default.properties,mosip.regproc.bio.dedupe.eventbus.port registration-processor-default.properties,mosip.regproc.bio.dedupe.server.servlet.path @@ -1120,6 +1354,10 @@ registration-processor-default.properties,mosip.iam.adapter.self-token-renewal-e registration-processor-default.properties,mosip.auth.filter_disable registration-processor-default.properties,object.store.s3.accesskey registration-processor-default.properties,object.store.s3.secretkey +<<<<<<< HEAD +======= +registration-processor-default.properties,object.store.s3.url +>>>>>>> [DSD-2904] added property migrator registration-processor-default.properties,object.store.s3.region registration-processor-default.properties,object.store.s3.readlimit registration-processor-default.properties,mosip.role.registration.getGetsearchrid @@ -1132,6 +1370,10 @@ registration-processor-default.properties,mosip.role.registration.getPostlostrid registration-processor-default.properties,mosip.role.registration.getPostsync registration-processor-default.properties,mosip.role.registration.getPostsyncv2 registration-processor-default.properties,auth.server.admin.allowed.audience +<<<<<<< HEAD +======= +registration-processor-default.properties,mosip.regproc.cbeff-validation.mandatory.modalities +>>>>>>> [DSD-2904] added property migrator registration-processor-default.properties,registration.processor.lostrid.max.registrationid registration-processor-default.properties,mosip.registration.processor.lostrid.max-registration-date-filter-interval resident-default.properties,MACHINESEARCH @@ -1146,6 +1388,11 @@ resident-default.properties,auth.server.admin.issuer.uri resident-default.properties,mosip.service-context resident-default.properties,mosip.service.end-points resident-default.properties,mosip.service.exclude.auth.allowed.method +<<<<<<< HEAD +resident-default.properties,resident.update-uin.machine-spec-id +resident-default.properties,ida.online-verification-partner-id +======= +>>>>>>> [DSD-2904] added property migrator syncdata-default.properties,mosip.iam.role-based-user-url syncdata-default.properties,mosip.iam.adapter.clientid syncdata-default.properties,mosip.iam.adapter.clientsecret @@ -1274,13 +1521,21 @@ resident-default.properties,sequence-order resident-default.properties,resident.template.bell-icon.request-received.order-a-physical-card resident-default.properties,mosip.resident.sign.pdf.reference.id resident-default.properties,resident.template.ack.update-demographic-data +<<<<<<< HEAD resident-default.properties,resident.update-uin.machine-name-prefix +======= +resident-default.properties,resident.update-uin.machine-name-prefix +>>>>>>> [DSD-2904] added property migrator resident-default.properties,mosip.scope.resident.postAuthTypeStatus resident-default.properties,resident.template.tnc.share-cred-with-partner resident-default.properties,mosip.resident.ack.personalised_card.name.convention resident-default.properties,resident.template.bell-icon.success.update-demo-data 
resident-default.properties,resident.template.bell-icon.request-received.verify-my-phone-email resident-default.properties,resident.template.email.subject.success.vid-card-download +<<<<<<< HEAD +======= +resident-default.properties,otpChannel.mobile +>>>>>>> [DSD-2904] added property migrator resident-default.properties,resident-ui-schema-file-name-prefix resident-default.properties,resident.template.email.subject.success.get-my-uin-card resident-default.properties,mosip.scope.resident.postgeneratevid @@ -1354,7 +1609,11 @@ resident-default.properties,mosip.scope.resident.getAuthTransactions resident-default.properties,mosip.resident.phone.token.claim-phone resident-default.properties,mosip.resident.request.vid.card.version resident-default.properties,spring.jpa.properties.hibernate.temp.use_jdbc_metadata_defaults +<<<<<<< HEAD resident-default.properties,hibernate.jdbc.lob.non_contextual_creation +======= +resident-default.properties,hibernate.jdbc.lob.non_contextual_creation +>>>>>>> [DSD-2904] added property migrator resident-default.properties,mosip.email.template.property resident-default.properties,mosip.full.name.template.property resident-default.properties,mosip.resident.download.service.history.file.name.convention @@ -1512,7 +1771,11 @@ resident-default.properties,resident.template.purpose.failure.get-my-uin-card resident-default.properties,resident.template.purpose.failure.order-a-physical-card resident-default.properties,resident.service-history.download.max.count resident-default.properties,resident.template.sms.request-received.verify-my-phone-email +<<<<<<< HEAD resident-default.properties,resident.download.card.eventid.id +======= +resident-default.properties,resident.download.card.eventid.id +>>>>>>> [DSD-2904] added property migrator resident-default.properties,mosip.iam.module.login_flow.response_type resident-default.properties,resident.template.email.content.success.verify-my-phone-email resident-default.properties,resident.grievance-redressal.alt-email.chars.limit @@ -1623,3 +1886,28 @@ resident-default.properties,mosip.scope.resident.postRequestShareCredWithPartner resident-default.properties,resident.template.support-docs-list resident-default.properties,resident.identity.info.version resident-default.properties,resident.template.email.content.failure.cust-and-down-my-card +<<<<<<< HEAD +packet-manager-default.properties,auth.server.admin.allowed.audience +packet-manager-default.properties,packetmanager.additional.fields.search.from.metainfo +packet-manager-default.properties,mosip.role.commons-packet.putcreatepacket +packet-manager-default.properties,mosip.role.commons-packet.postaddtag +packet-manager-default.properties,mosip.role.commons-packet.postaddorupdatetag +packet-manager-default.properties,mosip.role.commons-packet.postdeletetag +packet-manager-default.properties,mosip.role.commons-packet.postinfo +packet-manager-default.properties,mosip.role.commons-packet.postgettags +packet-manager-default.properties,mosip.role.commons-packet.postvalidatepacket +packet-manager-default.properties,mosip.role.commons-packet.postaudits +packet-manager-default.properties,mosip.role.commons-packet.postmetainfo +packet-manager-default.properties,mosip.role.commons-packet.postbiometrics +packet-manager-default.properties,mosip.role.commons-packet.postdocument +packet-manager-default.properties,mosip.role.commons-packet.postsearchfields +packet-manager-default.properties,object.store.s3.accesskey +packet-manager-default.properties,object.store.s3.secretkey 
+packet-manager-default.properties,object.store.s3.region
+packet-manager-default.properties,object.store.s3.readlimit
+packet-manager-default.properties,mosip.iam.adapter.appid
+packet-manager-default.properties,mosip.iam.adapter.clientid
+packet-manager-default.properties,mosip.iam.adapter.clientsecret
+credential-service-default.properties,credentialType.formatter.VERCRED
+=======
+>>>>>>> [DSD-2904] added property migrator
diff --git a/deployment/v3/utils/prop_migrator/knowledge/old-value-takes-priority.csv b/deployment/v3/utils/prop_migrator/knowledge/old-value-takes-priority.csv
index f8cbdc3fe..9b5eecc1b 100644
--- a/deployment/v3/utils/prop_migrator/knowledge/old-value-takes-priority.csv
+++ b/deployment/v3/utils/prop_migrator/knowledge/old-value-takes-priority.csv
@@ -48,8 +48,13 @@ registration-processor-default.properties,packetmanager.provider.uingenerator.pa
 registration-processor-default.properties,packetmanager.provider.uingenerator.parentOrGuardianRID
 registration-processor-default.properties,registration.processor.reprocess.elapse.time
 registration-processor-default.properties,registration.processor.reprocess.hours
+registration-processor-default.properties,mosip.registration.processor.database.port
 registration-processor-default.properties,registration.processor.LANDING_ZONE
 registration-processor-default.properties,mosip.regproc.packet.classifier.tagging.agegroup.ranges
+registration-processor-default.properties,mosip.kernel.virus-scanner.host
+registration-processor-default.properties,registration.processor.reprocess.minutes:
+registration-processor-default.properties,mosip.kernel.virus-scanner.port:
+registration-processor-default.properties,registration.processor.queue.username:
 registration-default.properties,mosip.registration.num_of_fingerprint_retries
 registration-default.properties,mosip.registration.num_of_iris_retries
 registration-default.properties,mosip.registration.num_of_face_retries
@@ -117,6 +122,8 @@ application-default.properties,mosip.kernel.machineid.length
 application-default.properties,mosip.kernel.rid.length
 application-default.properties,mosip.kernel.rid.timestamp-length
 application-default.properties,mosip.kernel.rid.sequence-length
+application-default.properties,mosip.kernel.virus-scanner.host
+application-default.properties,mosip.kernel.virus-scanner.port
 application-default.properties,mosip.kernel.otp.expiry-time
 application-default.properties,mosip.primary-language
 application-default.properties,mosip.secondary-language
diff --git a/deployment/v3/utils/readuser-util/README.md b/deployment/v3/utils/readuser-util/README.md
new file mode 100644
index 000000000..99c617859
--- /dev/null
+++ b/deployment/v3/utils/readuser-util/README.md
@@ -0,0 +1,39 @@
+# ReadUser Creation Utility
+
+This utility creates users with read-only privileges on the Postgres, MinIO, and Keycloak servers.
+
+- Initializes MinIO with user management and policy attachment based on the specified action (create or delete).
+- Runs a script that creates a read-only user in the PostgreSQL database.
+- Initializes Keycloak with a user that has read-only privileges.
+
+## Prerequisites
+
+Ensure the following prerequisites are met before deploying the utility:
+- Kubernetes Cluster: A running Kubernetes cluster.
+- MinIO Deployment: MinIO server should be deployed and running.
+- Postgres Deployment: Postgres server should be deployed and running.
+- Keycloak Deployment: Keycloak server should be deployed and running.
+- Kubernetes ConfigMap and Secrets:
+  * Secret containing the "postgres-password"
+  * Secret containing the MinIO access and secret keys.
+- Configuration for the username, password, policy name, and action should be managed via a values file (typically used with Helm charts) for the s3-readuser-util chart.
+- Configuration for the username, password, dbhost, dbport, and action should be managed via a values file (typically used with Helm charts) for the postgres-readuser-util chart.
+- A readuser-init-values.yaml file with the user configuration used to initialize Keycloak.
+
+### Notes
+
+* The action (create or delete), username, password, and policy name should be set in the values.yaml file, which is referenced by the Job manifest.
+* The utility uses the mc client to create the read user on the MinIO server.
+* The utility uses a SQL script, passed in via a ConfigMap, to create the read user on the Postgres server.
+* To create the read user, set the "action" key in values.yaml to "create".
+* To delete the read user, set the "action" key in values.yaml to "delete".
+* After executing this utility, confirm on the MinIO, Postgres, and Keycloak servers that the created user has the necessary privileges.
+* In case of user deletion, cross-check on the MinIO, Postgres, and Keycloak servers that the user has been removed.
+
+### Install
+
+* `install.sh`
+
+### Delete
+
+* `delete.sh`
\ No newline at end of file
diff --git a/deployment/v3/utils/readuser-util/copy_cm.sh b/deployment/v3/utils/readuser-util/copy_cm.sh
new file mode 100755
index 000000000..0e9f30dd3
--- /dev/null
+++ b/deployment/v3/utils/readuser-util/copy_cm.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copy configmaps from other namespaces
+
+function copying_cm() {
+  COPY_UTIL=../copy_cm_func.sh
+  DST_NS=util # DST_NS: Destination namespace
+
+  $COPY_UTIL configmap keycloak-host keycloak $DST_NS
+  $COPY_UTIL configmap keycloak-env-vars keycloak $DST_NS
+  return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace # trace ERR through 'time command' and other functions
+set -o pipefail # trace ERR through pipes
+copying_cm # calling function
diff --git a/deployment/v3/utils/readuser-util/copy_secrets.sh b/deployment/v3/utils/readuser-util/copy_secrets.sh
new file mode 100755
index 000000000..0ce3be107
--- /dev/null
+++ b/deployment/v3/utils/readuser-util/copy_secrets.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copy secrets from other namespaces
+
+function copying_secrets() {
+  COPY_UTIL=../copy_cm_func.sh
+  DST_NS=util # DST_NS: Destination namespace
+  $COPY_UTIL secret minio minio $DST_NS
+  $COPY_UTIL secret postgres-postgresql postgres $DST_NS
+  $COPY_UTIL secret keycloak keycloak $DST_NS
+  return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace # trace ERR through 'time command' and other functions
+set -o pipefail # trace ERR through pipes
+copying_secrets # calling function
diff --git a/deployment/v3/utils/readuser-util/delete.sh b/deployment/v3/utils/readuser-util/delete.sh
new file mode 100755
index 000000000..1920da5af
--- /dev/null
+++ b/deployment/v3/utils/readuser-util/delete.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+# Uninstalls readuser-util
+## Usage: ./delete.sh [kubeconfig]
+
+if [ $# -ge 1 ] ; then
+  export KUBECONFIG=$1
+fi
+
+function deleting_readuser-util() {
+  NS=util
+  while true; do
+    read -p "Are you sure you want to delete readuser-util helm charts?(Y/n) " yn
+    if [ "$yn" = "Y" ]
+    then
+      helm -n $NS delete readuser-util
+      helm -n $NS delete readuser-iam-init
+      break
+    else
+      break
+    fi
+  done
+  return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace # trace ERR through 'time command' and other functions
+set -o pipefail # trace ERR through pipes
+deleting_readuser-util # calling function
\ No newline at end of file
diff --git a/deployment/v3/utils/readuser-util/install.sh b/deployment/v3/utils/readuser-util/install.sh
new file mode 100755
index 000000000..6a115dac0
--- /dev/null
+++ b/deployment/v3/utils/readuser-util/install.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+# Installs readuser-util
+## Usage: ./install.sh [kubeconfig]
+
+if [ $# -ge 1 ] ; then
+  export KUBECONFIG=$1
+fi
+
+NS=util
+CHART_VERSION=0.0.1-develop
+
+echo Create $NS namespace
+kubectl create ns $NS
+
+function installing_readuser-util() {
+
+  echo Copy secrets
+  sed -i 's/\r$//' copy_secrets.sh
+  ./copy_secrets.sh
+
+  echo Copy configmaps
+  sed -i 's/\r$//' copy_cm.sh
+  ./copy_cm.sh
+
+  echo Installing readuser-util
+  helm -n $NS install readuser-util mosip/readuser-util --version $CHART_VERSION -f values.yaml --wait
+  helm -n $NS install readuser-iam-init mosip/keycloak-init --version $CHART_VERSION -f readuser-init-values.yaml --wait
+
+  echo Installed readuser-util
+  return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace # trace ERR through 'time command' and other functions
+set -o pipefail # trace ERR through pipes
+installing_readuser-util # calling function
diff --git a/deployment/v3/utils/readuser-util/readuser-init-values.yaml b/deployment/v3/utils/readuser-util/readuser-init-values.yaml
new file mode 100644
index 000000000..7a99a9881
--- /dev/null
+++ b/deployment/v3/utils/readuser-util/readuser-init-values.yaml
@@ -0,0 +1,25 @@
+## YAML to create users
+keycloak:
+  realms:
+    mosip: # realm
+      roles: []
+      clients: []
+      users:
+        - username: readuser
+          email: read_user15@xyz.com
+          firstName: read
+          lastName: user
+          password: mosip123
+          temporary: False
+          attributes: {}
+          realmRoles:
+            - offline_access
+            - uma_authorization
+          client: "realm-management"
+          clientRoles:
+            - "view-users"
+            - "view-authorization"
+            - "view-clients"
+            - "view-events"
+            - "view-realm"
+            - "view-identity-providers"
diff --git a/deployment/v3/utils/readuser-util/values.yaml b/deployment/v3/utils/readuser-util/values.yaml
new file mode 100644
index 000000000..4dee20a27
--- /dev/null
+++ b/deployment/v3/utils/readuser-util/values.yaml
@@ -0,0 +1,32 @@
+# Default values for readuser.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+postgres-readuser-util:
+  enabled: true
+  # Values for readuser creation.
+  # "action" key specifies whether you want to "create" or "delete" the read user.
+  # "username" key specifies the username of the read user to be created.
+  # "password" key specifies the password for the read user.
+  # "dbport" key specifies the DB port number.
+  # "dbhost" key specifies the DB host.
+  user:
+    action: "create"
+    username: "readuser"
+    password: "mosip123"
+    dbport: 5432
+    dbhost: api-internal.xyz.mosip.net
+
+s3-readuser-util:
+  enabled: true
+  # Values for readuser creation.
+  # "action" key specifies whether you want to "create" or "delete" the read user.
+  # "username" key specifies the username of the read user to be created.
+  # "password" key specifies the password for the read user.
+  # "policyName" key specifies the name of the policy to be created.
+  s3user:
+    action: "create"
+    username: "readuser"
+    password: "mosip123"
+    policyName: "dataread"
+
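Editor's note on usage: the patch itself only adds the files above, but a minimal sketch of how the new readuser-util is meant to be driven may help reviewers. The kubeconfig path below is a placeholder, and it is assumed that the mosip Helm repo is already configured and that values.yaml / readuser-init-values.yaml have been edited for the target environment.

```bash
# Minimal usage sketch for readuser-util (editor's illustration, not part of the patch).
cd deployment/v3/utils/readuser-util

# Both install.sh and delete.sh accept an optional kubeconfig path as their first argument.
KUBECONFIG_PATH="$HOME/.kube/sandbox-config"   # placeholder

# Create the read-only users in MinIO, Postgres and Keycloak.
./install.sh "$KUBECONFIG_PATH"

# Per the README notes, removing the users themselves is driven by setting
# action: "delete" in values.yaml and re-running the chart; delete.sh only
# removes the helm releases, after a confirmation prompt.
./delete.sh "$KUBECONFIG_PATH"
```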