diff --git a/.github/workflows/build-x86-image.yaml b/.github/workflows/build-x86-image.yaml
index a97d97a097e..71d7437c5b5 100644
--- a/.github/workflows/build-x86-image.yaml
+++ b/.github/workflows/build-x86-image.yaml
@@ -1080,8 +1080,8 @@ jobs:
           name: kube-ovn-ic-conformance-e2e-ko-log
           path: kube-ovn-ic-conformance-e2e-ko-log.tar.gz
 
-  chart-installation-test:
-    name: Chart Installation Test
+  chart-test:
+    name: Chart Installation/Uninstallation Test
     needs: build-kube-ovn
     runs-on: ubuntu-22.04
     timeout-minutes: 30
@@ -1113,8 +1113,8 @@ jobs:
       - name: Install Kube-OVN
         run: make kind-install-chart
 
-      - name: Cleanup
-        run: sh -x dist/images/cleanup.sh
+      - name: Uninstall Kube-OVN
+        run: make kind-uninstall-chart
 
   underlay-logical-gateway-installation-test:
     name: Underlay Logical Gateway Installation Test
@@ -2052,7 +2052,7 @@ jobs:
       - webhook-e2e
       - lb-svc-e2e
       - underlay-logical-gateway-installation-test
-      - chart-installation-test
+      - chart-test
       - installation-compatibility-test
       - no-ovn-lb-test
       - no-np-test
diff --git a/Makefile b/Makefile
index da27ba297b2..a41b4beefaf 100644
--- a/Makefile
+++ b/Makefile
@@ -447,6 +447,10 @@ kind-upgrade-chart: kind-load-image
 	kubectl -n kube-system rollout status --timeout=1s daemonset/kube-ovn-cni
 	kubectl -n kube-system rollout status --timeout=1s daemonset/kube-ovn-pinger
 
+.PHONY: kind-uninstall-chart
+kind-uninstall-chart:
+	helm uninstall kubeovn
+
 .PHONY: kind-install
 kind-install: kind-load-image
 	kubectl config use-context kind-kube-ovn
diff --git a/charts/templates/pre-delete-hook.yaml b/charts/templates/pre-delete-hook.yaml
new file mode 100644
index 00000000000..9fd2a788085
--- /dev/null
+++ b/charts/templates/pre-delete-hook.yaml
@@ -0,0 +1,123 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-ovn-pre-delete-hook
+  namespace: kube-system
+  annotations:
+    # This is what defines this resource as a hook. Without this line, the
+    # job is considered part of the release.
+    "helm.sh/hook": pre-delete
+    "helm.sh/hook-weight": "1"
+    "helm.sh/hook-delete-policy": hook-succeeded
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  annotations:
+    rbac.authorization.k8s.io/system-only: "true"
+    # This is what defines this resource as a hook. Without this line, the
+    # job is considered part of the release.
+    "helm.sh/hook": pre-delete
+    "helm.sh/hook-weight": "2"
+    "helm.sh/hook-delete-policy": hook-succeeded
+  name: system:kube-ovn-pre-delete-hook
+rules:
+  - apiGroups:
+      - kubeovn.io
+    resources:
+      - subnets
+    verbs:
+      - get
+      - list
+      - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: kube-ovn-pre-delete-hook
+  annotations:
+    # This is what defines this resource as a hook. Without this line, the
+    # job is considered part of the release.
+    "helm.sh/hook": pre-delete
+    "helm.sh/hook-weight": "3"
+    "helm.sh/hook-delete-policy": hook-succeeded
+roleRef:
+  name: system:kube-ovn-pre-delete-hook
+  kind: ClusterRole
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+  - kind: ServiceAccount
+    name: kube-ovn-pre-delete-hook
+    namespace: kube-system
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: "{{ .Chart.Name }}-pre-delete-hook"
+  namespace: kube-system
+  labels:
+    app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
+    app.kubernetes.io/instance: {{ .Release.Name | quote }}
+    app.kubernetes.io/version: {{ .Chart.AppVersion }}
+    helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+  annotations:
+    # This is what defines this resource as a hook. Without this line, the
+    # job is considered part of the release.
+    "helm.sh/hook": pre-delete
+    "helm.sh/hook-weight": "4"
+    "helm.sh/hook-delete-policy": hook-succeeded
+spec:
+  completions: 1
+  template:
+    metadata:
+      name: "{{ .Release.Name }}"
+      labels:
+        app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
+        app.kubernetes.io/instance: {{ .Release.Name | quote }}
+        helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+        app: kube-ovn-pre-delete-hook
+        component: job
+    spec:
+      tolerations:
+        - key: ""
+          operator: "Exists"
+          effect: "NoSchedule"
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            - topologyKey: kubernetes.io/hostname
+              labelSelector:
+                matchExpressions:
+                  - key: app
+                    operator: In
+                    values:
+                      - kube-ovn-pre-delete-hook
+                  - key: component
+                    operator: In
+                    values:
+                      - job
+      restartPolicy: Never
+      hostNetwork: true
+      nodeSelector:
+        kubernetes.io/os: "linux"
+      serviceAccount: kube-ovn-pre-delete-hook
+      serviceAccountName: kube-ovn-pre-delete-hook
+      containers:
+        - name: remove-subnet-finalizer
+          image: "{{ .Values.global.registry.address}}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}"
+          env:
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          command:
+            - sh
+            - -c
+            - /kube-ovn/remove-subnet-finalizer.sh 2>&1 | tee -a /var/log/kube-ovn/remove-subnet-finalizer.log
+          volumeMounts:
+            - mountPath: /var/log/kube-ovn
+              name: kube-ovn-log
+      volumes:
+        - name: kube-ovn-log
+          hostPath:
+            path: {{ .Values.log_conf.LOG_DIR }}/kube-ovn
diff --git a/charts/templates/upgrade-ovs-ovn.yaml b/charts/templates/upgrade-ovs-ovn.yaml
index 0b9fcbd1701..487eb218061 100644
--- a/charts/templates/upgrade-ovs-ovn.yaml
+++ b/charts/templates/upgrade-ovs-ovn.yaml
@@ -70,7 +70,7 @@ subjects:
 apiVersion: batch/v1
 kind: Job
 metadata:
-  name: "{{ .Chart.Name }}"
+  name: "{{ .Chart.Name }}-post-upgrade-hook"
   namespace: kube-system
   labels:
     app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
@@ -120,7 +120,7 @@ spec:
       serviceAccount: ovs-ovn-upgrade
       serviceAccountName: ovs-ovn-upgrade
      containers:
-        - name: post-upgrade-job
+        - name: ovs-ovn-upgrade
          image: "{{ .Values.global.registry.address}}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}"
          env:
            - name: POD_NAMESPACE
diff --git a/dist/images/remove-subnet-finalizer.sh b/dist/images/remove-subnet-finalizer.sh
new file mode 100755
index 00000000000..4d69bd0b207
--- /dev/null
+++ b/dist/images/remove-subnet-finalizer.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -ex
+
+for subnet in $(kubectl get subnet -o name); do
+  kubectl patch "$subnet" --type='json' -p '[{"op": "replace", "path": "/metadata/finalizers", "value": []}]'
+done
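For reviewers, a minimal sketch of the flow the renamed `chart-test` CI job exercises, assuming a kind cluster with images already loaded (as the workflow provides); the final `kubectl` check is illustrative and not part of this change:

```sh
#!/bin/bash
# Sketch only: exercise the Helm uninstall path added by this change.
set -ex

# Install Kube-OVN via Helm, as the CI job already does.
make kind-install-chart

# A default installation creates Subnet resources (e.g. ovn-default, join)
# that carry a finalizer. "make kind-uninstall-chart" wraps "helm uninstall
# kubeovn"; Helm runs the pre-delete hook Job first, which executes
# remove-subnet-finalizer.sh and empties /metadata/finalizers on every
# subnet, so the release is removed without hanging on those finalizers.
make kind-uninstall-chart

# Optional check: no Subnet objects should be left stuck in Terminating
# (the CRD itself may already be gone after uninstall, hence "|| true").
kubectl get subnet -o name || true
```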