Skip to content

Commit

Permalink
Remove deprecated kube-rbac-proxy. Manual cherry-pick of 422b41e
Browse files Browse the repository at this point in the history
  • Loading branch information
akram authored and openshift-merge-bot[bot] committed Dec 19, 2024
1 parent 3043f4c commit 096163b
Show file tree
Hide file tree
Showing 28 changed files with 340 additions and 84 deletions.
2 changes: 1 addition & 1 deletion apis/config/v1beta1/defaults.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ const (
DefaultWebhookSecretName = "kueue-webhook-server-cert"
DefaultWebhookPort = 9443
DefaultHealthProbeBindAddress = ":8081"
DefaultMetricsBindAddress = ":8080"
DefaultMetricsBindAddress = ":8443"
DefaultLeaderElectionID = "c1f6bfd2.kueue.x-k8s.io"
DefaultLeaderElectionLeaseDuration = 15 * time.Second
DefaultLeaderElectionRenewDeadline = 10 * time.Second
Expand Down
15 changes: 8 additions & 7 deletions charts/kueue/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,14 @@
## Table of Contents

<!-- toc -->
- [Installation](#installation)
- [Prerequisites](#prerequisites)
- [Installing the chart](#installing-the-chart)
- [Install chart using Helm v3.0+](#install-chart-using-helm-v30)
- [Verify that controller pods are running properly.](#verify-that-controller-pods-are-running-properly)
- [Configuration](#configuration)
- [Kueue's helm chart](#kueues-helm-chart)
- [Table of Contents](#table-of-contents)
- [Installation](#installation)
- [Prerequisites](#prerequisites)
- [Installing the chart](#installing-the-chart)
- [Install chart using Helm v3.0+](#install-chart-using-helm-v30)
- [Verify that controller pods are running properly.](#verify-that-controller-pods-are-running-properly)
- [Configuration](#configuration)
<!-- /toc -->

### Installation
Expand Down Expand Up @@ -48,7 +50,6 @@ The following table lists the configurable parameters of the kueue chart and the
| `fullnameOverride` | override the resource name | `` |
| `enablePrometheus` | enable Prometheus | `false` |
| `enableCertManager` | enable CertManager | `false` |
| `controllerManager.kubeRbacProxy.image` | controllerManager.kubeRbacProxy's image | `gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0` |
| `controllerManager.manager.image` | controllerManager.manager's image | `us-central1-docker.pkg.dev/k8s-staging-images/kueue/kueue:main` |
| `controllerManager.manager.resources` | controllerManager.manager's resources | abbr. |
| `controllerManager.replicas` | ControllerManager's replicaCount | `1` |
Expand Down
13 changes: 0 additions & 13 deletions charts/kueue/templates/manager/manager.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -67,19 +67,6 @@ spec:
- mountPath: /controller_manager_config.yaml
name: manager-config
subPath: controller_manager_config.yaml
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=10
image: "{{ .Values.controllerManager.kubeRbacProxy.image.repository }}:{{ .Values.controllerManager.kubeRbacProxy.image.tag }}"
imagePullPolicy: {{ .Values.controllerManager.kubeRbacProxy.image.pullPolicy }}
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
protocol: TCP
resources: {}
securityContext:
{{- toYaml .Values.controllerManager.manager.podSecurityContext | nindent 8 }}
serviceAccountName: {{ include "kueue.fullname" . }}-controller-manager
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ kind: ClusterRole
metadata:
labels:
{{- include "kueue.labels" . | nindent 4 }}
name: '{{ include "kueue.fullname" . }}-proxy-role'
name: '{{ include "kueue.fullname" . }}-metrics-auth-role'
rules:
- apiGroups:
- authentication.k8s.io
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,11 @@ kind: ClusterRoleBinding
metadata:
labels:
{{- include "kueue.labels" . | nindent 4 }}
name: '{{ include "kueue.fullname" . }}-proxy-rolebinding'
name: '{{ include "kueue.fullname" . }}-metrics-auth-rolebinding'
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: '{{ include "kueue.fullname" . }}-proxy-role'
name: '{{ include "kueue.fullname" . }}-metrics-auth-role'
subjects:
- kind: ServiceAccount
name: '{{ include "kueue.fullname" . }}-controller-manager'
Expand Down
11 changes: 2 additions & 9 deletions charts/kueue/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,13 +12,6 @@ controllerManager:
#featureGates:
# - name: PartialAdmission
# enabled: true
kubeRbacProxy:
image:
repository: gcr.io/kubebuilder/kube-rbac-proxy
# tag
tag: v0.8.0
# This should be set to 'IfNotPresent' for released version
pullPolicy: IfNotPresent
manager:
image:
repository: us-central1-docker.pkg.dev/k8s-staging-images/kueue/kueue
Expand All @@ -27,7 +20,7 @@ controllerManager:
podAnnotations: {}
resources:
limits:
cpu: 500m
cpu: 2
memory: 512Mi
requests:
cpu: 500m
Expand Down Expand Up @@ -59,7 +52,7 @@ managerConfig:
health:
healthProbeBindAddress: :8081
metrics:
bindAddress: :8080
bindAddress: :8443
# enableClusterQueueResources: true
webhook:
port: 9443
Expand Down
13 changes: 13 additions & 0 deletions cmd/kueue/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,8 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"

configapi "sigs.k8s.io/kueue/apis/config/v1beta1"
kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1"
Expand Down Expand Up @@ -130,6 +132,17 @@ func main() {
os.Exit(1)
}

// Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
// More info:
// - https://pkg.go.dev/sigs.k8s.io/[email protected]/pkg/metrics/server
// - https://book.kubebuilder.io/reference/metrics.html
metricsServerOptions := metricsserver.Options{
BindAddress: cfg.Metrics.BindAddress,
SecureServing: true,
FilterProvider: filters.WithAuthenticationAndAuthorization,
}
options.Metrics = metricsServerOptions

metrics.Register()

kubeConfig := ctrl.GetConfigOrDie()
Expand Down
11 changes: 0 additions & 11 deletions config/components/manager/auth_proxy_service.yaml

This file was deleted.

2 changes: 1 addition & 1 deletion config/components/manager/controller_manager_config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ kind: Configuration
health:
healthProbeBindAddress: :8081
metrics:
bindAddress: :8080
bindAddress: :8443
enableClusterQueueResources: true
webhook:
port: 9443
Expand Down
3 changes: 0 additions & 3 deletions config/components/manager/kustomization.yaml
Original file line number Diff line number Diff line change
@@ -1,8 +1,5 @@
# Comment the "auth_proxy_service.yaml" entry if you want to disable the service
# for auth proxy (https://github.com/brancz/kube-rbac-proxy)
resources:
- manager.yaml
- auth_proxy_service.yaml

generatorOptions:
disableNameSuffixHash: true
Expand Down
2 changes: 1 addition & 1 deletion config/components/manager/manager.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ spec:
# More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources:
limits:
cpu: 500m
cpu: 2
memory: 512Mi
requests:
cpu: 500m
Expand Down
15 changes: 9 additions & 6 deletions config/components/rbac/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,12 +9,15 @@ resources:
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
# Comment the following 3 lines if you want to disable
# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
# which protects your /metrics endpoint.
- auth_proxy_role.yaml
- auth_proxy_role_binding.yaml
- auth_proxy_client_clusterrole.yaml
# The following RBAC configurations are used to protect
# the metrics endpoint with authn/authz. These configurations
# ensure that only authorized users and service accounts
# can access the metrics endpoint. Comment the following
# permissions if you want to disable this protection.
# More info: https://book.kubebuilder.io/reference/metrics.html
- metrics_auth_role.yaml
- metrics_auth_role_binding.yaml
- metrics_reader_role.yaml
# ClusterRoles for Kueue APIs
- batch_admin_role.yaml
- batch_user_role.yaml
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: proxy-role
name: metrics-auth-role
rules:
- apiGroups:
- authentication.k8s.io
Expand Down
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: proxy-rolebinding
name: metrics-auth-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: proxy-role
name: metrics-auth-role
subjects:
- kind: ServiceAccount
name: controller-manager
Expand Down
7 changes: 2 additions & 5 deletions config/default/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -30,13 +30,10 @@ resources:
# - ../components/certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../components/prometheus
# [METRICS] Expose the controller manager metrics service.
- metrics_service.yaml

patches:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
- path: manager_auth_proxy_patch.yaml

# Mount the controller config file for loading manager configurations
# through a ComponentConfig type
- path: manager_config_patch.yaml
Expand Down
18 changes: 18 additions & 0 deletions config/default/metrics_service.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
app.kubernetes.io/name: kueue
app.kubernetes.io/managed-by: kustomize
name: controller-manager-metrics-service
namespace: system
spec:
ports:
- name: https
port: 8443
protocol: TCP
targetPort: 8443
selector:
control-plane: controller-manager

45 changes: 40 additions & 5 deletions hack/e2e-common.sh
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,19 @@ export GINKGO="$ROOT_DIR"/bin/ginkgo
export KIND="$ROOT_DIR"/bin/kind
export YQ="$ROOT_DIR"/bin/yq

export JOBSET_MANIFEST=https://github.com/kubernetes-sigs/jobset/releases/download/${JOBSET_VERSION}/manifests.yaml
export JOBSET_IMAGE=registry.k8s.io/jobset/jobset:${JOBSET_VERSION}
export JOBSET_CRDS=${ROOT_DIR}/dep-crds/jobset-operator/
if [[ -n ${JOBSET_VERSION:-} ]]; then
export JOBSET_MANIFEST="https://github.com/kubernetes-sigs/jobset/releases/download/${JOBSET_VERSION}/manifests.yaml"
export JOBSET_IMAGE=registry.k8s.io/jobset/jobset:${JOBSET_VERSION}
export JOBSET_CRDS=${ROOT_DIR}/dep-crds/jobset-operator/
fi

# sleep image to use for testing.
export E2E_TEST_SLEEP_IMAGE_OLD=gcr.io/k8s-staging-perf-tests/sleep:v0.0.3@sha256:00ae8e01dd4439edfb7eb9f1960ac28eba16e952956320cce7f2ac08e3446e6b
E2E_TEST_SLEEP_IMAGE_OLD_WITHOUT_SHA=${E2E_TEST_SLEEP_IMAGE_OLD%%@*}
export E2E_TEST_SLEEP_IMAGE=gcr.io/k8s-staging-perf-tests/sleep:v0.1.0@sha256:8d91ddf9f145b66475efda1a1b52269be542292891b5de2a7fad944052bab6ea
E2E_TEST_SLEEP_IMAGE_WITHOUT_SHA=${E2E_TEST_SLEEP_IMAGE%%@*}
export E2E_TEST_CURL_IMAGE=curlimages/curl:8.11.0@sha256:6324a8b41a7f9d80db93c7cf65f025411f55956c6b248037738df3bfca32410c
E2E_TEST_CURL_IMAGE_WITHOUT_SHA=${E2E_TEST_CURL_IMAGE%%@*}

# $1 - cluster name
function cluster_cleanup {
Expand All @@ -42,10 +52,35 @@ function cluster_create {
kubectl describe pods -n kube-system > $ARTIFACTS/$1-system-pods.log || true
}

function prepare_docker_images {
docker pull "$E2E_TEST_SLEEP_IMAGE_OLD"
docker pull "$E2E_TEST_SLEEP_IMAGE"
docker pull "$E2E_TEST_CURL_IMAGE"

# We can load image by a digest but we cannot reference it by the digest that we pulled.
# For more information https://github.com/kubernetes-sigs/kind/issues/2394#issuecomment-888713831.
# Manually create tag for image with digest which is already pulled
docker tag $E2E_TEST_SLEEP_IMAGE_OLD "$E2E_TEST_SLEEP_IMAGE_OLD_WITHOUT_SHA"
docker tag $E2E_TEST_SLEEP_IMAGE "$E2E_TEST_SLEEP_IMAGE_WITHOUT_SHA"
docker tag $E2E_TEST_CURL_IMAGE "$E2E_TEST_CURL_IMAGE_WITHOUT_SHA"

if [[ -n ${JOBSET_VERSION:-} ]]; then
docker pull "${JOBSET_IMAGE}"
fi
if [[ -n ${KUBEFLOW_VERSION:-} ]]; then
docker pull "${KUBEFLOW_IMAGE}"
fi
if [[ -n ${KUBEFLOW_MPI_VERSION:-} ]]; then
docker pull "${KUBEFLOW_MPI_IMAGE}"
fi
}

# $1 cluster
function cluster_kind_load {
cluster_kind_load_image $1 $E2E_TEST_IMAGE
cluster_kind_load_image $1 $IMAGE_TAG
cluster_kind_load_image "$1" "${E2E_TEST_SLEEP_IMAGE_OLD_WITHOUT_SHA}"
cluster_kind_load_image "$1" "${E2E_TEST_SLEEP_IMAGE_WITHOUT_SHA}"
cluster_kind_load_image "$1" "${E2E_TEST_CURL_IMAGE_WITHOUT_SHA}"
cluster_kind_load_image "$1" "$IMAGE_TAG"
}

# $1 cluster
Expand Down
4 changes: 1 addition & 3 deletions hack/e2e-test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ set -o pipefail

SOURCE_DIR="$(cd "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P)"
ROOT_DIR="$SOURCE_DIR/.."
export E2E_TEST_IMAGE=gcr.io/k8s-staging-perf-tests/sleep:v0.1.0

source ${SOURCE_DIR}/e2e-common.sh

Expand All @@ -47,12 +46,11 @@ function startup {
}

function kind_load {
prepare_docker_images
if [ $CREATE_KIND_CLUSTER == 'true' ]
then
docker pull $E2E_TEST_IMAGE
cluster_kind_load $KIND_CLUSTER_NAME
fi
docker pull registry.k8s.io/jobset/jobset:$JOBSET_VERSION
install_jobset $KIND_CLUSTER_NAME
}

Expand Down
7 changes: 2 additions & 5 deletions hack/multikueue-e2e-test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ set -o pipefail

SOURCE_DIR="$(cd "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P)"
ROOT_DIR="$SOURCE_DIR/.."
export E2E_TEST_IMAGE=gcr.io/k8s-staging-perf-tests/sleep:v0.1.0
export MANAGER_KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME}-manager
export WORKER1_KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME}-worker1
export WORKER2_KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME}-worker2
Expand Down Expand Up @@ -65,20 +64,18 @@ function startup {
}

function kind_load {
prepare_docker_images
if [ $CREATE_KIND_CLUSTER == 'true' ]
then
docker pull $E2E_TEST_IMAGE
cluster_kind_load $MANAGER_KIND_CLUSTER_NAME
cluster_kind_load $WORKER1_KIND_CLUSTER_NAME
cluster_kind_load $WORKER2_KIND_CLUSTER_NAME

fi

# JOBSET SETUP
docker pull registry.k8s.io/jobset/jobset:$JOBSET_VERSION
install_jobset $MANAGER_KIND_CLUSTER_NAME
install_jobset $WORKER1_KIND_CLUSTER_NAME
install_jobset $WORKER2_KIND_CLUSTER_NAME
fi
}

function kueue_deploy {
Expand Down
Loading

0 comments on commit 096163b

Please sign in to comment.