chore/upgrade all k8s 1.29 #1156

Closed

wants to merge 3 commits
8 changes: 0 additions & 8 deletions .github/workflows/build-images.yaml
@@ -136,12 +136,6 @@ jobs:
             # to the tagged commit.
             echo "info=$(git describe --tags --long --dirty)" >> $GITHUB_OUTPUT

-      - name: get CA base git tag
-        id: get-ca-tag
-        if: ${{ format('{0}', inputs.build-cluster-autoscaler) == 'true' }}
-        run: |
-          echo "tag=$(cat cluster-autoscaler/ca.tag)" >> $GITHUB_OUTPUT
-
       - name: set custom docker config directory
         uses: ./.github/actions/set-docker-config-dir

@@ -307,8 +301,6 @@ jobs:
           tags: ${{ needs.tags.outputs.cluster-autoscaler }}
           cache-from: type=registry,ref=cache.neon.build/cluster-autoscaler-neonvm:cache
           cache-to: ${{ github.ref_name == 'main' && 'type=registry,ref=cache.neon.build/cluster-autoscaler-neonvm:cache,mode=max' || '' }}
-          build-args: |
-            CA_GIT_TAG=${{ steps.get-ca-tag.outputs.tag }}

       - name: Copy all images to ECR
         if: ${{ format('{0}', inputs.upload-to-ecr) == 'true' }}
7 changes: 0 additions & 7 deletions .github/workflows/check-ca-builds.yaml
@@ -32,11 +32,6 @@ jobs:
           username: ${{ secrets.NEON_CI_DOCKERCACHE_USERNAME }}
           password: ${{ secrets.NEON_CI_DOCKERCACHE_PASSWORD }}

-      - name: get CA base git tag
-        id: get-ca-tag
-        run: |
-          echo "tag=$(cat cluster-autoscaler/ca.tag)" | tee -a $GITHUB_OUTPUT
-
       - name: Build cluster-autoscaler image
         uses: docker/build-push-action@v6
         with:
@@ -45,5 +40,3 @@
           push: false
           file: cluster-autoscaler/Dockerfile
           cache-from: type=registry,ref=cache.neon.build/cluster-autoscaler-neonvm:cache
-          build-args: |
-            CA_GIT_TAG=${{ steps.get-ca-tag.outputs.tag }}
2 changes: 1 addition & 1 deletion .github/workflows/lint.yml
@@ -75,7 +75,7 @@ jobs:
       - uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # v2.1
         with:
           check_hidden: true
-          ignore_words_list: NotIn # k8s match selectors; codespell tries to correct it to 'noting', 'not in', or 'notion'
+          ignore_words_list: NotIn, notin # k8s match selectors; codespell tries to correct it to 'noting', 'not in', or 'notion'
           skip: go.sum,./cluster-autoscaler/ca.patch,./neonvm/config/multus-common/nad-crd.yaml

   actionlint:
4 changes: 2 additions & 2 deletions Makefile
@@ -489,15 +489,15 @@ KUTTL ?= $(LOCALBIN)/kuttl
 KUTTL_VERSION ?= v0.16.0

 KUBECTL ?= $(LOCALBIN)/kubectl
-KUBECTL_VERSION ?= v1.28.12
+KUBECTL_VERSION ?= v1.29.10

 KIND ?= $(LOCALBIN)/kind
 # https://github.com/kubernetes-sigs/kind/releases/tag/v0.23.0, supports k8s up to 1.30
 KIND_VERSION ?= v0.23.0

 K3D ?= $(LOCALBIN)/k3d
 # k8s deps in go.mod @ v1.29.4 (nb: binary, separate from images)
-K3D_VERSION ?= v5.6.3
+K3D_VERSION ?= v5.7.4

 ## Install tools
 KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
2 changes: 1 addition & 1 deletion autoscale-scheduler/config_map.yaml
@@ -5,7 +5,7 @@ metadata:
   namespace: kube-system
 data:
   scheduler-config.yaml: |
-    apiVersion: kubescheduler.config.k8s.io/v1beta3
+    apiVersion: kubescheduler.config.k8s.io/v1
     kind: KubeSchedulerConfiguration
     leaderElection:
       leaderElect: false
8 changes: 5 additions & 3 deletions cluster-autoscaler/Dockerfile
@@ -1,13 +1,15 @@
 # NOTE: This must match CA's builder/Dockerfile:
 # https://github.com/kubernetes/autoscaler/blob/<GIT_TAG>/builder/Dockerfile
-FROM golang:1.20.12 AS builder
+FROM golang:1.21.6 AS builder

 WORKDIR /workspace

-ARG CA_GIT_TAG
+COPY ca.branch ca.branch
+COPY ca.commit ca.commit

 # Download the repo. It's... quite large, but thankfully this should get cached
-RUN git clone --depth=1 -b $CA_GIT_TAG https://github.com/kubernetes/autoscaler
+RUN git clone -b `cat ca.branch` https://github.com/kubernetes/autoscaler
+RUN CA_GIT_TAG=`cat ca.commit` cd autoscaler && git reset --hard $CA_GIT_TAG

 # Only ADD the patch after downloading, to avoid wrecking the cache
 COPY ca.patch ca.patch
1 change: 1 addition & 0 deletions cluster-autoscaler/ca.branch
@@ -0,0 +1 @@
+cluster-autoscaler-release-1.29
1 change: 1 addition & 0 deletions cluster-autoscaler/ca.commit
@@ -0,0 +1 @@
+d4bbc686ac02a77a6ad1362fe7bbda387e8f074a
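
The deleted ca.tag is replaced by this branch/commit pair: the Dockerfile clones `cluster-autoscaler-release-1.29`, then hard-resets to the commit above, so builds stay pinned even as the release branch moves. The pair is only coherent while the pinned commit is reachable from that branch. A hypothetical pre-flight check, not part of this PR (the two file paths are the files added above; the local clone location and remote name are assumptions), sketched in Go:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// readTrimmed returns the single-line contents of one of the pin files.
func readTrimmed(path string) (string, error) {
	b, err := os.ReadFile(path)
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(b)), nil
}

func main() {
	branch, err := readTrimmed("cluster-autoscaler/ca.branch")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	commit, err := readTrimmed("cluster-autoscaler/ca.commit")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Assumes a clone of kubernetes/autoscaler in ./autoscaler with the
	// branch fetched, mirroring what the Dockerfile's `git clone -b` does.
	check := exec.Command("git", "-C", "autoscaler",
		"merge-base", "--is-ancestor", commit, "origin/"+branch)
	if err := check.Run(); err != nil {
		fmt.Fprintf(os.Stderr, "%s is not reachable from %s\n", commit, branch)
		os.Exit(1)
	}
	fmt.Printf("ok: %s is on %s\n", commit, branch)
}
```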
163 changes: 43 additions & 120 deletions cluster-autoscaler/ca.patch
@@ -1,47 +1,24 @@
diff --git a/cluster-autoscaler/utils/kubernetes/listers.go b/cluster-autoscaler/utils/kubernetes/listers.go
index d0033550f..fa3c2ec30 100644
index b9be94b6e..df9dc08a9 100644
--- a/cluster-autoscaler/utils/kubernetes/listers.go
+++ b/cluster-autoscaler/utils/kubernetes/listers.go
@@ -17,14 +17,19 @@ limitations under the License.
@@ -17,10 +17,12 @@ limitations under the License.
package kubernetes

import (
+ "encoding/json"
"time"

appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"

apiv1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
client "k8s.io/client-go/kubernetes"
v1appslister "k8s.io/client-go/listers/apps/v1"
v1batchlister "k8s.io/client-go/listers/batch/v1"
@@ -185,6 +190,7 @@ func NewUnschedulablePodInNamespaceLister(kubeClient client.Interface, namespace
selector := fields.ParseSelectorOrDie("spec.nodeName==" + "" + ",status.phase!=" +
string(apiv1.PodSucceeded) + ",status.phase!=" + string(apiv1.PodFailed))
podListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "pods", namespace, selector)
+ podListWatch = wrapListWatchWithNeonVMUsage(podListWatch)
store, reflector := cache.NewNamespaceKeyedIndexerAndReflector(podListWatch, &apiv1.Pod{}, time.Hour)
podLister := v1lister.NewPodLister(store)
go reflector.Run(stopchannel)
@@ -209,6 +215,7 @@ func NewScheduledPodLister(kubeClient client.Interface, stopchannel <-chan struc
selector := fields.ParseSelectorOrDie("spec.nodeName!=" + "" + ",status.phase!=" +
string(apiv1.PodSucceeded) + ",status.phase!=" + string(apiv1.PodFailed))
podListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "pods", apiv1.NamespaceAll, selector)
+ podListWatch = wrapListWatchWithNeonVMUsage(podListWatch)
store, reflector := cache.NewNamespaceKeyedIndexerAndReflector(podListWatch, &apiv1.Pod{}, time.Hour)
podLister := v1lister.NewPodLister(store)
go reflector.Run(stopchannel)
@@ -218,6 +225,105 @@ func NewScheduledPodLister(kubeClient client.Interface, stopchannel <-chan struc
}
"k8s.io/client-go/informers"
@@ -46,6 +48,14 @@ type ListerRegistry interface {
StatefulSetLister() v1appslister.StatefulSetLister
}

+// copied from github.com/neondatabase/autoscaling, neonvm/apis/neonvm/v1/virtualmachine_types.go.
+//
+// this is duplicated so we're not *also* managing an additional dependency.
@@ -50,97 +27,43 @@ index d0033550f..fa3c2ec30 100644
+ Memory resource.Quantity `json:"memory"`
+}
+
+func wrapListWatchWithNeonVMUsage(lw *cache.ListWatch) *cache.ListWatch {
+ updatePodRequestsFromNeonVMAnnotation := func(pod *apiv1.Pod) {
+ annotation, ok := pod.Annotations["vm.neon.tech/usage"]
+ if !ok {
+ return
+ }
+
+ var usage virtualMachineUsage
+ if err := json.Unmarshal([]byte(annotation), &usage); err != nil {
+ return
+ }
+
+ pod.Spec.Containers[0].Resources.Requests = apiv1.ResourceList(map[apiv1.ResourceName]resource.Quantity{
+ apiv1.ResourceCPU: usage.CPU,
+ apiv1.ResourceMemory: usage.Memory,
+ })
type listerRegistryImpl struct {
allNodeLister NodeLister
readyNodeLister NodeLister
@@ -221,6 +231,22 @@ type AllPodLister struct {
podLister v1lister.PodLister
}

+func updatePodRequestsFromNeonVMAnnotation(pod *apiv1.Pod) {
+ annotation, ok := pod.Annotations["vm.neon.tech/usage"]
+ if !ok {
+ return
+ }
+
+ return &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ obj, err := lw.List(options)
+ if err != nil {
+ return obj, err
+ }
+
+ list := obj.(*apiv1.PodList)
+ for i := range list.Items {
+ updatePodRequestsFromNeonVMAnnotation(&list.Items[i])
+ }
+ return obj, nil
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ iface, err := lw.Watch(options)
+ if err != nil {
+ return iface, err
+ }
+
+ // Wrap the channel to update the pods as they come through
+ wrappedEvents := make(chan watch.Event)
+ proxyIface := watch.NewProxyWatcher(wrappedEvents)
+
+ go func() {
+ events := iface.ResultChan()
+
+ for {
+ var ok bool
+ var ev watch.Event
+
+ select {
+ case <-proxyIface.StopChan():
+ return
+ case ev, ok = <-events:
+ if !ok {
+ close(wrappedEvents)
+ return
+ }
+ }
+
+ // Quoting the docs on watch.Event.Object:
+ //
+ // > Object is:
+ // > * If Type is Added or Modified: the new state of the object
+ // > * If type is Deleted: the state of the object immediately before deletion.
+ // > * If Type is Bookmark: the object [ ... ] where only ResourceVersion field
+ // > is set.
+ // > * If Type is Error: *api.Status is recommended; other types may make sense
+ // > depending on context.
+ //
+ // So basically, we want to process the object only if ev.Type is Added,
+ // Modified, or Deleted.
+ if ev.Type == watch.Added || ev.Type == watch.Modified || ev.Type == watch.Deleted {
+ pod := ev.Object.(*apiv1.Pod)
+ updatePodRequestsFromNeonVMAnnotation(pod)
+ }
+
+ // Pass along the maybe-updated event
+ select {
+ case <-proxyIface.StopChan():
+ return
+ case wrappedEvents <- ev:
+ // continue on to next event
+ }
+ }
+ }()
+
+ return proxyIface, nil
+ },
+ DisableChunking: lw.DisableChunking,
+ var usage virtualMachineUsage
+ if err := json.Unmarshal([]byte(annotation), &usage); err != nil {
+ return
+ }
+ pod.Spec.Containers[0].Resources.Requests = apiv1.ResourceList(map[apiv1.ResourceName]resource.Quantity{
+ apiv1.ResourceCPU: usage.CPU,
+ apiv1.ResourceMemory: usage.Memory,
+ })
+}
+
// NodeLister lists nodes.
type NodeLister interface {
List() ([]*apiv1.Node, error)
// List returns all scheduled pods.
func (lister *AllPodLister) List() ([]*apiv1.Pod, error) {
var pods []*apiv1.Pod
@@ -229,9 +255,12 @@ func (lister *AllPodLister) List() ([]*apiv1.Pod, error) {
if err != nil {
return pods, err
}
+
for _, p := range allPods {
if p.Status.Phase != apiv1.PodSucceeded && p.Status.Phase != apiv1.PodFailed {
- pods = append(pods, p)
+ podCopy := p.DeepCopy()
+ updatePodRequestsFromNeonVMAnnotation(podCopy)
+ pods = append(pods, podCopy)
}
}
return pods, nil
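
Pulled out of the patch context, the technique the reworked patch applies is compact: parse the `vm.neon.tech/usage` annotation as JSON and substitute it for the first container's resource requests, mutating a deep copy so the informer's cached objects are never edited in place. A minimal standalone sketch; the `virtualMachineUsage` struct and annotation key are copied from the patch, while the example pod and its `{"cpu":"2","memory":"4Gi"}` value are invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// virtualMachineUsage mirrors the struct duplicated into the patch
// (originally from neonvm/apis/neonvm/v1/virtualmachine_types.go).
type virtualMachineUsage struct {
	CPU    resource.Quantity `json:"cpu"`
	Memory resource.Quantity `json:"memory"`
}

// withNeonVMUsage returns a copy of pod whose requests reflect the
// "vm.neon.tech/usage" annotation, or pod itself if the annotation is
// missing or malformed (matching the patch's silent-skip behavior).
func withNeonVMUsage(pod *corev1.Pod) *corev1.Pod {
	annotation, ok := pod.Annotations["vm.neon.tech/usage"]
	if !ok || len(pod.Spec.Containers) == 0 {
		return pod
	}
	var usage virtualMachineUsage
	if err := json.Unmarshal([]byte(annotation), &usage); err != nil {
		return pod
	}
	out := pod.DeepCopy() // never mutate objects shared with the informer cache
	out.Spec.Containers[0].Resources.Requests = corev1.ResourceList{
		corev1.ResourceCPU:    usage.CPU,
		corev1.ResourceMemory: usage.Memory,
	}
	return out
}

func main() {
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{
				"vm.neon.tech/usage": `{"cpu":"2","memory":"4Gi"}`,
			},
		},
		Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "vm-runner"}}},
	}
	updated := withNeonVMUsage(pod)
	fmt.Println(updated.Spec.Containers[0].Resources.Requests.Cpu())    // 2
	fmt.Println(updated.Spec.Containers[0].Resources.Requests.Memory()) // 4Gi
}
```

The deep copy is the part that changed versus the old approach: `AllPodLister.List` hands back pointers shared with the underlying store, so the updated patch copies before rewriting requests instead of intercepting objects as they stream through a wrapped `cache.ListWatch`.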
1 change: 0 additions & 1 deletion cluster-autoscaler/ca.tag

This file was deleted.
