diff --git a/.golangci.yml b/.golangci.yml
index ab66df909..13be222da 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -25,6 +25,7 @@ linters:
     - staticcheck # some rules from staticcheck.io
     - typecheck # typechecks code, like the compiler
     - unused # checks for unused constants/variables/functions/types
+    - gofumpt # Formatter.
 
     # explicitly enabled:
     - asciicheck # all identifiers are ASCII
diff --git a/Makefile b/Makefile
index 0612541fc..1b7a96e92 100644
--- a/Makefile
+++ b/Makefile
@@ -33,6 +33,8 @@ endif
 # https://github.com/neondatabase/autoscaling/pull/130#issuecomment-1496276620
 export GOFLAGS=-buildvcs=false
 
+GOFUMPT_VERSION ?= v0.7.0
+
 # Setting SHELL to bash allows bash commands to be executed by recipes.
 # Options are set to exit when a recipe line exits non-zero or a piped command fails.
 SHELL = /usr/bin/env bash -o pipefail
@@ -108,7 +110,7 @@ generate: ## Generate boilerplate DeepCopy methods, manifests, and Go client
 
 .PHONY: fmt
 fmt: ## Run go fmt against code.
-	go fmt ./...
+	go run mvdan.cc/gofumpt@${GOFUMPT_VERSION} -w .
 
 .PHONY: vet
 vet: ## Run go vet against code.
@@ -119,7 +121,7 @@ vet: ## Run go vet against code.
 TESTARGS ?= ./...
 
 .PHONY: test
-test: fmt vet envtest ## Run tests.
+test: vet envtest ## Run tests.
 	# chmodding KUBEBUILDER_ASSETS dir to make it deletable by owner,
 	# otherwise it fails with actions/checkout on self-hosted GitHub runners
 	# ref: https://github.com/kubernetes-sigs/controller-runtime/pull/2245
@@ -132,7 +134,7 @@ test: fmt vet envtest ## Run tests.
 ##@ Build
 
 .PHONY: build
-build: fmt vet bin/vm-builder ## Build all neonvm binaries.
+build: vet bin/vm-builder ## Build all neonvm binaries.
 	GOOS=linux go build -o bin/controller neonvm/main.go
 	GOOS=linux go build -o bin/vxlan-controller neonvm/tools/vxlan/controller/main.go
 	GOOS=linux go build -o bin/runner neonvm/runner/*.go
@@ -141,7 +143,7 @@ build: fmt vet bin/vm-builder ## Build all neonvm binaries.
 bin/vm-builder: ## Build vm-builder binary.
 	GOOS=linux CGO_ENABLED=0 go build -o bin/vm-builder -ldflags "-X main.Version=${GIT_INFO} -X main.NeonvmDaemonImage=${IMG_DAEMON}" vm-builder/main.go
 
 .PHONY: run
-run: fmt vet ## Run a controller from your host.
+run: vet ## Run a controller from your host.
 	go run ./neonvm/main.go
 
 .PHONY: lint
diff --git a/neonvm-runner/cmd/main.go b/neonvm-runner/cmd/main.go
index d8a060a41..d0bb9d95d 100644
--- a/neonvm-runner/cmd/main.go
+++ b/neonvm-runner/cmd/main.go
@@ -180,7 +180,7 @@ func getLines(input []byte, commentMarker []byte) [][]byte {
 	lines := bytes.Split(input, []byte("\n"))
 	var output [][]byte
 	for _, currentLine := range lines {
-		var commentIndex = bytes.Index(currentLine, commentMarker)
+		commentIndex := bytes.Index(currentLine, commentMarker)
 		if commentIndex == -1 {
 			output = append(output, currentLine)
 		} else {
@@ -335,7 +335,7 @@ func createISO9660runtime(
 		}
 	}
 
-	outputFile, err := os.OpenFile(diskPath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
+	outputFile, err := os.OpenFile(diskPath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644)
 	if err != nil {
 		return err
 	}
@@ -534,7 +534,7 @@ func createISO9660FromPath(logger *zap.Logger, diskName string, diskPath string,
 		}
 	}
 
-	outputFile, err := os.OpenFile(diskPath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
+	outputFile, err := os.OpenFile(diskPath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644)
 	if err != nil {
 		return err
 	}
@@ -1733,7 +1733,7 @@ func defaultNetwork(logger *zap.Logger, cidr string, ports []vmv1.Port) (mac.MAC
 	// Adding VM's IP address to the /etc/hosts, so we can access it easily from
 	// the pod. This is particularly useful for ssh into the VM from the runner
 	// pod.
-	f, err := os.OpenFile("/etc/hosts", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+	f, err := os.OpenFile("/etc/hosts", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
 	if err != nil {
 		return nil, err
 	}
diff --git a/neonvm-vxlan-controller/cmd/main.go b/neonvm-vxlan-controller/cmd/main.go
index 50823a1c2..1e88080f9 100644
--- a/neonvm-vxlan-controller/cmd/main.go
+++ b/neonvm-vxlan-controller/cmd/main.go
@@ -31,9 +31,7 @@ const (
 	extraNetCidr = "10.100.0.0/16"
 )
 
-var (
-	deleteIfaces = flag.Bool("delete", false, `delete VXLAN interfaces`)
-)
+var deleteIfaces = flag.Bool("delete", false, `delete VXLAN interfaces`)
 
 func main() {
 	flag.Parse()
@@ -192,7 +190,6 @@ func createVxlanInterface(name string, vxlanID int, ownIP string, bridgeName str
 }
 
 func updateFDB(vxlanName string, nodeIPs []string, ownIP string) error {
-
 	broadcastFdbMac, _ := net.ParseMAC("00:00:00:00:00:00")
 
 	// get vxlan interface details
@@ -248,7 +245,6 @@ func deleteLink(name string) error {
 }
 
 func upsertIptablesRules() error {
-
 	// manage iptables
 	ipt, err := iptables.New(iptables.IPFamily(iptables.ProtocolIPv4), iptables.Timeout(5))
 	if err != nil {
diff --git a/neonvm/apis/neonvm/v1/webhook_suite_test.go b/neonvm/apis/neonvm/v1/webhook_suite_test.go
index 7cbc15b8b..93834fbdc 100644
--- a/neonvm/apis/neonvm/v1/webhook_suite_test.go
+++ b/neonvm/apis/neonvm/v1/webhook_suite_test.go
@@ -44,11 +44,13 @@ import (
 // These tests use Ginkgo (BDD-style Go testing framework). Refer to
 // http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
 
-var cfg *rest.Config
-var k8sClient client.Client
-var testEnv *envtest.Environment
-var ctx context.Context
-var cancel context.CancelFunc
+var (
+	cfg       *rest.Config
+	k8sClient client.Client
+	testEnv   *envtest.Environment
+	ctx       context.Context
+	cancel    context.CancelFunc
+)
 
 func TestAPIs(t *testing.T) {
 	RegisterFailHandler(Fail)
@@ -127,7 +129,6 @@ var _ = BeforeSuite(func() {
 		conn.Close()
 		return nil
 	}).Should(Succeed())
-
 })
 
 var _ = AfterSuite(func() {
diff --git a/pkg/agent/billing/billing.go b/pkg/agent/billing/billing.go
index ac21b251f..359162bdd 100644
--- a/pkg/agent/billing/billing.go
+++ b/pkg/agent/billing/billing.go
@@ -148,7 +148,6 @@ func (mc *MetricsCollector) Run(
 	store VMStoreForNode,
 	metrics PromMetrics,
 ) error {
-
 	collectTicker := time.NewTicker(time.Second * time.Duration(mc.conf.CollectEverySeconds))
 	defer collectTicker.Stop()
 	// Offset by half a second, so it's a bit more deterministic.
diff --git a/pkg/agent/billing/indexedstore.go b/pkg/agent/billing/indexedstore.go
index 563dcea29..a385ce976 100644
--- a/pkg/agent/billing/indexedstore.go
+++ b/pkg/agent/billing/indexedstore.go
@@ -37,10 +37,12 @@ func (i *VMNodeIndex) Add(vm *vmapi.VirtualMachine) {
 		i.forNode[vm.UID] = vm
 	}
 }
+
 func (i *VMNodeIndex) Update(oldVM, newVM *vmapi.VirtualMachine) {
 	i.Delete(oldVM)
 	i.Add(newVM)
 }
+
 func (i *VMNodeIndex) Delete(vm *vmapi.VirtualMachine) {
 	// note: delete is a no-op if the key isn't present.
 	delete(i.forNode, vm.UID)
diff --git a/pkg/agent/billing/prommetrics.go b/pkg/agent/billing/prommetrics.go
index fbaf89c85..0453a85c7 100644
--- a/pkg/agent/billing/prommetrics.go
+++ b/pkg/agent/billing/prommetrics.go
@@ -90,8 +90,10 @@ func (m PromMetrics) forBatch() batchMetrics {
 	}
 }
 
-type isEndpointFlag bool
-type autoscalingEnabledFlag bool
+type (
+	isEndpointFlag         bool
+	autoscalingEnabledFlag bool
+)
 
 func (b batchMetrics) inc(isEndpoint isEndpointFlag, autoscalingEnabled autoscalingEnabledFlag, phase vmapi.VmPhase) {
 	key := batchMetricsLabels{
diff --git a/pkg/agent/core/state_test.go b/pkg/agent/core/state_test.go
index d975de870..bee432816 100644
--- a/pkg/agent/core/state_test.go
+++ b/pkg/agent/core/state_test.go
@@ -1495,7 +1495,6 @@ func TestDownscalePivotBack(t *testing.T) {
 			a.Do(state.UpdateSystemMetrics, newMetrics)
 			a.Call(getDesiredResources, state, clock.Now()).
 				Equals(resForCU(2))
-
 		}
 	}
 
diff --git a/pkg/agent/core/testhelpers/construct.go b/pkg/agent/core/testhelpers/construct.go
index a97e8a10b..d50c976e5 100644
--- a/pkg/agent/core/testhelpers/construct.go
+++ b/pkg/agent/core/testhelpers/construct.go
@@ -96,9 +96,11 @@ func CreateVmInfo(config InitialVmInfoConfig, opts ...VmInfoOpt) api.VmInfo {
 	return vm
 }
 
-type coreConfigModifier func(*core.Config)
-type vmInfoConfigModifier func(*InitialVmInfoConfig)
-type vmInfoModifier func(InitialVmInfoConfig, *api.VmInfo)
+type (
+	coreConfigModifier   func(*core.Config)
+	vmInfoConfigModifier func(*InitialVmInfoConfig)
+	vmInfoModifier       func(InitialVmInfoConfig, *api.VmInfo)
+)
 
 var (
 	_ VmInfoOpt = vmInfoConfigModifier(nil)
diff --git a/pkg/agent/prommetrics.go b/pkg/agent/prommetrics.go
index 01ee5fab3..dac29887b 100644
--- a/pkg/agent/prommetrics.go
+++ b/pkg/agent/prommetrics.go
@@ -74,8 +74,10 @@ const (
 
 // Copied bucket values from controller runtime latency metric. We can
 // adjust them in the future if needed.
-var buckets = []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
-	1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60}
+var buckets = []float64{
+	0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
+	1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60,
+}
 
 func makeGlobalMetrics() (GlobalMetrics, *prometheus.Registry) {
 	reg := prometheus.NewRegistry()
diff --git a/pkg/agent/runner.go b/pkg/agent/runner.go
index 7d0778922..b198d17c0 100644
--- a/pkg/agent/runner.go
+++ b/pkg/agent/runner.go
@@ -666,7 +666,6 @@ func (r *Runner) doNeonVMRequest(
 	// Also relevant:
 	_, err = r.global.vmClient.NeonvmV1().VirtualMachines(r.vmName.Namespace).
 		Patch(requestCtx, r.vmName.Name, ktypes.JSONPatchType, patchPayload, metav1.PatchOptions{})
-
 	if err != nil {
 		errMsg := util.RootError(err).Error()
 		// Some error messages contain the object name. We could try to filter them all out, but
diff --git a/pkg/api/vminfo.go b/pkg/api/vminfo.go
index e50c5f303..274fb174d 100644
--- a/pkg/api/vminfo.go
+++ b/pkg/api/vminfo.go
@@ -93,7 +93,6 @@ func NewVmMemInfo(memSlots vmapi.MemorySlots, memSlotSize resource.Quantity) VmM
 		Use:      uint16(memSlots.Use),
 		SlotSize: Bytes(memSlotSize.Value()),
 	}
-
 }
 
 // VmConfig stores the autoscaling-specific "extra" configuration derived from labels and
diff --git a/pkg/billing/client.go b/pkg/billing/client.go
index 5ba8ce922..16b98882a 100644
--- a/pkg/billing/client.go
+++ b/pkg/billing/client.go
@@ -112,7 +112,6 @@ func NewS3Client(ctx context.Context, cfg S3ClientConfig) (*S3Client, error) {
 	defer cancel()
 
 	s3Config, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(cfg.Region))
-
 	if err != nil {
 		return nil, S3Error{Err: err}
 	}
@@ -189,7 +188,6 @@ func (c S3Client) send(ctx context.Context, payload []byte, _ TraceID) error {
 		Key:    &key,
 		Body:   r,
 	})
-
 	if err != nil {
 		return S3Error{Err: err}
 	}
diff --git a/pkg/neonvm/controllers/functests/suite_test.go b/pkg/neonvm/controllers/functests/suite_test.go
index a8d339996..87a3c3ba8 100644
--- a/pkg/neonvm/controllers/functests/suite_test.go
+++ b/pkg/neonvm/controllers/functests/suite_test.go
@@ -36,9 +36,11 @@ import (
 // These tests use Ginkgo (BDD-style Go testing framework). Refer to
 // http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
 
-var cfg *rest.Config
-var k8sClient client.Client
-var testEnv *envtest.Environment
+var (
+	cfg       *rest.Config
+	k8sClient client.Client
+	testEnv   *envtest.Environment
+)
 
 func TestAPIs(t *testing.T) {
 	RegisterFailHandler(Fail)
@@ -69,7 +71,6 @@ var _ = BeforeSuite(func() {
 	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
 	Expect(err).NotTo(HaveOccurred())
 	Expect(k8sClient).NotTo(BeNil())
-
 })
 
 var _ = AfterSuite(func() {
diff --git a/pkg/neonvm/controllers/functests/vm_controller_test.go b/pkg/neonvm/controllers/functests/vm_controller_test.go
index 8b3987b6c..47ab7f5cc 100644
--- a/pkg/neonvm/controllers/functests/vm_controller_test.go
+++ b/pkg/neonvm/controllers/functests/vm_controller_test.go
@@ -36,7 +36,6 @@ import (
 
 var _ = Describe("VirtualMachine controller", func() {
 	Context("VirtualMachine controller test", func() {
-
 		const VirtualMachineName = "test-virtualmachine"
 
 		ctx := context.Background()
diff --git a/pkg/neonvm/controllers/metrics.go b/pkg/neonvm/controllers/metrics.go
index 9ec978c59..083f3a7b9 100644
--- a/pkg/neonvm/controllers/metrics.go
+++ b/pkg/neonvm/controllers/metrics.go
@@ -33,8 +33,10 @@ const OutcomeLabel = "outcome"
 func MakeReconcilerMetrics() ReconcilerMetrics {
 	// Copied bucket values from controller runtime latency metric. We can
 	// adjust them in the future if needed.
-	buckets := []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
-		1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60}
+	buckets := []float64{
+		0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
+		1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60,
+	}
 
 	m := ReconcilerMetrics{
 		failing: util.RegisterMetric(metrics.Registry, prometheus.NewGaugeVec(
diff --git a/pkg/neonvm/controllers/vm_controller.go b/pkg/neonvm/controllers/vm_controller.go
index 7a77668b6..80a35615d 100644
--- a/pkg/neonvm/controllers/vm_controller.go
+++ b/pkg/neonvm/controllers/vm_controller.go
@@ -457,10 +457,12 @@ func (r *VMReconciler) doReconcile(ctx context.Context, vm *vmv1.VirtualMachine)
 			vm.Status.PodIP = vmRunner.Status.PodIP
 			vm.Status.Phase = vmv1.VmRunning
 			meta.SetStatusCondition(&vm.Status.Conditions,
-				metav1.Condition{Type: typeAvailableVirtualMachine,
+				metav1.Condition{
+					Type:    typeAvailableVirtualMachine,
 					Status:  metav1.ConditionTrue,
 					Reason:  "Reconciling",
-					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) created successfully", vm.Status.PodName, vm.Name)})
+					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) created successfully", vm.Status.PodName, vm.Name),
+				})
 			{
 				// Calculating VM startup latency metrics
 				now := time.Now()
@@ -475,24 +477,30 @@ func (r *VMReconciler) doReconcile(ctx context.Context, vm *vmv1.VirtualMachine)
 		case runnerSucceeded:
 			vm.Status.Phase = vmv1.VmSucceeded
 			meta.SetStatusCondition(&vm.Status.Conditions,
-				metav1.Condition{Type: typeAvailableVirtualMachine,
+				metav1.Condition{
+					Type:    typeAvailableVirtualMachine,
 					Status:  metav1.ConditionFalse,
 					Reason:  "Reconciling",
-					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) succeeded", vm.Status.PodName, vm.Name)})
+					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) succeeded", vm.Status.PodName, vm.Name),
+				})
 		case runnerFailed:
 			vm.Status.Phase = vmv1.VmFailed
 			meta.SetStatusCondition(&vm.Status.Conditions,
-				metav1.Condition{Type: typeDegradedVirtualMachine,
+				metav1.Condition{
+					Type:    typeDegradedVirtualMachine,
 					Status:  metav1.ConditionTrue,
 					Reason:  "Reconciling",
-					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) failed", vm.Status.PodName, vm.Name)})
+					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) failed", vm.Status.PodName, vm.Name),
+				})
 		case runnerUnknown:
 			vm.Status.Phase = vmv1.VmPending
 			meta.SetStatusCondition(&vm.Status.Conditions,
-				metav1.Condition{Type: typeAvailableVirtualMachine,
+				metav1.Condition{
+					Type:    typeAvailableVirtualMachine,
 					Status:  metav1.ConditionUnknown,
 					Reason:  "Reconciling",
-					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) in Unknown phase", vm.Status.PodName, vm.Name)})
+					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) in Unknown phase", vm.Status.PodName, vm.Name),
+				})
 		default:
 			// do nothing
 		}
@@ -507,10 +515,12 @@ func (r *VMReconciler) doReconcile(ctx context.Context, vm *vmv1.VirtualMachine)
 				vm.Status.PodName))
 			vm.Status.Phase = vmv1.VmFailed
 			meta.SetStatusCondition(&vm.Status.Conditions,
-				metav1.Condition{Type: typeDegradedVirtualMachine,
+				metav1.Condition{
+					Type:    typeDegradedVirtualMachine,
 					Status:  metav1.ConditionTrue,
 					Reason:  "Reconciling",
-					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) not found", vm.Status.PodName, vm.Name)})
+					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) not found", vm.Status.PodName, vm.Name),
+				})
 		} else if err != nil {
 			log.Error(err, "Failed to get runner Pod")
 			return err
@@ -595,24 +605,30 @@ func (r *VMReconciler) doReconcile(ctx context.Context, vm *vmv1.VirtualMachine)
 		case runnerSucceeded:
 			vm.Status.Phase = vmv1.VmSucceeded
 			meta.SetStatusCondition(&vm.Status.Conditions,
-				metav1.Condition{Type: typeAvailableVirtualMachine,
+				metav1.Condition{
+					Type:    typeAvailableVirtualMachine,
 					Status:  metav1.ConditionFalse,
 					Reason:  "Reconciling",
-					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) succeeded", vm.Status.PodName, vm.Name)})
+					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) succeeded", vm.Status.PodName, vm.Name),
+				})
 		case runnerFailed:
 			vm.Status.Phase = vmv1.VmFailed
 			meta.SetStatusCondition(&vm.Status.Conditions,
-				metav1.Condition{Type: typeDegradedVirtualMachine,
+				metav1.Condition{
+					Type:    typeDegradedVirtualMachine,
 					Status:  metav1.ConditionTrue,
 					Reason:  "Reconciling",
-					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) failed", vm.Status.PodName, vm.Name)})
+					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) failed", vm.Status.PodName, vm.Name),
+				})
 		case runnerUnknown:
 			vm.Status.Phase = vmv1.VmPending
 			meta.SetStatusCondition(&vm.Status.Conditions,
-				metav1.Condition{Type: typeAvailableVirtualMachine,
+				metav1.Condition{
+					Type:    typeAvailableVirtualMachine,
 					Status:  metav1.ConditionUnknown,
 					Reason:  "Reconciling",
-					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) in Unknown phase", vm.Status.PodName, vm.Name)})
+					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) in Unknown phase", vm.Status.PodName, vm.Name),
+				})
 		default:
 			// do nothing
 		}
@@ -628,10 +644,12 @@ func (r *VMReconciler) doReconcile(ctx context.Context, vm *vmv1.VirtualMachine)
 				vm.Status.PodName))
 			vm.Status.Phase = vmv1.VmFailed
 			meta.SetStatusCondition(&vm.Status.Conditions,
-				metav1.Condition{Type: typeDegradedVirtualMachine,
+				metav1.Condition{
+					Type:    typeDegradedVirtualMachine,
 					Status:  metav1.ConditionTrue,
 					Reason:  "Reconciling",
-					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) not found", vm.Status.PodName, vm.Name)})
+					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) not found", vm.Status.PodName, vm.Name),
+				})
 		} else if err != nil {
 			log.Error(err, "Failed to get runner Pod")
 			return err
@@ -648,26 +666,32 @@ func (r *VMReconciler) doReconcile(ctx context.Context, vm *vmv1.VirtualMachine)
 		case runnerSucceeded:
 			vm.Status.Phase = vmv1.VmSucceeded
 			meta.SetStatusCondition(&vm.Status.Conditions,
-				metav1.Condition{Type: typeAvailableVirtualMachine,
+				metav1.Condition{
+					Type:    typeAvailableVirtualMachine,
 					Status:  metav1.ConditionFalse,
 					Reason:  "Reconciling",
-					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) succeeded", vm.Status.PodName, vm.Name)})
+					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) succeeded", vm.Status.PodName, vm.Name),
+				})
 			return nil
 		case runnerFailed:
 			vm.Status.Phase = vmv1.VmFailed
 			meta.SetStatusCondition(&vm.Status.Conditions,
-				metav1.Condition{Type: typeDegradedVirtualMachine,
+				metav1.Condition{
+					Type:    typeDegradedVirtualMachine,
 					Status:  metav1.ConditionTrue,
 					Reason:  "Reconciling",
-					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) failed", vm.Status.PodName, vm.Name)})
+					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) failed", vm.Status.PodName, vm.Name),
+				})
 			return nil
 		case runnerUnknown:
 			vm.Status.Phase = vmv1.VmPending
 			meta.SetStatusCondition(&vm.Status.Conditions,
-				metav1.Condition{Type: typeAvailableVirtualMachine,
+				metav1.Condition{
+					Type:    typeAvailableVirtualMachine,
 					Status:  metav1.ConditionUnknown,
 					Reason:  "Reconciling",
-					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) in Unknown phase", vm.Status.PodName, vm.Name)})
+					Message: fmt.Sprintf("Pod (%s) for VirtualMachine (%s) in Unknown phase", vm.Status.PodName, vm.Name),
+				})
 			return nil
 		default:
 			// do nothing
@@ -1330,7 +1354,7 @@ func getRunnerCgroup(ctx context.Context, vm *vmv1.VirtualMachine) (*api.VCPUCgr
 // imageForVirtualMachine gets the Operand image which is managed by this controller
 // from the VM_RUNNER_IMAGE environment variable defined in the config/manager/manager.yaml
 func imageForVmRunner() (string, error) {
-	var imageEnvVar = "VM_RUNNER_IMAGE"
+	imageEnvVar := "VM_RUNNER_IMAGE"
 	image, found := os.LookupEnv(imageEnvVar)
 	if !found {
 		return "", fmt.Errorf("unable to find %s environment variable with the image", imageEnvVar)
@@ -1555,7 +1579,7 @@ func podSpec(
 							{
 								Key:  "ssh-privatekey",
 								Path: "id_ed25519",
-								Mode: lo.ToPtr[int32](0600),
+								Mode: lo.ToPtr[int32](0o600),
 							},
 						},
 					},
@@ -1570,7 +1594,7 @@ func podSpec(
 							{
 								Key:  "ssh-publickey",
 								Path: "authorized_keys",
-								Mode: lo.ToPtr[int32](0644),
+								Mode: lo.ToPtr[int32](0o644),
 							},
 						},
 					},
diff --git a/pkg/neonvm/controllers/vm_qmp_queries.go b/pkg/neonvm/controllers/vm_qmp_queries.go
index 3181d88da..54823b20b 100644
--- a/pkg/neonvm/controllers/vm_qmp_queries.go
+++ b/pkg/neonvm/controllers/vm_qmp_queries.go
@@ -801,7 +801,6 @@ func QmpGetMemorySize(ip string, port int32) (*resource.Quantity, error) {
 }
 
 func QmpStartMigration(virtualmachine *vmv1.VirtualMachine, virtualmachinemigration *vmv1.VirtualMachineMigration) error {
-
 	// QMP port
 	port := virtualmachine.Spec.QMP
 
diff --git a/pkg/neonvm/controllers/vmmigration_controller.go b/pkg/neonvm/controllers/vmmigration_controller.go
index 2a7160170..ff29115e2 100644
--- a/pkg/neonvm/controllers/vmmigration_controller.go
+++ b/pkg/neonvm/controllers/vmmigration_controller.go
@@ -155,10 +155,12 @@ func (r *VirtualMachineMigrationReconciler) Reconcile(ctx context.Context, req c
 			message := fmt.Sprintf("VM (%s) not found", migration.Spec.VmName)
 			r.Recorder.Event(migration, "Warning", "Failed", message)
 			meta.SetStatusCondition(&migration.Status.Conditions,
-				metav1.Condition{Type: typeDegradedVirtualMachineMigration,
+				metav1.Condition{
+					Type:    typeDegradedVirtualMachineMigration,
 					Status:  metav1.ConditionTrue,
 					Reason:  "Reconciling",
-					Message: message})
+					Message: message,
+				})
 			migration.Status.Phase = vmv1.VmmFailed
 			return r.updateMigrationStatus(ctx, migration)
 		}
@@ -349,10 +351,12 @@ func (r *VirtualMachineMigrationReconciler) Reconcile(ctx context.Context, req c
 		log.Info(message)
 		r.Recorder.Event(migration, "Normal", "Started", message)
 		meta.SetStatusCondition(&migration.Status.Conditions,
-			metav1.Condition{Type: typeAvailableVirtualMachineMigration,
+			metav1.Condition{
+				Type:    typeAvailableVirtualMachineMigration,
 				Status:  metav1.ConditionTrue,
 				Reason:  "Reconciling",
-				Message: message})
+				Message: message,
+			})
 		// finally update migration phase to Running
 		migration.Status.Phase = vmv1.VmmRunning
 		return r.updateMigrationStatus(ctx, migration)
@@ -363,10 +367,12 @@ func (r *VirtualMachineMigrationReconciler) Reconcile(ctx context.Context, req c
 			log.Info(message)
 			r.Recorder.Event(migration, "Warning", "Failed", message)
 			meta.SetStatusCondition(&migration.Status.Conditions,
-				metav1.Condition{Type: typeDegradedVirtualMachineMigration,
+				metav1.Condition{
+					Type:    typeDegradedVirtualMachineMigration,
 					Status:  metav1.ConditionTrue,
 					Reason:  "Reconciling",
-					Message: message})
+					Message: message,
+				})
 			migration.Status.Phase = vmv1.VmmFailed
 			return r.updateMigrationStatus(ctx, migration)
 		case runnerFailed:
@@ -374,10 +380,12 @@ func (r *VirtualMachineMigrationReconciler) Reconcile(ctx context.Context, req c
 			log.Info(message)
 			r.Recorder.Event(migration, "Warning", "Failed", message)
 			meta.SetStatusCondition(&migration.Status.Conditions,
-				metav1.Condition{Type: typeDegradedVirtualMachineMigration,
+				metav1.Condition{
+					Type:    typeDegradedVirtualMachineMigration,
 					Status:  metav1.ConditionTrue,
 					Reason:  "Reconciling",
-					Message: message})
+					Message: message,
+				})
 			migration.Status.Phase = vmv1.VmmFailed
 			return r.updateMigrationStatus(ctx, migration)
 		case runnerUnknown:
@@ -385,10 +393,12 @@ func (r *VirtualMachineMigrationReconciler) Reconcile(ctx context.Context, req c
 			log.Info(message)
 			r.Recorder.Event(migration, "Warning", "Unknown", message)
 			meta.SetStatusCondition(&migration.Status.Conditions,
-				metav1.Condition{Type: typeAvailableVirtualMachineMigration,
+				metav1.Condition{
+					Type:    typeAvailableVirtualMachineMigration,
 					Status:  metav1.ConditionUnknown,
 					Reason:  "Reconciling",
-					Message: message})
+					Message: message,
+				})
 			migration.Status.Phase = vmv1.VmmPending
 			return r.updateMigrationStatus(ctx, migration)
 		default:
@@ -405,10 +415,12 @@ func (r *VirtualMachineMigrationReconciler) Reconcile(ctx context.Context, req c
 			message := fmt.Sprintf("Target Pod (%s) disappeared", migration.Status.TargetPodName)
 			r.Recorder.Event(migration, "Error", "NotFound", message)
 			meta.SetStatusCondition(&migration.Status.Conditions,
-				metav1.Condition{Type: typeDegradedVirtualMachineMigration,
+				metav1.Condition{
+					Type:    typeDegradedVirtualMachineMigration,
 					Status:  metav1.ConditionTrue,
 					Reason:  "Reconciling",
-					Message: message})
+					Message: message,
+				})
 			migration.Status.Phase = vmv1.VmmFailed
 			return r.updateMigrationStatus(ctx, migration)
 		} else if err != nil {
diff --git a/pkg/neonvm/ipam/allocate.go b/pkg/neonvm/ipam/allocate.go
index 4d1d39882..46079ddd9 100644
--- a/pkg/neonvm/ipam/allocate.go
+++ b/pkg/neonvm/ipam/allocate.go
@@ -17,7 +17,6 @@ func doAcquire(
 	vmName string,
 	vmNamespace string,
 ) (net.IPNet, []whereaboutstypes.IPReservation, error) {
-
 	// reduce whereabouts logging
 	whereaboutslogging.SetLogLevel("error")
 
@@ -48,7 +47,6 @@ func doRelease(
 	vmName string,
 	vmNamespace string,
 ) (net.IPNet, []whereaboutstypes.IPReservation, error) {
-
 	// reduce whereabouts logging
 	whereaboutslogging.SetLogLevel("error")
 
diff --git a/pkg/neonvm/ipam/demo/ipam.go b/pkg/neonvm/ipam/demo/ipam.go
index bbb8677f2..7d556c090 100644
--- a/pkg/neonvm/ipam/demo/ipam.go
+++ b/pkg/neonvm/ipam/demo/ipam.go
@@ -27,7 +27,6 @@ var (
 )
 
 func main() {
-
 	opts := zap.Options{ //nolint:exhaustruct // typical options struct; not all fields expected to be filled.
 		Development:     true,
 		StacktraceLevel: zapcore.Level(zapcore.PanicLevel),
@@ -90,5 +89,4 @@ func main() {
 		time.Sleep(time.Millisecond * 200)
 	}
 	wg.Wait()
-
 }
diff --git a/pkg/neonvm/ipam/ipam.go b/pkg/neonvm/ipam/ipam.go
index 4e60e08dd..06a7e7a0d 100644
--- a/pkg/neonvm/ipam/ipam.go
+++ b/pkg/neonvm/ipam/ipam.go
@@ -68,7 +68,6 @@ func (i *IPAM) ReleaseIP(ctx context.Context, vmName string, vmNamespace string)
 
 // New returns a new IPAM object with ipam config and k8s/crd clients
 func New(ctx context.Context, nadName string, nadNamespace string) (*IPAM, error) {
-
 	// get Kubernetes client config
 	cfg, err := config.GetConfig()
 	if err != nil {
@@ -171,7 +170,6 @@ func LoadFromNad(nadConfig string, nadNamespace string) (*IPAMConfig, error) {
 
 // Performing IPAM actions with Leader Election to avoid duplicates
 func (i *IPAM) acquireORrelease(ctx context.Context, vmName string, vmNamespace string, action int) (net.IPNet, error) {
-
 	var ip net.IPNet
 	var err error
 	var ipamerr error
diff --git a/pkg/plugin/dumpstate.go b/pkg/plugin/dumpstate.go
index 69ba3c9ca..504fd81ef 100644
--- a/pkg/plugin/dumpstate.go
+++ b/pkg/plugin/dumpstate.go
@@ -214,7 +214,6 @@ func (s *nodeState) dump() nodeStateDump {
 }
 
 func (s *podState) dump() podStateDump {
-
 	var vm *vmPodState
 	if s.vm != nil {
 		vm = lo.ToPtr(s.vm.dump())
diff --git a/pkg/plugin/plugin.go b/pkg/plugin/plugin.go
index a1fe1d86a..6efa2f38f 100644
--- a/pkg/plugin/plugin.go
+++ b/pkg/plugin/plugin.go
@@ -22,8 +22,10 @@ import (
 	"github.com/neondatabase/autoscaling/pkg/util/watch"
 )
 
-const Name = "AutoscaleEnforcer"
-const LabelPluginCreatedMigration = "autoscaling.neon.tech/created-by-scheduler"
+const (
+	Name                        = "AutoscaleEnforcer"
+	LabelPluginCreatedMigration = "autoscaling.neon.tech/created-by-scheduler"
+)
 
 // AutoscaleEnforcer is the scheduler plugin to coordinate autoscaling
 type AutoscaleEnforcer struct {
@@ -40,16 +42,20 @@ type AutoscaleEnforcer struct {
 }
 
 // abbreviations, because these types are pretty verbose
-type IndexedVMStore = watch.IndexedStore[vmapi.VirtualMachine, *watch.NameIndex[vmapi.VirtualMachine]]
-type IndexedNodeStore = watch.IndexedStore[corev1.Node, *watch.FlatNameIndex[corev1.Node]]
+type (
+	IndexedVMStore   = watch.IndexedStore[vmapi.VirtualMachine, *watch.NameIndex[vmapi.VirtualMachine]]
+	IndexedNodeStore = watch.IndexedStore[corev1.Node, *watch.FlatNameIndex[corev1.Node]]
+)
 
 // Compile-time checks that AutoscaleEnforcer actually implements the interfaces we want it to
-var _ framework.Plugin = (*AutoscaleEnforcer)(nil)
-var _ framework.PreFilterPlugin = (*AutoscaleEnforcer)(nil)
-var _ framework.PostFilterPlugin = (*AutoscaleEnforcer)(nil)
-var _ framework.FilterPlugin = (*AutoscaleEnforcer)(nil)
-var _ framework.ScorePlugin = (*AutoscaleEnforcer)(nil)
-var _ framework.ReservePlugin = (*AutoscaleEnforcer)(nil)
+var (
+	_ framework.Plugin           = (*AutoscaleEnforcer)(nil)
+	_ framework.PreFilterPlugin  = (*AutoscaleEnforcer)(nil)
+	_ framework.PostFilterPlugin = (*AutoscaleEnforcer)(nil)
+	_ framework.FilterPlugin     = (*AutoscaleEnforcer)(nil)
+	_ framework.ScorePlugin      = (*AutoscaleEnforcer)(nil)
+	_ framework.ReservePlugin    = (*AutoscaleEnforcer)(nil)
+)
 
 func NewAutoscaleEnforcerPlugin(ctx context.Context, logger *zap.Logger, config *Config) func(runtime.Object, framework.Handle) (framework.Plugin, error) {
 	return func(obj runtime.Object, h framework.Handle) (framework.Plugin, error) {
diff --git a/pkg/plugin/state.go b/pkg/plugin/state.go
index 0e8d369df..941318df5 100644
--- a/pkg/plugin/state.go
+++ b/pkg/plugin/state.go
@@ -698,7 +698,6 @@ func (e *AutoscaleEnforcer) speculativeReserve(
 	includeBuffer bool,
 	accept func(verdict verdictSet, overBudget bool) bool,
 ) (ok bool, _ verdictSet) {
-
 	// Construct the speculative state of the pod
 	//
 	// We'll pass this into (resourceTransitioner).handleReserve(), but only commit the changes if
diff --git a/pkg/util/watch/watch.go b/pkg/util/watch/watch.go
index bd984cf0d..b7d26faaf 100644
--- a/pkg/util/watch/watch.go
+++ b/pkg/util/watch/watch.go
@@ -742,10 +742,12 @@ func keyForObj[T any](obj *T) util.NamespacedName {
 func (i *NameIndex[T]) Add(obj *T) {
 	i.namespacedNames[keyForObj(obj)] = obj
 }
+
 func (i *NameIndex[T]) Update(oldObj, newObj *T) {
 	i.Delete(oldObj)
 	i.Add(newObj)
 }
+
 func (i *NameIndex[T]) Delete(obj *T) {
 	delete(i.namespacedNames, keyForObj(obj))
 }
@@ -781,10 +783,12 @@ func getName[T any](obj *T) string {
 func (i *FlatNameIndex[T]) Add(obj *T) {
 	i.names[getName(obj)] = obj
 }
+
 func (i *FlatNameIndex[T]) Update(oldObj, newObj *T) {
 	i.Delete(oldObj)
 	i.Add(newObj)
 }
+
 func (i *FlatNameIndex[T]) Delete(obj *T) {
 	delete(i.names, getName(obj))
 }
diff --git a/vm-builder/main.go b/vm-builder/main.go
index 6771e7dea..3c379aa4c 100644
--- a/vm-builder/main.go
+++ b/vm-builder/main.go
@@ -92,7 +92,7 @@ func addFileToTar(tw *tar.Writer, filename string, contents []byte) error {
 	tarHeader := &tar.Header{
 		Name: filename,
 		Size: int64(len(contents)),
-		Mode: 0755, // TODO: shouldn't just set this for everything.
+		Mode: 0o755, // TODO: shouldn't just set this for everything.
 	}
 
 	if err := tw.WriteHeader(tarHeader); err != nil {
@@ -452,7 +452,6 @@ func main() {
 		}
 	}
 
-
 }
 
 type imageSpec struct {
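
Note for reviewers, since nearly every hunk above is mechanical: gofumpt (mvdan.cc/gofumpt) is a stricter superset of gofmt, and two of its rules account for most of this diff — octal literals must use the 0o prefix, and blocks may not begin or end with an empty line. A minimal sketch of the resulting style (hypothetical file; "sketch" and writeMarker are made-up names, not from this repo):

// sketch.go: toy illustration of the gofumpt style enforced by "make fmt".
package sketch

import "os"

func writeMarker(path string) error {
	// gofumpt strips the blank line that used to follow this opening brace.
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644) // gofumpt rewrites 0644 as 0o644
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = f.WriteString("ok\n")
	return err
}

The remaining rewrites (parenthesized var/const/type groups, "var x = ..." becoming "x := ...") land in the same formatting pass; the updated fmt target applies it tree-wide via go run mvdan.cc/gofumpt@${GOFUMPT_VERSION} -w ., pinned so CI and local runs agree.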