Fix unused function args or returns
All cases found by enabling the 'unparam' linter in golangci-lint.
sharnoff committed Oct 12, 2023
1 parent f24ef72 commit cc31be8
Showing 10 changed files with 47 additions and 76 deletions.
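For readers unfamiliar with the linter: `unparam` reports function parameters that are never read and results that are always the same value (for example an error that is always nil); in golangci-lint it is normally switched on by adding `unparam` to the `linters.enable` list. The two patterns this commit removes look roughly like the following sketch — the function and package names here are illustrative, not taken from the repository:

```go
package main

import "fmt"

// unparam would flag 'conf': the parameter is accepted but never read,
// so every caller is forced to pass a value that does nothing.
func collect(name string, conf *string) {
	fmt.Println("collecting metrics for", name)
}

// unparam would flag the error result: it is always nil, so the
// error-handling code at every call site is dead weight.
func sum(values []int) (int, error) {
	total := 0
	for _, v := range values {
		total += v
	}
	return total, nil
}

func main() {
	collect("node-a", nil)

	total, _ := sum([]int{1, 2, 3})
	fmt.Println("total:", total)
}
```

The fixes below follow directly from those findings: drop the unused parameter (or comment it out where the signature should stay recognisable), drop the always-nil return, and simplify the call sites accordingly.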
6 changes: 3 additions & 3 deletions pkg/agent/billing/billing.go
@@ -115,7 +115,7 @@ func RunBillingMetricsCollector(
// The rest of this function is to do with collection
logger = logger.Named("collect")

- state.collect(logger, conf, store, metrics)
+ state.collect(logger, store, metrics)

for {
select {
@@ -125,7 +125,7 @@ func RunBillingMetricsCollector(
err := errors.New("VM store stopped but background context is still live")
logger.Panic("Validation check failed", zap.Error(err))
}
- state.collect(logger, conf, store, metrics)
+ state.collect(logger, store, metrics)
case <-accumulateTicker.C:
logger.Info("Creating billing batch")
state.drainEnqueue(logger, conf, client.Hostname(), queueWriter)
@@ -135,7 +135,7 @@ func RunBillingMetricsCollector(
}
}

- func (s *metricsState) collect(logger *zap.Logger, conf *Config, store VMStoreForNode, metrics PromMetrics) {
+ func (s *metricsState) collect(logger *zap.Logger, store VMStoreForNode, metrics PromMetrics) {
now := time.Now()

metricsBatch := metrics.forBatch()
4 changes: 2 additions & 2 deletions pkg/agent/core/state.go
@@ -231,9 +231,9 @@ func (s *State) info(msg string, fields ...zap.Field) {
}
}

- func (s *State) warn(msg string, fields ...zap.Field) {
+ func (s *State) warn(msg string /* , fields ...zap.Field */) {
if s.config.Log.Warn != nil {
- s.config.Log.Warn(msg, fields...)
+ s.config.Log.Warn(msg /* , fields... */)
}
}

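A note on the `warn` change above: the unused variadic parameter is commented out rather than deleted, presumably so the signature stays visually parallel to `info` and the fields can be reinstated once a caller actually passes some. A minimal sketch of that pattern, with a hypothetical logger:

```go
package main

import "log"

// warn ignores structured fields for now; keeping the parameter as a comment
// documents the intended signature without tripping the unparam linter.
func warn(msg string /* , fields ...any */) {
	log.Println("WARN:", msg)
}

func main() {
	warn("scheduler request timed out")
}
```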
24 changes: 12 additions & 12 deletions pkg/agent/core/state_test.go
@@ -740,8 +740,8 @@ func TestDeniedDownscalingIncreaseAndRetry(t *testing.T) {
func TestRequestedUpscale(t *testing.T) {
a := helpers.NewAssert(t)
clock := helpers.NewFakeClock(t)
- clockTick := func() helpers.Elapsed {
-     return clock.Inc(100 * time.Millisecond)
+ clockTick := func() {
+     clock.Inc(100 * time.Millisecond)
}
resForCU := DefaultComputeUnit.Mul

@@ -1070,8 +1070,8 @@ func TestDownscalePivotBack(t *testing.T) {
func TestSchedulerDownscaleReupscale(t *testing.T) {
a := helpers.NewAssert(t)
clock := helpers.NewFakeClock(t)
- clockTick := func() helpers.Elapsed {
-     return clock.Inc(100 * time.Millisecond)
+ clockTick := func() {
+     clock.Inc(100 * time.Millisecond)
}
resForCU := DefaultComputeUnit.Mul

@@ -1224,8 +1224,8 @@ func TestSchedulerDownscaleReupscale(t *testing.T) {
func TestBoundsChangeRequiresDownsale(t *testing.T) {
a := helpers.NewAssert(t)
clock := helpers.NewFakeClock(t)
- clockTick := func() helpers.Elapsed {
-     return clock.Inc(100 * time.Millisecond)
+ clockTick := func() {
+     clock.Inc(100 * time.Millisecond)
}
resForCU := DefaultComputeUnit.Mul

@@ -1321,8 +1321,8 @@ func TestBoundsChangeRequiresDownsale(t *testing.T) {
func TestBoundsChangeRequiresUpscale(t *testing.T) {
a := helpers.NewAssert(t)
clock := helpers.NewFakeClock(t)
- clockTick := func() helpers.Elapsed {
-     return clock.Inc(100 * time.Millisecond)
+ clockTick := func() {
+     clock.Inc(100 * time.Millisecond)
}
resForCU := DefaultComputeUnit.Mul

@@ -1415,8 +1415,8 @@ func TestBoundsChangeRequiresUpscale(t *testing.T) {
func TestFailedRequestRetry(t *testing.T) {
a := helpers.NewAssert(t)
clock := helpers.NewFakeClock(t)
- clockTick := func() helpers.Elapsed {
-     return clock.Inc(100 * time.Millisecond)
+ clockTick := func() {
+     clock.Inc(100 * time.Millisecond)
}
resForCU := DefaultComputeUnit.Mul

@@ -1533,8 +1533,8 @@ func TestFailedRequestRetry(t *testing.T) {
func TestMetricsConcurrentUpdatedDuringDownscale(t *testing.T) {
a := helpers.NewAssert(t)
clock := helpers.NewFakeClock(t)
- clockTick := func() helpers.Elapsed {
-     return clock.Inc(100 * time.Millisecond)
+ clockTick := func() {
+     clock.Inc(100 * time.Millisecond)
}
resForCU := DefaultComputeUnit.Mul

27 changes: 15 additions & 12 deletions pkg/agent/globalstate.go
@@ -52,6 +52,8 @@ func (r MainRunner) newAgentState(
broker *pubsub.Broker[schedwatch.WatchEvent],
schedulerStore *watch.Store[corev1.Pod],
) (*agentState, *prometheus.Registry) {
+ metrics, promReg := makePrometheusParts()
+
state := &agentState{
lock: util.NewChanMutex(),
pods: make(map[util.NamespacedName]*podState),
@@ -62,12 +64,9 @@
podIP: podIP,
schedulerEventBroker: broker,
schedulerStore: schedulerStore,
- metrics: PromMetrics{}, //nolint:exhaustruct // set below
+ metrics: metrics,
}

- var promReg *prometheus.Registry
- state.metrics, promReg = makePrometheusParts(state)

return state, promReg
}

@@ -168,8 +167,7 @@ func (s *agentState) handleVMEventAdded(
// Empty update to trigger updating metrics and state.
status.update(s, func(s podStatus) podStatus { return s })

- restartCount := 0
- runner := s.newRunner(event.vmInfo, podName, event.podIP, restartCount)
+ runner := s.newRunner(event.vmInfo, podName, event.podIP)
runner.status = status

txVMUpdate, rxVMUpdate := util.NewCondChannelPair()
@@ -182,7 +180,8 @@
vmInfoUpdated: txVMUpdate,
}
s.metrics.runnerStarts.Inc()
- logger := s.loggerForRunner(event.vmInfo.NamespacedName(), podName)
+ restartCount := 0
+ logger := s.loggerForRunner(restartCount, event.vmInfo.NamespacedName(), podName)
runner.Spawn(runnerCtx, logger, rxVMUpdate)
}

@@ -319,7 +318,7 @@ func (s *agentState) TriggerRestartIfNecessary(runnerCtx context.Context, logger
s.metrics.runnerRestarts.Inc()

restartCount := len(status.previousEndStates) + 1
- runner := s.newRunner(status.vmInfo, podName, podIP, restartCount)
+ runner := s.newRunner(status.vmInfo, podName, podIP)
runner.status = pod.status

txVMUpdate, rxVMUpdate := util.NewCondChannelPair()
@@ -331,19 +330,23 @@
status.endState = nil
status.startTime = time.Now()

- runnerLogger := s.loggerForRunner(status.vmInfo.NamespacedName(), podName)
+ runnerLogger := s.loggerForRunner(restartCount, status.vmInfo.NamespacedName(), podName)
runner.Spawn(runnerCtx, runnerLogger, rxVMUpdate)
return status
})
}()
}

- func (s *agentState) loggerForRunner(vmName, podName util.NamespacedName) *zap.Logger {
-     return s.baseLogger.Named("runner").With(zap.Object("virtualmachine", vmName), zap.Object("pod", podName))
+ func (s *agentState) loggerForRunner(restartCount int, vmName, podName util.NamespacedName) *zap.Logger {
+     return s.baseLogger.Named("runner").With(
+         zap.Int("restarts", restartCount),
+         zap.Object("virtualmachine", vmName),
+         zap.Object("pod", podName),
+     )
}

// NB: caller must set Runner.status after creation
- func (s *agentState) newRunner(vmInfo api.VmInfo, podName util.NamespacedName, podIP string, restartCount int) *Runner {
+ func (s *agentState) newRunner(vmInfo api.VmInfo, podName util.NamespacedName, podIP string) *Runner {
return &Runner{
global: s,
status: nil, // set by caller
2 changes: 1 addition & 1 deletion pkg/agent/prommetrics.go
@@ -48,7 +48,7 @@ const (
runnerMetricStatePanicked runnerMetricState = "panicked"
)

- func makePrometheusParts(globalstate *agentState) (PromMetrics, *prometheus.Registry) {
+ func makePrometheusParts() (PromMetrics, *prometheus.Registry) {
reg := prometheus.NewRegistry()

// register stock collectors directly:
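The globalstate.go and prommetrics.go changes above are slightly more than argument removal: `makePrometheusParts` no longer needs the half-built `agentState`, so the metrics can be created first and passed into the struct literal instead of being patched in afterwards; likewise `restartCount` now feeds the runner's logger rather than `newRunner`. A rough sketch of that construction-order pattern, with hypothetical types and field layouts invented for illustration:

```go
package main

import "fmt"

type Metrics struct{ collectors int }

type State struct {
	name    string
	metrics Metrics
}

// makeMetrics stands in for makePrometheusParts: it no longer needs the
// state it will be attached to, so it can run before the struct is built.
func makeMetrics() Metrics {
	return Metrics{collectors: 3}
}

func newState(name string) *State {
	metrics := makeMetrics()
	// Build the dependency first, then construct the state in one step,
	// with no placeholder field to patch afterwards.
	return &State{name: name, metrics: metrics}
}

func main() {
	s := newState("autoscaler-agent")
	fmt.Println(s.metrics.collectors, "collectors registered")
}
```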
4 changes: 2 additions & 2 deletions pkg/plugin/config.go
@@ -264,7 +264,7 @@ func (c *Config) forNode(nodeName string) *nodeConfig {
return &c.NodeDefaults
}

- func (c *nodeConfig) vCpuLimits(total *resource.Quantity) (_ nodeResourceState[vmapi.MilliCPU], margin *resource.Quantity, _ error) {
+ func (c *nodeConfig) vCpuLimits(total *resource.Quantity) (_ nodeResourceState[vmapi.MilliCPU], margin *resource.Quantity) {
totalMilli := total.MilliValue()

margin = resource.NewMilliQuantity(0, total.Format)
@@ -276,7 +276,7 @@ func (c *nodeConfig) vCpuLimits(total *resource.Quantity) (_ nodeResourceState[v
Buffer: 0,
CapacityPressure: 0,
PressureAccountedFor: 0,
- }, margin, nil
+ }, margin
}

func (c *nodeConfig) memoryLimits(
31 changes: 4 additions & 27 deletions pkg/plugin/plugin.go
@@ -74,7 +74,7 @@ func NewAutoscaleEnforcerPlugin(ctx context.Context, logger *zap.Logger, config
func makeAutoscaleEnforcerPlugin(
ctx context.Context,
logger *zap.Logger,
- obj runtime.Object,
+ _obj runtime.Object,
h framework.Handle,
config *Config,
) (framework.Plugin, error) {
@@ -420,14 +420,7 @@ func (e *AutoscaleEnforcer) Filter(

var otherPodInfo podOtherResourceState
if vmInfo == nil {
- otherPodInfo, err = extractPodOtherPodResourceState(pod)
- if err != nil {
-     logger.Error("Error extracting resource state for non-VM pod", zap.Error(err))
-     return framework.NewStatus(
-         framework.UnschedulableAndUnresolvable,
-         fmt.Sprintf("Error getting pod info: %s", err),
-     )
- }
+ otherPodInfo = extractPodOtherPodResourceState(pod)
}

e.state.lock.Lock()
@@ -525,16 +518,7 @@ func (e *AutoscaleEnforcer) Filter(
}

// We *also* need to count pods in ignored namespaces
- resources, err := extractPodOtherPodResourceState(podInfo.Pod)
- if err != nil {
-     // FIXME: Same duplicate "pod" field issue as above; same temporary solution.
-     logger.Error(
-         "Error extracting resource state for non-VM Pod",
-         zap.Object("pod", name),
-         zap.Error(fmt.Errorf("Error extracting resource state for %v: %w", name, err)),
-     )
-     continue
- }
+ resources := extractPodOtherPodResourceState(podInfo.Pod)

oldRes := otherResources
otherResources = oldRes.addPod(&e.state.conf.MemSlotSize, resources)
@@ -888,14 +872,7 @@ func (e *AutoscaleEnforcer) Reserve(

// if this is a non-VM pod, use a different set of information for it.
if vmInfo == nil {
- podResources, err := extractPodOtherPodResourceState(pod)
- if err != nil {
-     logger.Error("Error extracting resource state for non-VM pod", zap.Error(err))
-     return framework.NewStatus(
-         framework.UnschedulableAndUnresolvable,
-         fmt.Sprintf("Error getting non-VM pod info: %v", err),
-     )
- }
+ podResources := extractPodOtherPodResourceState(pod)

oldNodeRes := node.otherResources
newNodeRes := node.otherResources.addPod(&e.state.conf.MemSlotSize, podResources)
20 changes: 5 additions & 15 deletions pkg/plugin/state.go
@@ -593,10 +593,7 @@ func buildInitialNodeState(logger *zap.Logger, node *corev1.Node, conf *Config)
return nil, errors.New("Node has no Allocatable or Capacity CPU limits")
}

- vCPU, marginCpu, err := nodeConf.vCpuLimits(cpuQ)
- if err != nil {
-     return nil, fmt.Errorf("Error calculating vCPU limits for node %s: %w", node.Name, err)
- }
+ vCPU, marginCpu := nodeConf.vCpuLimits(cpuQ)

// memQ = "mem, as a K8s resource.Quantity"
// -A for allocatable, -C for capacity
@@ -684,7 +681,7 @@ func buildInitialNodeState(logger *zap.Logger, node *corev1.Node, conf *Config)
return n, nil
}

- func extractPodOtherPodResourceState(pod *corev1.Pod) (podOtherResourceState, error) {
+ func extractPodOtherPodResourceState(pod *corev1.Pod) podOtherResourceState {
var cpu resource.Quantity
var mem resource.Quantity

@@ -698,7 +695,7 @@ func extractPodOtherPodResourceState(pod *corev1.Pod) (podOtherResourceState, er
mem.Add(*container.Resources.Requests.Memory())
}

- return podOtherResourceState{RawCPU: cpu, RawMemory: mem}, nil
+ return podOtherResourceState{RawCPU: cpu, RawMemory: mem}
}

func (e *AutoscaleEnforcer) handleNodeDeletion(logger *zap.Logger, nodeName string) {
@@ -758,11 +755,7 @@ func (e *AutoscaleEnforcer) handlePodStarted(logger *zap.Logger, pod *corev1.Pod

logger.Info("Handling non-VM pod start event")

- podResources, err := extractPodOtherPodResourceState(pod)
- if err != nil {
-     logger.Error("Error extracting resource state for non-VM pod", zap.Error(err))
-     return
- }
+ podResources := extractPodOtherPodResourceState(pod)

e.state.lock.Lock()
defer e.state.lock.Unlock()
@@ -1513,10 +1506,7 @@ func (p *AutoscaleEnforcer) readClusterState(ctx context.Context, logger *zap.Lo

// TODO: this is largely duplicated from Reserve, so we should deduplicate it (probably into
// trans.go or something).
- podRes, err := extractPodOtherPodResourceState(pod)
- if err != nil {
-     return fmt.Errorf("Error extracting resource state from non-VM pod %v: %w", podName, err)
- }
+ podRes := extractPodOtherPodResourceState(pod)

oldNodeRes := ns.otherResources
newNodeRes := ns.otherResources.addPod(&p.state.conf.MemSlotSize, podRes)
2 changes: 2 additions & 0 deletions pkg/plugin/trans.go
@@ -299,6 +299,8 @@ func (r resourceTransition[T]) handleAutoscalingDisabled() (verdict string) {
//
// If the pod is the migration source, this method *also* increases the node's PressureAccountedFor
// to match the pod's resource usage.
+ //
+ //nolint:unparam // linter complains about 'source'. FIXME: needs more work to figure this out.
func (r resourceTransition[T]) handleStartMigration(source bool) (verdict string) {
// This method is basically the same as handleAutoscalingDisabled, except we also update the
// node's PressureAccountedFor because any pressure generated by the pod will be resolved once
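For the one case that could not be fixed outright, the commit suppresses the finding instead: a `//nolint:unparam` line in the comment block immediately above a declaration is generally treated by golangci-lint as applying to that whole declaration, and the text after the second `//` records why the suppression exists. A minimal hedged sketch — the function below is illustrative, not the real `handleStartMigration`:

```go
package main

import "fmt"

// describeMigration reports which side of a migration a pod is on.
//
//nolint:unparam // 'source' is always true at current call sites; kept so both sides share one code path.
func describeMigration(source bool) string {
	if source {
		return "pod is the migration source"
	}
	return "pod is the migration target"
}

func main() {
	fmt.Println(describeMigration(true))
}
```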
3 changes: 1 addition & 2 deletions pkg/util/watch/watch.go
@@ -295,7 +295,7 @@ func Watch[C Client[L], L metav1.ListMetaAccessor, T any, P Object[T]](

// Wrap the remainder in a function, so we can have deferred unlocks.
uid := meta.GetUID()
- err := handleEvent(&store, config, handlers, event.Type, uid, obj)
+ err := handleEvent(&store, handlers, event.Type, uid, obj)
if err != nil {
name := util.NamespacedName{Namespace: meta.GetNamespace(), Name: meta.GetName()}
logger.Error(
@@ -485,7 +485,6 @@ func Watch[C Client[L], L metav1.ListMetaAccessor, T any, P Object[T]](
// helper for Watch. Error events are expected to already have been handled by the caller.
func handleEvent[T any, P ~*T](
store *Store[T],
- config Config,
handlers HandlerFuncs[P],
eventType watch.EventType,
uid types.UID,
