plugin: Log only once on successful agent request (#1015)
Previously we'd log 4 times for each request.

This change merges all of the fields into a single log line that's
emitted while we still hold the state lock, preserving the guarantee
that log lines appear in the same order as the state changes they
describe.

Part of neondatabase/cloud#15591.
sharnoff authored Jul 23, 2024
1 parent 3b8e2b2 commit 047cf64
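The ordering guarantee the commit message relies on is a general pattern: when a state change and the log line describing it happen inside the same critical section, concurrent requests can never produce interleaved, out-of-order log output. Below is a minimal, self-contained Go sketch of that pattern, not the plugin's actual code; the state type and reserve method are hypothetical stand-ins.

// Hypothetical illustration of "log while holding the state lock".
// Because the mutation and its log line share one critical section,
// log order always matches the order the changes were applied.
package main

import (
    "sync"

    "go.uber.org/zap"
)

type state struct {
    mu       sync.Mutex
    reserved int
}

func (s *state) reserve(logger *zap.Logger, amount int) {
    s.mu.Lock()
    defer s.mu.Unlock()

    s.reserved += amount // the state change...
    // ...and its log line, emitted before the lock is released.
    logger.Info("reserved resources", zap.Int("reserved", s.reserved))
}

func main() {
    logger := zap.NewExample()
    defer logger.Sync()

    var s state
    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            s.reserve(logger, 1)
        }()
    }
    wg.Wait() // the lines read reserved=1, 2, 3, 4, in exactly that order
}

If the Info call moved outside the critical section, a goroutine could be preempted between unlocking and logging, and lines could appear out of order even though the state changes themselves were serialized.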
Showing 1 changed file with 24 additions and 26 deletions.
pkg/plugin/run.go

@@ -72,10 +72,10 @@ func (e *AutoscaleEnforcer) startPermitHandler(ctx context.Context, logger *zap.Logger
         return
     }

-    logger = logger.With(zap.Object("pod", req.Pod))
-    logger.Info(
-        "Received autoscaler-agent request",
-        zap.String("client", r.RemoteAddr), zap.Any("request", req),
+    logger = logger.With(
+        zap.Object("pod", req.Pod),
+        zap.String("client", r.RemoteAddr),
+        zap.Any("request", req),
     )

     resp, statusCode, err := e.handleAgentRequest(logger, req)
@@ -104,12 +104,6 @@ func (e *AutoscaleEnforcer) startPermitHandler(ctx context.Context, logger *zap.Logger
         logger.Panic("Failed to encode response JSON", zap.Error(err))
     }

-    logger.Info(
-        "Responding to autoscaler-agent request",
-        zap.Int("status", statusCode),
-        zap.Any("response", resp),
-    )
-
     w.Header().Add("Content-Type", ContentTypeJSON)
     w.WriteHeader(statusCode)
     _, _ = w.Write(responseBody)
@@ -220,8 +214,7 @@ func (e *AutoscaleEnforcer) handleAgentRequest(

     supportsFractionalCPU := req.ProtoVersion.SupportsFractionalCPU()

-    permit, status, err := e.handleResources(
-        logger,
+    verdict, permit, status, err := e.handleResources(
         pod,
         node,
         req.ComputeUnit,
@@ -250,26 +243,34 @@ func (e *AutoscaleEnforcer) handleAgentRequest(
         }
     }

+    status = 200
     resp := api.PluginResponse{
         Permit:  permit,
         Migrate: migrateDecision,
     }
-    return &resp, 200, nil
+
+    logger.Info(
+        "Handled agent request",
+        zap.Object("verdict", verdict),
+        zap.Int("status", status),
+        zap.Any("response", resp),
+    )
+
+    return &resp, status, nil
 }

 func (e *AutoscaleEnforcer) handleResources(
-    logger *zap.Logger,
     pod *podState,
     node *nodeState,
     cu api.Resources,
     req api.Resources,
     lastPermit *api.Resources,
     startingMigration bool,
     supportsFractionalCPU bool,
-) (api.Resources, int, error) {
+) (verdictSet, api.Resources, int, error) {
     if !supportsFractionalCPU && req.VCPU%1000 != 0 {
         err := errors.New("agent requested fractional CPU with protocol version that does not support it")
-        return api.Resources{}, 400, err
+        return verdictSet{}, api.Resources{}, 400, err
     }

     // Check that we aren't being asked to do something during migration:
@@ -278,9 +279,11 @@ func (e *AutoscaleEnforcer) handleResources(
         // migrating.
         if req.VCPU != pod.cpu.Reserved || req.Mem != pod.mem.Reserved {
             err := errors.New("cannot change resources: agent has already been informed that pod is migrating")
-            return api.Resources{}, 400, err
+            return verdictSet{}, api.Resources{}, 400, err
         }
-        return api.Resources{VCPU: pod.cpu.Reserved, Mem: pod.mem.Reserved}, 200, nil
+        message := "No change because pod is migrating"
+        verdict := verdictSet{cpu: message, mem: message}
+        return verdict, api.Resources{VCPU: pod.cpu.Reserved, Mem: pod.mem.Reserved}, 200, nil
     }

     cpuFactor := cu.VCPU
@@ -301,15 +304,10 @@ func (e *AutoscaleEnforcer) handleResources(
     memVerdict := makeResourceTransitioner(&node.mem, &pod.mem).
         handleRequested(req.Mem, lastMemPermit, startingMigration, memFactor)

-    logger.Info(
-        "Handled requested resources from pod",
-        zap.Object("verdict", verdictSet{
-            cpu: cpuVerdict,
-            mem: memVerdict,
-        }),
-    )
+    verdict := verdictSet{cpu: cpuVerdict, mem: memVerdict}
+    permit := api.Resources{VCPU: pod.cpu.Reserved, Mem: pod.mem.Reserved}

-    return api.Resources{VCPU: pod.cpu.Reserved, Mem: pod.mem.Reserved}, 200, nil
+    return verdict, permit, 200, nil
 }

 func (e *AutoscaleEnforcer) updateMetricsAndCheckMustMigrate(
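One note on the verdict value that handleResources now returns: passing it to zap.Object("verdict", verdict) requires verdictSet to implement zap's zapcore.ObjectMarshaler interface. The type's definition is not part of this diff; the following is a plausible sketch only, with the cpu/mem string fields inferred from the verdictSet{cpu: ..., mem: ...} literals above.

// Sketch only: the real definition lives elsewhere in pkg/plugin.
package plugin

import "go.uber.org/zap/zapcore"

// verdictSet pairs human-readable outcomes for the CPU and memory halves
// of a resource request, so a single log line can carry both decisions.
type verdictSet struct {
    cpu string
    mem string
}

// MarshalLogObject lets zap render the verdict as a structured
// sub-object ({"cpu": ..., "mem": ...}) rather than an opaque string.
func (s verdictSet) MarshalLogObject(enc zapcore.ObjectEncoder) error {
    enc.AddString("cpu", s.cpu)
    enc.AddString("mem", s.mem)
    return nil
}

Returning the verdict to handleAgentRequest, rather than logging it inside handleResources, is what lets the caller fold everything into the one "Handled agent request" line while the state lock is still held.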
