Skip to content

Commit

Permalink
e2: reduce prune limit on chain tip (#12953)
Browse files Browse the repository at this point in the history
e2 was periodically spending too much time on pruning at the chain tip

- chain-tip: less aggressive prune
- non-chain-tip: more aggressive prune
  • Loading branch information
AskAlexSharov authored Dec 2, 2024
1 parent 69aa4ee commit d75b5a0
Show file tree
Hide file tree
Showing 7 changed files with 20 additions and 10 deletions.
2 changes: 1 addition & 1 deletion cmd/integration/commands/stages.go
Original file line number Diff line number Diff line change
Expand Up @@ -1460,7 +1460,7 @@ func stageTxLookup(db kv.RwDB, ctx context.Context, logger log.Logger) error {
logger.Info("Stage", "name", s.ID, "progress", s.BlockNumber)

br, _ := blocksIO(db, logger)
cfg := stagedsync.StageTxLookupCfg(db, pm, dirs.Tmp, chainConfig.Bor, br)
cfg := stagedsync.StageTxLookupCfg(db, pm, ethconfig.Defaults.Sync, dirs.Tmp, chainConfig.Bor, br)
if unwind > 0 {
u := sync.NewUnwindState(stages.TxLookup, s.BlockNumber-unwind, s.BlockNumber)
err = stagedsync.UnwindTxLookup(u, s, tx, cfg, ctx, logger)
Expand Down
2 changes: 1 addition & 1 deletion eth/ethconfig/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ var Defaults = Config{
ReconWorkerCount: estimate.ReconstituteState.Workers(),
BodyCacheLimit: 256 * 1024 * 1024,
BodyDownloadTimeoutSeconds: 2,
PruneLimit: 100,
PruneLimit: 10,
},
Ethash: ethashcfg.Config{
CachesInMem: 2,
Expand Down
6 changes: 5 additions & 1 deletion eth/stagedsync/stage_snapshots.go
Original file line number Diff line number Diff line change
Expand Up @@ -505,7 +505,11 @@ func SnapshotsPrune(s *PruneState, initialCycle bool, cfg SnapshotsCfg, ctx cont
//cfg.agg.BuildFilesInBackground()
}

if err := cfg.blockRetire.PruneAncientBlocks(tx, cfg.syncConfig.PruneLimit); err != nil {
pruneLimit := cfg.syncConfig.PruneLimit
if initialCycle { //aggressive prune now. because on chain-tip have limited time to prune
pruneLimit = 10_000
}
if err := cfg.blockRetire.PruneAncientBlocks(tx, pruneLimit); err != nil {
return err
}
}
Expand Down
10 changes: 8 additions & 2 deletions eth/stagedsync/stage_txlookup.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import (
"fmt"
"math/big"

"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/log/v3"

"github.com/ledgerwatch/erigon-lib/chain"
Expand All @@ -24,6 +25,7 @@ import (
type TxLookupCfg struct {
db kv.RwDB
prune prune.Mode
syncConfig ethconfig.Sync
tmpdir string
borConfig *borcfg.BorConfig
blockReader services.FullBlockReader
Expand All @@ -32,6 +34,7 @@ type TxLookupCfg struct {
func StageTxLookupCfg(
db kv.RwDB,
prune prune.Mode,
syncConfig ethconfig.Sync,
tmpdir string,
borConfigInterface chain.BorConfig,
blockReader services.FullBlockReader,
Expand All @@ -44,6 +47,7 @@ func StageTxLookupCfg(
return TxLookupCfg{
db: db,
prune: prune,
syncConfig: syncConfig,
tmpdir: tmpdir,
borConfig: borConfig,
blockReader: blockReader,
Expand Down Expand Up @@ -237,8 +241,10 @@ func PruneTxLookup(s *PruneState, tx kv.RwTx, cfg TxLookupCfg, ctx context.Conte
} else if cfg.blockReader.FreezingCfg().Enabled {
blockTo = cfg.blockReader.CanPruneTo(s.ForwardProgress)
}
// can't prune much here: because tx_lookup index has crypto-hashed-keys, and 1 block producing hundreds of deletes
blockTo = cmp.Min(blockTo, blockFrom+10)
// The chain tip has limited time to prune, but the tx_lookup index uses crypto-hashed keys, so pruning one block produces hundreds of random deletes
if !initialCycle { //on non-chain-tip prune as much as we can
blockTo = cmp.Min(blockTo, blockFrom+uint64(cfg.syncConfig.PruneLimit))
}

if blockFrom < blockTo {
if err = deleteTxLookupRange(tx, logPrefix, blockFrom, blockTo, ctx, cfg, logger); err != nil {
Expand Down
2 changes: 1 addition & 1 deletion turbo/cli/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ var (
SyncLoopPruneLimitFlag = cli.UintFlag{
Name: "sync.loop.prune.limit",
Usage: "Sets the maximum number of block to prune per loop iteration",
Value: 100,
Value: uint(ethconfig.Defaults.Sync.PruneLimit),
}

SyncLoopBreakAfterFlag = cli.StringFlag{
Expand Down
2 changes: 1 addition & 1 deletion turbo/stages/mock/mock_sentry.go
Original file line number Diff line number Diff line change
Expand Up @@ -470,7 +470,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
stagedsync.StageHistoryCfg(mock.DB, prune, dirs.Tmp),
stagedsync.StageLogIndexCfg(mock.DB, prune, dirs.Tmp, nil),
stagedsync.StageCallTracesCfg(mock.DB, prune, 0, dirs.Tmp),
stagedsync.StageTxLookupCfg(mock.DB, prune, dirs.Tmp, mock.ChainConfig.Bor, mock.BlockReader),
stagedsync.StageTxLookupCfg(mock.DB, prune, cfg.Sync, dirs.Tmp, mock.ChainConfig.Bor, mock.BlockReader),
stagedsync.StageFinishCfg(mock.DB, dirs.Tmp, forkValidator),
!withPosDownloader),
stagedsync.DefaultUnwindOrder,
Expand Down
6 changes: 3 additions & 3 deletions turbo/stages/stageloop.go
Original file line number Diff line number Diff line change
Expand Up @@ -528,7 +528,7 @@ func NewDefaultStages(ctx context.Context,
stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp),
stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp, &depositContract),
stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp),
stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader),
stagedsync.StageTxLookupCfg(db, cfg.Prune, cfg.Sync, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader),
stagedsync.StageFinishCfg(db, dirs.Tmp, forkValidator),
runInTestMode)
}
Expand Down Expand Up @@ -607,7 +607,7 @@ func NewPipelineStages(ctx context.Context,
stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp),
stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp, &depositContract),
stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp),
stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader),
stagedsync.StageTxLookupCfg(db, cfg.Prune, cfg.Sync, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader),
stagedsync.StageFinishCfg(db, dirs.Tmp, forkValidator),
runInTestMode)
}
Expand Down Expand Up @@ -643,7 +643,7 @@ func NewPipelineStages(ctx context.Context,
stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp),
stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp, &depositContract),
stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp),
stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader),
stagedsync.StageTxLookupCfg(db, cfg.Prune, cfg.Sync, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader),
stagedsync.StageFinishCfg(db, dirs.Tmp, forkValidator),
runInTestMode)

Expand Down

0 comments on commit d75b5a0

Please sign in to comment.