diff --git a/.golangci.yml b/.golangci.yml
index 497ee05..102a960 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -5,7 +5,7 @@ linters:
     - dogsled
     - dupl
     - errcheck
-    - exportloopref
+    - copyloopvar
     - goconst
     - gofmt
     - goimports
@@ -23,12 +23,12 @@ linters:
     - typecheck
     - unconvert
     - unused
-
 issues:
   exclude-rules:
     - path: _test\.go
       linters:
         - gosec
+        - unused
     - linters:
         - stylecheck
       text: "ST1003:"
diff --git a/bot/bot.go b/bot/bot.go
index a2506a5..9528f19 100644
--- a/bot/bot.go
+++ b/bot/bot.go
@@ -3,18 +3,16 @@ package bot
 import (
 	"encoding/json"
 	"errors"
-	"fmt"
 	"io"
 	"os"
 
-	"go.uber.org/zap"
-
 	bottypes "github.com/initia-labs/opinit-bots/bot/types"
 	"github.com/initia-labs/opinit-bots/challenger"
 	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
-	"github.com/initia-labs/opinit-bots/db"
 	"github.com/initia-labs/opinit-bots/executor"
 	executortypes "github.com/initia-labs/opinit-bots/executor/types"
+	"github.com/initia-labs/opinit-bots/server"
+	"github.com/initia-labs/opinit-bots/types"
 )
 
 func LoadJsonConfig(path string, config bottypes.Config) error {
@@ -35,17 +33,12 @@ func LoadJsonConfig(path string, config bottypes.Config) error {
 	return nil
 }
 
-func NewBot(botType bottypes.BotType, logger *zap.Logger, homePath string, configPath string) (bottypes.Bot, error) {
+func NewBot(botType bottypes.BotType, db types.DB, configPath string) (bottypes.Bot, error) {
 	err := botType.Validate()
 	if err != nil {
 		return nil, err
 	}
 
-	db, err := db.NewDB(GetDBPath(homePath, botType))
-	if err != nil {
-		return nil, err
-	}
-
 	switch botType {
 	case bottypes.BotTypeExecutor:
 		cfg := &executortypes.Config{}
@@ -53,18 +46,16 @@ func NewBot(botType bottypes.BotType, logger *zap.Logger, homePath string, confi
 		if err != nil {
 			return nil, err
 		}
-		return executor.NewExecutor(cfg, db, logger.Named("executor"), homePath), nil
+		server := server.NewServer(cfg.Server)
+		return executor.NewExecutor(cfg, db, server), nil
 	case bottypes.BotTypeChallenger:
 		cfg := &challengertypes.Config{}
 		err := LoadJsonConfig(configPath, cfg)
 		if err != nil {
 			return nil, err
 		}
-		return challenger.NewChallenger(cfg, db, logger.Named("challenger"), homePath), nil
+		server := server.NewServer(cfg.Server)
+		return challenger.NewChallenger(cfg, db, server), nil
 	}
 	return nil, errors.New("not providing bot name")
 }
-
-func GetDBPath(homePath string, botName bottypes.BotType) string {
-	return fmt.Sprintf(homePath+"/%s.db", botName)
-}
diff --git a/bot/types/bot.go b/bot/types/bot.go
index 0798954..4038d5b 100644
--- a/bot/types/bot.go
+++ b/bot/types/bot.go
@@ -1,11 +1,10 @@
 package types
 
 import (
-	"context"
+	"github.com/initia-labs/opinit-bots/types"
 )
 
 type Bot interface {
-	Initialize(context.Context) error
-	Start(context.Context) error
-	Close()
+	Initialize(types.Context) error
+	Start(types.Context) error
 }
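The refactor above replaces the context.Context / *zap.Logger / homePath triple with a single types.Context threaded through every handler. Its concrete definition lives in the types package and is not part of this diff; a minimal sketch of the shape implied by the call sites (types.NewContext, ctx.Logger(), ctx.ErrGrp(), <-ctx.Done()) could look like the following, assuming zap and errgroup — the field names and HomePath getter are assumptions:

package types

import (
	"context"

	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"
)

// Context bundles what the handlers in this diff consume: a cancellable
// stdlib context, a shared logger, and an error group for Start loops.
// Sketch only; the real definition is outside this diff.
type Context struct {
	context.Context // embedded, so <-ctx.Done() keeps working

	logger   *zap.Logger
	errGrp   *errgroup.Group
	homePath string
}

// NewContext mirrors the constructor call visible in cmd/opinitd/db.go below.
func NewContext(ctx context.Context, logger *zap.Logger, homePath string) Context {
	errGrp, groupCtx := errgroup.WithContext(ctx)
	return Context{
		Context:  groupCtx,
		logger:   logger,
		errGrp:   errGrp,
		homePath: homePath,
	}
}

func (c Context) Logger() *zap.Logger     { return c.logger }
func (c Context) ErrGrp() *errgroup.Group { return c.errGrp }
func (c Context) HomePath() string        { return c.homePath }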
diff --git a/challenger/challenger.go b/challenger/challenger.go
index 8e3b7d3..31b0405 100644
--- a/challenger/challenger.go
+++ b/challenger/challenger.go
@@ -1,8 +1,6 @@
 package challenger
 
 import (
-	"context"
-	"strconv"
 	"sync"
 	"time"
 
@@ -10,6 +8,7 @@ import (
 	"github.com/gofiber/fiber/v2"
 
 	"github.com/initia-labs/opinit-bots/challenger/child"
+	challengerdb "github.com/initia-labs/opinit-bots/challenger/db"
 	"github.com/initia-labs/opinit-bots/challenger/host"
 	"github.com/initia-labs/opinit-bots/server"
 
@@ -32,9 +31,6 @@ type Challenger struct {
 	cfg    *challengertypes.Config
 	db     types.DB
 	server *server.Server
-	logger *zap.Logger
-
-	homePath string
 
 	challengeCh        chan challengertypes.Challenge
 	challengeChStopped chan struct{}
@@ -44,9 +40,11 @@ type Challenger struct {
 	// status info
 	latestChallengesMu *sync.Mutex
 	latestChallenges   []challengertypes.Challenge
+
+	stage types.CommitDB
 }
 
-func NewChallenger(cfg *challengertypes.Config, db types.DB, logger *zap.Logger, homePath string) *Challenger {
+func NewChallenger(cfg *challengertypes.Config, db types.DB, sv *server.Server) *Challenger {
 	err := cfg.Validate()
 	if err != nil {
 		panic(err)
 	}
@@ -55,22 +53,17 @@ func NewChallenger(cfg *challengertypes.Config, db types.DB, logger *zap.Logger,
 	challengeCh := make(chan challengertypes.Challenge)
 	return &Challenger{
 		host: host.NewHostV1(
-			cfg.L1NodeConfig(homePath),
+			cfg.L1NodeConfig(),
 			db.WithPrefix([]byte(types.HostName)),
-			logger.Named(types.HostName),
 		),
 		child: child.NewChildV1(
-			cfg.L2NodeConfig(homePath),
+			cfg.L2NodeConfig(),
 			db.WithPrefix([]byte(types.ChildName)),
-			logger.Named(types.ChildName),
 		),
 		cfg: cfg,
 		db:  db,
-		server: server.NewServer(cfg.Server),
-		logger: logger,
-
-		homePath: homePath,
+		server: sv,
 
 		challengeCh:        challengeCh,
 		challengeChStopped: make(chan struct{}),
@@ -79,10 +72,12 @@ func NewChallenger(cfg *challengertypes.Config, db types.DB, logger *zap.Logger,
 
 		latestChallengesMu: &sync.Mutex{},
 		latestChallenges:   make([]challengertypes.Challenge, 0),
+
+		stage: db.NewStage(),
 	}
 }
 
-func (c *Challenger) Initialize(ctx context.Context) error {
+func (c *Challenger) Initialize(ctx types.Context) error {
 	childBridgeInfo, err := c.child.QueryBridgeInfo(ctx)
 	if err != nil {
 		return err
@@ -96,7 +91,7 @@ func (c *Challenger) Initialize(ctx context.Context) error {
 		return err
 	}
 
-	c.logger.Info(
+	ctx.Logger().Info(
 		"bridge info",
 		zap.Uint64("id", bridgeInfo.BridgeId),
 		zap.Duration("submission_interval", bridgeInfo.BridgeConfig.SubmissionInterval),
@@ -128,7 +123,7 @@ func (c *Challenger) Initialize(ctx context.Context) error {
 	if !initialBlockTime.IsZero() {
 		// The db state is reset to a specific height, so we also
 		// need to delete future challenges which are not applicable anymore.
-		err := c.DeleteFutureChallenges(initialBlockTime)
+		err := challengerdb.DeleteFutureChallenges(c.db, initialBlockTime)
 		if err != nil {
 			return err
 		}
@@ -136,56 +131,49 @@ func (c *Challenger) Initialize(ctx context.Context) error {
 
 	c.RegisterQuerier()
 
-	c.pendingChallenges, err = c.loadPendingChallenges()
+	c.pendingChallenges, err = challengerdb.LoadPendingChallenges(c.db)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to load pending challenges")
 	}
 
-	c.latestChallenges, err = c.loadChallenges()
+	c.latestChallenges, err = challengerdb.LoadChallenges(c.db)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to load challenges")
 	}
 
 	return nil
 }
 
-func (c *Challenger) Start(ctx context.Context) error {
-	defer c.Close()
-
-	errGrp := types.ErrGrp(ctx)
-	errGrp.Go(func() (err error) {
+func (c *Challenger) Start(ctx types.Context) error {
+	ctx.ErrGrp().Go(func() (err error) {
 		<-ctx.Done()
 		return c.server.Shutdown()
 	})
 
-	errGrp.Go(func() (err error) {
+	ctx.ErrGrp().Go(func() (err error) {
 		defer func() {
-			c.logger.Info("api server stopped")
+			ctx.Logger().Info("api server stopped")
 		}()
 		return c.server.Start()
 	})
 
-	errGrp.Go(func() error {
+	ctx.ErrGrp().Go(func() error {
 		for _, ch := range c.pendingChallenges {
 			c.challengeCh <- ch
 		}
 		return nil
 	})
 
-	errGrp.Go(func() (err error) {
+	ctx.ErrGrp().Go(func() (err error) {
 		defer func() {
-			c.logger.Info("challenge handler stopped")
+			ctx.Logger().Info("challenge handler stopped")
 		}()
 		return c.challengeHandler(ctx)
 	})
 
 	c.host.Start(ctx)
 	c.child.Start(ctx)
 
-	return errGrp.Wait()
-}
-
-func (c *Challenger) Close() {
-	c.db.Close()
+	return ctx.ErrGrp().Wait()
 }
 
 func (c *Challenger) RegisterQuerier() {
@@ -197,16 +185,25 @@ func (c *Challenger) RegisterQuerier() {
 		return ctx.JSON(status)
 	})
 
-	c.server.RegisterQuerier("/challenges/:page", func(ctx *fiber.Ctx) error {
-		pageStr := ctx.Params("page")
-		if pageStr == "" {
-			pageStr = "1"
+	c.server.RegisterQuerier("/challenges", func(ctx *fiber.Ctx) error {
+		next := ctx.Query("next", "")
+		limit := ctx.QueryInt("limit", 10)
+		if limit > 100 {
+			limit = 100
 		}
-		page, err := strconv.ParseUint(pageStr, 10, 64)
+
+		ulimit, err := types.SafeInt64ToUint64(int64(limit))
 		if err != nil {
-			return err
+			return errors.Wrap(err, "failed to convert limit")
 		}
-		res, err := c.QueryChallenges(page)
+
+		descOrder := true
+		orderStr := ctx.Query("order", "desc")
+		if orderStr == "asc" {
+			descOrder = false
+		}
+
+		res, err := c.QueryChallenges(next, ulimit, descOrder)
 		if err != nil {
 			return err
 		}
@@ -230,7 +227,11 @@
 	})
 }
 
-func (c *Challenger) getProcessedHeights(ctx context.Context, bridgeId uint64) (l1ProcessedHeight int64, l2ProcessedHeight int64, processedOutputIndex uint64, err error) {
+func (c *Challenger) getProcessedHeights(ctx types.Context, bridgeId uint64) (l1ProcessedHeight int64, l2ProcessedHeight int64, processedOutputIndex uint64, err error) {
+	if c.host.Node().GetSyncedHeight() != 0 && c.child.Node().GetSyncedHeight() != 0 {
+		return 0, 0, 0, nil
+	}
+
 	var outputL1BlockNumber int64
 	// get the last submitted output height before the start height from the host
 	if c.cfg.L2StartHeight != 0 {
@@ -284,3 +285,7 @@ func (c *Challenger) getProcessedHeights(ctx context.Context, bridgeId uint64) (
 
 	return l1ProcessedHeight, l2ProcessedHeight, processedOutputIndex, err
 }
+
+func (c Challenger) DB() types.DB {
+	return c.db
+}
diff --git a/challenger/child/child.go b/challenger/child/child.go
index 349d409..181213f 100644
--- a/challenger/child/child.go
+++ b/challenger/child/child.go
@@ -4,10 +4,9 @@ import (
 	"context"
 	"time"
 
-	"go.uber.org/zap"
-
 	opchildtypes "github.com/initia-labs/OPinit/x/opchild/types"
 	ophosttypes "github.com/initia-labs/OPinit/x/ophost/types"
+	"github.com/pkg/errors"
 
 	nodetypes "github.com/initia-labs/opinit-bots/node/types"
 	"github.com/initia-labs/opinit-bots/types"
@@ -18,7 +17,7 @@ import (
 )
 
 type challenger interface {
-	PendingChallengeToRawKVs([]challengertypes.Challenge, bool) ([]types.RawKV, error)
+	DB() types.DB
 	SendPendingChallenges([]challengertypes.Challenge)
 }
 
@@ -43,21 +42,24 @@ type Child struct {
 	lastFinalizedDepositL1Sequence uint64
 	lastOutputTime                 time.Time
 	nextOutputTime                 time.Time
+
+	stage types.CommitDB
 }
 
 func NewChildV1(
 	cfg nodetypes.NodeConfig,
-	db types.DB, logger *zap.Logger,
+	db types.DB,
 ) *Child {
 	return &Child{
-		BaseChild:    childprovider.NewBaseChildV1(cfg, db, logger),
-		eventHandler: eventhandler.NewChallengeEventHandler(db, logger),
+		BaseChild:    childprovider.NewBaseChildV1(cfg, db),
+		eventHandler: eventhandler.NewChallengeEventHandler(db),
 		eventQueue:   make([]challengertypes.ChallengeEvent, 0),
+		stage:        db.NewStage(),
 	}
 }
 
 func (ch *Child) Initialize(
-	ctx context.Context,
+	ctx types.Context,
 	processedHeight int64,
 	startOutputIndex uint64,
 	host hostNode,
@@ -74,7 +76,7 @@ func (ch *Child) Initialize(
 		false,
 	)
 	if err != nil {
-		return time.Time{}, err
+		return time.Time{}, errors.Wrap(err, "failed to initialize base child")
 	}
 	ch.host = host
 	ch.challenger = challenger
@@ -82,7 +84,7 @@ func (ch *Child) Initialize(
 
 	err = ch.eventHandler.Initialize(bridgeInfo.BridgeConfig.SubmissionInterval)
 	if err != nil {
-		return time.Time{}, err
+		return time.Time{}, errors.Wrap(err, "failed to initialize event handler")
 	}
 
 	var blockTime time.Time
@@ -91,7 +93,7 @@ func (ch *Child) Initialize(
 	if ch.Node().HeightInitialized() {
 		blockTime, err = ch.Node().QueryBlockTime(ctx, ch.Node().GetHeight())
 		if err != nil {
-			return time.Time{}, err
+			return time.Time{}, errors.Wrap(err, "failed to query block time")
 		}
 	}
 	return blockTime, nil
@@ -105,10 +107,6 @@ func (ch *Child) registerHandlers() {
 	ch.Node().RegisterEndBlockHandler(ch.endBlockHandler)
 }
 
-func (ch *Child) PendingEventsToRawKV(events []challengertypes.ChallengeEvent, delete bool) ([]types.RawKV, error) {
-	return ch.eventHandler.PendingEventsToRawKV(events, delete)
-}
-
 func (ch *Child) SetPendingEvents(events []challengertypes.ChallengeEvent) {
 	ch.eventHandler.SetPendingEvents(events)
 }
diff --git a/challenger/child/deposit.go b/challenger/child/deposit.go
index cbfc54c..7b58b65 100644
--- a/challenger/child/deposit.go
+++ b/challenger/child/deposit.go
@@ -1,34 +1,35 @@
 package child
 
 import (
-	"context"
 	"time"
 
 	childprovider "github.com/initia-labs/opinit-bots/provider/child"
+	"github.com/initia-labs/opinit-bots/types"
 	"go.uber.org/zap"
 
 	sdk "github.com/cosmos/cosmos-sdk/types"
 
 	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
 	nodetypes "github.com/initia-labs/opinit-bots/node/types"
+	"github.com/pkg/errors"
 )
 
-func (ch *Child) finalizeDepositHandler(_ context.Context, args nodetypes.EventHandlerArgs) error {
+func (ch *Child) finalizeDepositHandler(ctx types.Context, args nodetypes.EventHandlerArgs) error {
 	l1BlockHeight, l1Sequence, from, to, baseDenom, amount, err := childprovider.ParseFinalizeDeposit(args.EventAttributes)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to parse finalize deposit event")
 	}
-	ch.handleFinalizeDeposit(args.BlockTime, l1BlockHeight, l1Sequence, from, to, amount, baseDenom)
+	ch.handleFinalizeDeposit(ctx, args.BlockTime, l1BlockHeight, l1Sequence, from, to, amount, baseDenom)
 	ch.lastFinalizedDepositL1BlockHeight = l1BlockHeight
 	ch.lastFinalizedDepositL1Sequence = l1Sequence
 	return nil
 }
 
-func (ch *Child) handleFinalizeDeposit(l2BlockTime time.Time, l1BlockHeight int64, l1Sequence uint64, from string, to string, amount sdk.Coin, baseDenom string) {
+func (ch *Child) handleFinalizeDeposit(ctx types.Context, l2BlockTime time.Time, l1BlockHeight int64, l1Sequence uint64, from string, to string, amount sdk.Coin, baseDenom string) {
 	deposit := challengertypes.NewDeposit(l1Sequence, l1BlockHeight, from, to, baseDenom, amount.String(), l2BlockTime)
 	ch.eventQueue = append(ch.eventQueue, deposit)
 
-	ch.Logger().Info("finalize token deposit",
+	ctx.Logger().Info("finalize token deposit",
 		zap.Int64("l1_blockHeight", l1BlockHeight),
 		zap.Uint64("l1_sequence", l1Sequence),
 		zap.String("from", from),
diff --git a/challenger/child/handler.go b/challenger/child/handler.go
index 662ddb1..4b71cc9 100644
--- a/challenger/child/handler.go
+++ b/challenger/child/handler.go
@@ -1,65 +1,64 @@
 package child
 
 import (
-	"context"
-	"errors"
-
+	eventhandler "github.com/initia-labs/opinit-bots/challenger/eventhandler"
 	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
+	"github.com/initia-labs/opinit-bots/node"
 	nodetypes "github.com/initia-labs/opinit-bots/node/types"
 	"github.com/initia-labs/opinit-bots/txutils"
 	"github.com/initia-labs/opinit-bots/types"
 
 	opchildtypes "github.com/initia-labs/OPinit/x/opchild/types"
+
+	challengerdb "github.com/initia-labs/opinit-bots/challenger/db"
+	"github.com/pkg/errors"
 )
 
-func (ch *Child) beginBlockHandler(ctx context.Context, args nodetypes.BeginBlockArgs) (err error) {
+func (ch *Child) beginBlockHandler(ctx types.Context, args nodetypes.BeginBlockArgs) (err error) {
 	blockHeight := args.Block.Header.Height
 	ch.eventQueue = ch.eventQueue[:0]
-
-	if ch.Merkle() == nil {
-		return errors.New("merkle is not initialized")
-	}
+	ch.stage.Reset()
 
 	err = ch.prepareTree(blockHeight)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to prepare tree")
 	}
 
 	err = ch.prepareOutput(ctx)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to prepare output")
 	}
 	return nil
 }
 
-func (ch *Child) endBlockHandler(_ context.Context, args nodetypes.EndBlockArgs) error {
+func (ch *Child) endBlockHandler(ctx types.Context, args nodetypes.EndBlockArgs) error {
 	blockHeight := args.Block.Header.Height
-	batchKVs := make([]types.RawKV, 0)
 	pendingChallenges := make([]challengertypes.Challenge, 0)
 
-	treeKVs, storageRoot, err := ch.handleTree(blockHeight, args.Block.Header)
+	storageRoot, err := ch.handleTree(ctx, blockHeight, args.Block.Header)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to handle tree")
 	}
-	batchKVs = append(batchKVs, treeKVs...)
 
 	if storageRoot != nil {
-		workingTreeIndex, err := ch.GetWorkingTreeIndex()
+		workingTree, err := ch.WorkingTree()
 		if err != nil {
-			return err
+			return errors.Wrap(err, "failed to get working tree")
 		}
-		err = ch.handleOutput(args.Block.Header.Time, blockHeight, ch.Version(), args.BlockID, workingTreeIndex, storageRoot)
+		err = ch.handleOutput(args.Block.Header.Time, blockHeight, ch.Version(), args.BlockID, workingTree.Index, storageRoot)
 		if err != nil {
-			return err
+			return errors.Wrap(err, "failed to handle output")
 		}
 	}
 
 	// update the sync info
-	batchKVs = append(batchKVs, ch.Node().SyncInfoToRawKV(blockHeight))
+	err = node.SetSyncedHeight(ch.stage, args.Block.Header.Height)
+	if err != nil {
+		return errors.Wrap(err, "failed to set synced height")
+	}
 
 	// check value for pending events
-	challenges, processedEvents, err := ch.eventHandler.CheckValue(ch.eventQueue)
+	challenges, processedEvents, err := ch.eventHandler.CheckValue(ctx, ch.eventQueue)
 	if err != nil {
 		return err
 	}
@@ -70,27 +69,23 @@ func (ch *Child) endBlockHandler(_ context.Context, args nodetypes.EndBlockArgs)
 	challenges, timeoutEvents := ch.eventHandler.CheckTimeout(args.Block.Header.Time, unprocessedEvents)
 	pendingChallenges = append(pendingChallenges, challenges...)
 
-	// update timeout pending events
-	eventKvs, err := ch.PendingEventsToRawKV(timeoutEvents, false)
+	err = eventhandler.SavePendingEvents(ch.stage, timeoutEvents)
 	if err != nil {
 		return err
 	}
-	batchKVs = append(batchKVs, eventKvs...)
 
 	// delete processed events
-	eventKVs, err := ch.PendingEventsToRawKV(processedEvents, true)
+	err = eventhandler.DeletePendingEvents(ch.stage, processedEvents)
 	if err != nil {
 		return err
 	}
-	batchKVs = append(batchKVs, eventKVs...)
 
-	challengesKVs, err := ch.challenger.PendingChallengeToRawKVs(pendingChallenges, false)
+	err = challengerdb.SavePendingChallenges(ch.stage.WithPrefixedKey(ch.challenger.DB().PrefixedKey), pendingChallenges)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to save pending challenges")
 	}
-	batchKVs = append(batchKVs, challengesKVs...)
 
-	err = ch.DB().RawBatchSet(batchKVs...)
+	err = ch.stage.Commit()
 	if err != nil {
 		return err
 	}
@@ -101,7 +96,7 @@ func (ch *Child) endBlockHandler(_ context.Context, args nodetypes.EndBlockArgs)
 	return nil
 }
 
-func (ch *Child) txHandler(_ context.Context, args nodetypes.TxHandlerArgs) error {
+func (ch *Child) txHandler(ctx types.Context, args nodetypes.TxHandlerArgs) error {
 	// ignore failed tx
 	if !args.Success {
 		return nil
@@ -123,6 +118,6 @@ func (ch *Child) txHandler(_ context.Context, args nodetypes.TxHandlerArgs) erro
 	if !ok {
 		return nil
 	}
-	ch.oracleTxHandler(args.BlockTime, msg.Sender, types.MustUint64ToInt64(msg.Height), msg.Data)
+	ch.oracleTxHandler(ctx, args.BlockTime, msg.Sender, types.MustUint64ToInt64(msg.Height), msg.Data)
 	return nil
 }
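The handler rewrite above is the heart of this change: instead of threading []types.RawKV slices back to the node and flushing them with RawBatchSet, each component now owns a stage (types.CommitDB) that is Reset at block start, written to by the db helpers during the block, and committed once at block end, so every per-block write lands atomically. A minimal sketch of that lifecycle, assuming only the Reset/Set/Commit methods visible at the call sites in this diff:

package example // illustration only; not part of the diff

import (
	"github.com/initia-labs/opinit-bots/types"
	"github.com/pkg/errors"
)

// processBlock sketches the per-block lifecycle of a stage. In the real
// handlers the stage is created once (stage: db.NewStage() in the
// constructors) and reused for every block.
func processBlock(db types.DB, key, value []byte) error {
	stage := db.NewStage()

	// beginBlockHandler: drop anything staged for the previous block.
	stage.Reset()

	// During the block, helpers write to the stage instead of returning
	// RawKVs, e.g. merkle.SaveNodes(stage, ...) or
	// eventhandler.SavePendingEvents(stage, ...).
	err := stage.Set(key, value)
	if err != nil {
		return errors.Wrap(err, "failed to stage write")
	}

	// endBlockHandler: persist all staged writes as one atomic batch.
	return errors.Wrap(stage.Commit(), "failed to commit stage")
}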
diff --git a/challenger/child/oracle.go b/challenger/child/oracle.go
index c34687f..24d0c34 100644
--- a/challenger/child/oracle.go
+++ b/challenger/child/oracle.go
@@ -5,16 +5,17 @@ import (
 	comettypes "github.com/cometbft/cometbft/types"
 
 	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
+	"github.com/initia-labs/opinit-bots/types"
 	"go.uber.org/zap"
 )
 
-func (ch *Child) oracleTxHandler(blockTime time.Time, sender string, l1BlockHeight int64, oracleDataBytes comettypes.Tx) {
+func (ch *Child) oracleTxHandler(ctx types.Context, blockTime time.Time, sender string, l1BlockHeight int64, oracleDataBytes comettypes.Tx) {
 	checksum := challengertypes.OracleChecksum(oracleDataBytes)
 	oracle := challengertypes.NewOracle(l1BlockHeight, checksum, blockTime)
 	ch.eventQueue = append(ch.eventQueue, oracle)
 	ch.lastUpdatedOracleL1Height = l1BlockHeight
 
-	ch.Logger().Info("update oracle",
+	ctx.Logger().Info("update oracle",
 		zap.Int64("l1_blockHeight", l1BlockHeight),
 		zap.String("from", sender),
 	)
diff --git a/challenger/child/status.go b/challenger/child/status.go
index 7ef58fe..5f37a2f 100644
--- a/challenger/child/status.go
+++ b/challenger/child/status.go
@@ -29,15 +29,7 @@ func (ch Child) GetStatus() (Status, error) {
 		return Status{}, errors.New("node is not initialized")
 	}
 
-	workingTreeLeafCount, err := ch.GetWorkingTreeLeafCount()
-	if err != nil {
-		return Status{}, err
-	}
-	startLeafIndex, err := ch.GetStartLeafIndex()
-	if err != nil {
-		return Status{}, err
-	}
-	workingTreeIndex, err := ch.GetWorkingTreeIndex()
+	workingTree, err := ch.WorkingTree()
 	if err != nil {
 		return Status{}, err
 	}
@@ -51,8 +43,8 @@ func (ch Child) GetStatus() (Status, error) {
 		LastUpdatedOracleL1Height:         ch.lastUpdatedOracleL1Height,
 		LastFinalizedDepositL1BlockHeight: ch.lastFinalizedDepositL1BlockHeight,
 		LastFinalizedDepositL1Sequence:    ch.lastFinalizedDepositL1Sequence,
-		LastWithdrawalL2Sequence:          workingTreeLeafCount + startLeafIndex - 1,
-		WorkingTreeIndex:                  workingTreeIndex,
+		LastWithdrawalL2Sequence:          workingTree.LeafCount + workingTree.StartLeafIndex - 1,
+		WorkingTreeIndex:                  workingTree.Index,
 		FinalizingBlockHeight:             ch.finalizingBlockHeight,
 		LastOutputSubmissionTime:          ch.lastOutputTime,
 		NextOutputSubmissionTime:          ch.nextOutputTime,
diff --git a/challenger/child/withdraw.go b/challenger/child/withdraw.go
index 4776af4..e1f164b 100644
--- a/challenger/child/withdraw.go
+++ b/challenger/child/withdraw.go
@@ -9,6 +9,7 @@ import (
 	ophosttypes "github.com/initia-labs/OPinit/x/ophost/types"
 
 	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
+	"github.com/initia-labs/opinit-bots/merkle"
 	"github.com/initia-labs/opinit-bots/types"
 	"github.com/pkg/errors"
 	"go.uber.org/zap"
@@ -19,23 +20,41 @@ import (
 	childprovider "github.com/initia-labs/opinit-bots/provider/child"
 )
 
-func (ch *Child) initiateWithdrawalHandler(_ context.Context, args nodetypes.EventHandlerArgs) error {
+func (ch *Child) initiateWithdrawalHandler(ctx types.Context, args nodetypes.EventHandlerArgs) error {
 	l2Sequence, amount, from, to, baseDenom, err := childprovider.ParseInitiateWithdrawal(args.EventAttributes)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to parse initiate withdrawal event")
+	}
+	err = ch.handleInitiateWithdrawal(ctx, l2Sequence, from, to, baseDenom, amount)
+	if err != nil {
+		return errors.Wrap(err, "failed to handle initiate withdrawal")
 	}
-	return ch.handleInitiateWithdrawal(l2Sequence, from, to, baseDenom, amount)
+	return nil
 }
 
-func (ch *Child) handleInitiateWithdrawal(l2Sequence uint64, from string, to string, baseDenom string, amount uint64) error {
+func (ch *Child) handleInitiateWithdrawal(ctx types.Context, l2Sequence uint64, from string, to string, baseDenom string, amount uint64) error {
 	withdrawalHash := ophosttypes.GenerateWithdrawalHash(ch.BridgeId(), l2Sequence, from, to, baseDenom, amount)
+
+	workingTree, err := ch.WorkingTree()
+	if err != nil {
+		return errors.Wrap(err, "failed to get working tree")
+	}
+
+	if workingTree.StartLeafIndex+workingTree.LeafCount != l2Sequence {
+		panic(fmt.Errorf("INVARIANT failed; handleInitiateWithdrawal expected working tree at leaf `%d` (start `%d` + count `%d`) but got leaf `%d`", workingTree.StartLeafIndex+workingTree.LeafCount, workingTree.StartLeafIndex, workingTree.LeafCount, l2Sequence))
+	}
+
 	// generate merkle tree
-	err := ch.Merkle().InsertLeaf(withdrawalHash[:])
+	newNodes, err := ch.Merkle().InsertLeaf(withdrawalHash[:])
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to insert leaf to merkle tree")
+	}
+	err = merkle.SaveNodes(ch.stage, newNodes...)
+	if err != nil {
+		return errors.Wrap(err, "failed to save new tree nodes")
 	}
 
-	ch.Logger().Info("initiate token withdrawal",
+	ctx.Logger().Info("initiate token withdrawal",
 		zap.Uint64("l2_sequence", l2Sequence),
 		zap.String("from", from),
 		zap.String("to", to),
@@ -47,40 +66,45 @@ func (ch *Child) handleInitiateWithdrawal(l2Sequence uint64, from string, to str
 }
 
 func (ch *Child) prepareTree(blockHeight int64) error {
-	err := ch.Merkle().LoadWorkingTree(types.MustInt64ToUint64(blockHeight - 1))
-	if err == dbtypes.ErrNotFound {
+	workingTree, err := merkle.GetWorkingTree(ch.DB(), types.MustInt64ToUint64(blockHeight-1))
+	if errors.Is(err, dbtypes.ErrNotFound) {
 		if ch.InitializeTree(blockHeight) {
 			return nil
 		}
 		// must not happened
 		panic(fmt.Errorf("working tree not found at height: %d, current: %d", blockHeight-1, blockHeight))
 	} else if err != nil {
-		return err
+		return errors.Wrap(err, "failed to get working tree")
+	}
+
+	err = ch.Merkle().PrepareWorkingTree(workingTree)
+	if err != nil {
+		return errors.Wrap(err, "failed to prepare working tree")
 	}
 	return nil
 }
 
 func (ch *Child) prepareOutput(ctx context.Context) error {
-	workingTreeIndex, err := ch.GetWorkingTreeIndex()
+	workingTree, err := ch.WorkingTree()
 	if err != nil {
 		return err
 	}
 
 	// initialize next output time
-	if ch.nextOutputTime.IsZero() && workingTreeIndex > 1 {
-		output, err := ch.host.QuerySyncedOutput(ctx, ch.BridgeId(), workingTreeIndex-1)
+	if ch.nextOutputTime.IsZero() && workingTree.Index > 1 {
+		output, err := ch.host.QuerySyncedOutput(ctx, ch.BridgeId(), workingTree.Index-1)
 		if err != nil {
 			// TODO: maybe not return error here and roll back
-			return fmt.Errorf("output does not exist at index: %d", workingTreeIndex-1)
+			return fmt.Errorf("output does not exist at index: %d", workingTree.Index-1)
 		}
 		ch.lastOutputTime = output.OutputProposal.L1BlockTime
 	}
 
-	output, err := ch.host.QuerySyncedOutput(ctx, ch.BridgeId(), workingTreeIndex)
+	output, err := ch.host.QuerySyncedOutput(ctx, ch.BridgeId(), workingTree.Index)
 	if err != nil {
 		if strings.Contains(err.Error(), "collections: not found") {
 			// should check the existing output.
-			return errors.Wrap(nodetypes.ErrIgnoreAndTryLater, fmt.Sprintf("output does not exist: %d", workingTreeIndex))
+			return errors.Wrap(nodetypes.ErrIgnoreAndTryLater, fmt.Sprintf("output does not exist: %d", workingTree.Index))
 		}
 		return err
 	} else {
@@ -90,7 +114,7 @@ func (ch *Child) prepareOutput(ctx context.Context) error {
 	return nil
 }
 
-func (ch *Child) handleTree(blockHeight int64, blockHeader cmtproto.Header) (kvs []types.RawKV, storageRoot []byte, err error) {
+func (ch *Child) handleTree(ctx types.Context, blockHeight int64, blockHeader cmtproto.Header) (storageRoot []byte, err error) {
 	// panic if we passed the finalizing block height
 	// this must not happened
 	if ch.finalizingBlockHeight != 0 && ch.finalizingBlockHeight < blockHeight {
@@ -98,38 +122,51 @@ func (ch *Child) handleTree(blockHeight int64, blockHeader cmtproto.Header) (kvs
 	}
 
 	if ch.finalizingBlockHeight == blockHeight {
-		kvs, storageRoot, err = ch.Merkle().FinalizeWorkingTree(nil)
+		finalizedTree, newNodes, treeRootHash, err := ch.Merkle().FinalizeWorkingTree(nil)
 		if err != nil {
-			return nil, nil, err
+			return nil, errors.Wrap(err, "failed to finalize working tree")
 		}
 
-		workingTreeIndex, err := ch.GetWorkingTreeIndex()
+		if finalizedTree != nil {
+			err = merkle.SaveFinalizedTree(ch.stage, *finalizedTree)
+			if err != nil {
+				return nil, errors.Wrap(err, "failed to save finalized tree")
+			}
+		}
+
+		err = merkle.SaveNodes(ch.stage, newNodes...)
 		if err != nil {
-			return nil, nil, err
+			return nil, errors.Wrap(err, "failed to save new nodes of finalized tree")
 		}
 
-		workingTreeLeafCount, err := ch.GetWorkingTreeLeafCount()
+		workingTree, err := ch.WorkingTree()
 		if err != nil {
-			return nil, nil, err
+			return nil, errors.Wrap(err, "failed to get working tree")
 		}
 
-		ch.Logger().Info("finalize working tree",
-			zap.Uint64("tree_index", workingTreeIndex),
+		ctx.Logger().Info("finalize working tree",
+			zap.Uint64("tree_index", workingTree.Index),
 			zap.Int64("height", blockHeight),
-			zap.Uint64("num_leaves", workingTreeLeafCount),
-			zap.String("storage_root", base64.StdEncoding.EncodeToString(storageRoot)),
+			zap.Uint64("start_leaf_index", workingTree.StartLeafIndex),
+			zap.Uint64("num_leaves", workingTree.LeafCount),
+			zap.String("storage_root", base64.StdEncoding.EncodeToString(treeRootHash)),
 		)
 
 		ch.finalizingBlockHeight = 0
 		ch.lastOutputTime = blockHeader.Time
 	}
 
-	err = ch.Merkle().SaveWorkingTree(types.MustInt64ToUint64(blockHeight))
+	workingTree, err := ch.WorkingTree()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get working tree")
+	}
+
+	err = merkle.SaveWorkingTree(ch.stage, workingTree)
 	if err != nil {
-		return nil, nil, err
+		return nil, errors.Wrap(err, "failed to save working tree")
 	}
 
-	return kvs, storageRoot, nil
+	return storageRoot, nil
 }
 
 func (ch *Child) handleOutput(blockTime time.Time, blockHeight int64, version uint8, blockId []byte, outputIndex uint64, storageRoot []byte) error {
diff --git a/challenger/db.go b/challenger/db.go
deleted file mode 100644
index aab51a8..0000000
--- a/challenger/db.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package challenger
-
-import (
-	"fmt"
-	"slices"
-	"time"
-
-	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
-	"github.com/initia-labs/opinit-bots/node"
-	"github.com/initia-labs/opinit-bots/types"
-	"github.com/pkg/errors"
-)
-
-func (c *Challenger) PendingChallengeToRawKVs(challenges []challengertypes.Challenge, delete bool) ([]types.RawKV, error) {
-	kvs := make([]types.RawKV, 0, len(challenges))
-	for _, challenge := range challenges {
-		var value []byte
-		var err error
-
-		if !delete {
-			value, err = challenge.Marshal()
-			if err != nil {
-				return nil, err
-			}
-		}
-		kvs = append(kvs, types.RawKV{
-			Key:   c.db.PrefixedKey(challengertypes.PrefixedPendingChallenge(challenge.Id)),
-			Value: value,
-		})
-	}
-	return kvs, nil
-}
-
-func (c *Challenger) deletePendingChallenge(challenge challengertypes.Challenge) types.RawKV {
-	return types.RawKV{
-		Key:   c.db.PrefixedKey(challengertypes.PrefixedPendingChallenge(challenge.Id)),
-		Value: nil,
-	}
-}
-
-func (c *Challenger) loadPendingChallenges() (challenges []challengertypes.Challenge, err error) {
-	iterErr := c.db.PrefixedIterate(challengertypes.PendingChallengeKey, nil, func(_, value []byte) (stop bool, err error) {
-		challenge := challengertypes.Challenge{}
-		err = challenge.Unmarshal(value)
-		if err != nil {
-			return true, err
-		}
-		challenges = append(challenges, challenge)
-		return false, nil
-	})
-	if iterErr != nil {
-		return nil, iterErr
-	}
-	return
-}
-
-func (c *Challenger) saveChallenge(challenge challengertypes.Challenge) (types.RawKV, error) {
-	value, err := challenge.Marshal()
-	if err != nil {
-		return types.RawKV{}, err
-	}
-	return types.RawKV{
-		Key:   c.db.PrefixedKey(challengertypes.PrefixedChallenge(challenge.Time, challenge.Id)),
-		Value: value,
-	}, nil
-}
-
-func (c *Challenger) loadChallenges() (challenges []challengertypes.Challenge, err error) {
-	iterErr := c.db.PrefixedReverseIterate(challengertypes.ChallengeKey, nil, func(_, value []byte) (stop bool, err error) {
-		challenge := challengertypes.Challenge{}
-		err = challenge.Unmarshal(value)
-		if err != nil {
-			return true, err
-		}
-		challenges = append(challenges, challenge)
-		if len(challenges) >= 5 {
-			return true, nil
-		}
-		return false, nil
-	})
-	if iterErr != nil {
-		return nil, iterErr
-	}
-	slices.Reverse(challenges)
-	return
-}
-
-func (c *Challenger) DeleteFutureChallenges(initialBlockTime time.Time) error {
-	deletingKeys := make([][]byte, 0)
-	iterErr := c.db.PrefixedReverseIterate(challengertypes.ChallengeKey, nil, func(key []byte, _ []byte) (stop bool, err error) {
-		ts, _, err := challengertypes.ParseChallenge(key)
-		if err != nil {
-			return true, err
-		}
-		if !ts.After(initialBlockTime) {
-			return true, nil
-		}
-
-		deletingKeys = append(deletingKeys, key)
-		return false, nil
-	})
-	if iterErr != nil {
-		return iterErr
-	}
-
-	for _, key := range deletingKeys {
-		err := c.db.Delete(key)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func ResetHeights(db types.DB) error {
-	dbNames := []string{
-		types.HostName,
-		types.ChildName,
-	}
-
-	for _, dbName := range dbNames {
-		if err := ResetHeight(db, dbName); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func ResetHeight(db types.DB, nodeName string) error {
-	if nodeName != types.HostName &&
-		nodeName != types.ChildName {
-		return errors.New("unknown node name")
-	}
-	nodeDB := db.WithPrefix([]byte(nodeName))
-
-	if err := DeletePendingEvents(nodeDB); err != nil {
-		return err
-	}
-
-	if err := DeletePendingChallenges(nodeDB); err != nil {
-		return err
-	}
-
-	if err := node.DeleteSyncInfo(nodeDB); err != nil {
-		return err
-	}
-	fmt.Printf("reset height to 0 for node %s\n", string(nodeDB.GetPrefix()))
-	return nil
-}
-
-func DeletePendingEvents(db types.DB) error {
-	deletingKeys := make([][]byte, 0)
-	iterErr := db.PrefixedIterate(challengertypes.PendingEventKey, nil, func(key []byte, _ []byte) (stop bool, err error) {
-		deletingKeys = append(deletingKeys, key)
-		return false, nil
-	})
-	if iterErr != nil {
-		return iterErr
-	}
-
-	for _, key := range deletingKeys {
-		err := db.Delete(key)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func DeletePendingChallenges(db types.DB) error {
-	deletingKeys := make([][]byte, 0)
-	iterErr := db.PrefixedIterate(challengertypes.PendingChallengeKey, nil, func(key []byte, _ []byte) (stop bool, err error) {
-		deletingKeys = append(deletingKeys, key)
-		return false, nil
-	})
-	if iterErr != nil {
-		return iterErr
-	}
-
-	for _, key := range deletingKeys {
-		err := db.Delete(key)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/challenger/db/db.go b/challenger/db/db.go
new file mode 100644
index 0000000..f4607b0
--- /dev/null
+++ b/challenger/db/db.go
@@ -0,0 +1,181 @@
+package db
+
+import (
+	"fmt"
+	"slices"
+	"time"
+
+	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
+	"github.com/initia-labs/opinit-bots/node"
+	"github.com/initia-labs/opinit-bots/types"
+	"github.com/pkg/errors"
+)
+
+func SavePendingChallenge(db types.BasicDB, challenge challengertypes.Challenge) error {
+	data, err := challenge.Marshal()
+	if err != nil {
+		return err
+	}
+	return db.Set(challengertypes.PrefixedPendingChallenge(challenge.Id), data)
+}
+
+func SavePendingChallenges(db types.BasicDB, challenges []challengertypes.Challenge) error {
+	for _, challenge := range challenges {
+		err := SavePendingChallenge(db, challenge)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func DeletePendingChallenge(db types.BasicDB, challenge challengertypes.Challenge) error {
+	return db.Delete(challengertypes.PrefixedPendingChallenge(challenge.Id))
+}
+
+func DeletePendingChallenges(db types.BasicDB, challenges []challengertypes.Challenge) error {
+	for _, challenge := range challenges {
+		err := DeletePendingChallenge(db, challenge)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func LoadPendingChallenges(db types.DB) (challenges []challengertypes.Challenge, err error) {
+	iterErr := db.Iterate(challengertypes.PendingChallengeKey, nil, func(_, value []byte) (stop bool, err error) {
+		challenge := challengertypes.Challenge{}
+		err = challenge.Unmarshal(value)
+		if err != nil {
+			return true, err
+		}
+		challenges = append(challenges, challenge)
+		return false, nil
+	})
+	if iterErr != nil {
+		return nil, iterErr
+	}
+	return
+}
+
+func SaveChallenge(db types.BasicDB, challenge challengertypes.Challenge) error {
+	value, err := challenge.Marshal()
+	if err != nil {
+		return err
+	}
+	return db.Set(challengertypes.PrefixedChallenge(challenge.Time, challenge.Id), value)
+}
+
+func LoadChallenges(db types.DB) (challenges []challengertypes.Challenge, err error) {
+	iterErr := db.ReverseIterate(challengertypes.ChallengeKey, nil, func(_, value []byte) (stop bool, err error) {
+		challenge := challengertypes.Challenge{}
+		err = challenge.Unmarshal(value)
+		if err != nil {
+			return true, err
+		}
+		challenges = append(challenges, challenge)
+		if len(challenges) >= 5 {
+			return true, nil
+		}
+		return false, nil
+	})
+	if iterErr != nil {
+		return nil, iterErr
+	}
+	slices.Reverse(challenges)
+	return
+}
+
+func DeleteFutureChallenges(db types.DB, initialBlockTime time.Time) error {
+	deletingKeys := make([][]byte, 0)
+	iterErr := db.Iterate(challengertypes.ChallengeKey, challengertypes.PrefixedChallengeEventTime(initialBlockTime), func(key []byte, _ []byte) (stop bool, err error) {
+		deletingKeys = append(deletingKeys, key)
+		return false, nil
+	})
+	if iterErr != nil {
+		return iterErr
+	}
+
+	for _, key := range deletingKeys {
+		err := db.Delete(key)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func ResetHeights(db types.DB) error {
+	dbNames := []string{
+		types.HostName,
+		types.ChildName,
+	}
+
+	for _, dbName := range dbNames {
+		if err := ResetHeight(db, dbName); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func ResetHeight(db types.DB, nodeName string) error {
+	if nodeName != types.HostName &&
+		nodeName != types.ChildName {
+		return errors.New("unknown node name")
+	}
+	nodeDB := db.WithPrefix([]byte(nodeName))
+
+	if err := DeleteAllPendingEvents(nodeDB); err != nil {
+		return err
+	}
+
+	if err := DeleteAllPendingChallenges(nodeDB); err != nil {
+		return err
+	}
+
+	if err := node.DeleteSyncedHeight(nodeDB); err != nil {
+		return err
+	}
+	fmt.Printf("reset height to 0 for node %s\n", string(nodeDB.GetPrefix()))
+	return nil
+}
+
+func DeleteAllPendingEvents(db types.DB) error {
+	deletingKeys := make([][]byte, 0)
+	iterErr := db.Iterate(challengertypes.PendingEventKey, nil, func(key []byte, _ []byte) (stop bool, err error) {
+		deletingKeys = append(deletingKeys, key)
+		return false, nil
+	})
+	if iterErr != nil {
+		return iterErr
+	}
+
+	for _, key := range deletingKeys {
+		err := db.Delete(key)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func DeleteAllPendingChallenges(db types.DB) error {
+	deletingKeys := make([][]byte, 0)
+	iterErr := db.Iterate(challengertypes.PendingChallengeKey, nil, func(key []byte, _ []byte) (stop bool, err error) {
+		deletingKeys = append(deletingKeys, key)
+		return false, nil
+	})
+	if iterErr != nil {
+		return iterErr
+	}
+
+	for _, key := range deletingKeys {
+		err := db.Delete(key)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/challenger/eventhandler/challenge.go b/challenger/eventhandler/challenge.go
index 8334e01..ae1d1ed 100644
--- a/challenger/eventhandler/challenge.go
+++ b/challenger/eventhandler/challenge.go
@@ -8,9 +8,11 @@ import (
 	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
 	nodetypes "github.com/initia-labs/opinit-bots/node/types"
 	"go.uber.org/zap"
+
+	"github.com/initia-labs/opinit-bots/types"
 )
 
-func (ch *ChallengeEventHandler) CheckValue(events []challengertypes.ChallengeEvent) ([]challengertypes.Challenge, []challengertypes.ChallengeEvent, error) {
+func (ch *ChallengeEventHandler) CheckValue(ctx types.Context, events []challengertypes.ChallengeEvent) ([]challengertypes.Challenge, []challengertypes.ChallengeEvent, error) {
 	challenges := make([]challengertypes.Challenge, 0)
 	processedEvents := make([]challengertypes.ChallengeEvent, 0)
 
@@ -32,7 +34,7 @@ func (ch *ChallengeEventHandler) CheckValue(events []challengertypes.ChallengeEv
 				Time: event.EventTime(),
 			})
 		} else {
-			ch.logger.Info("pending event matched", zap.String("event", pendingEvent.String()))
+			ctx.Logger().Info("pending event matched", zap.String("event", pendingEvent.String()))
 		}
 
 		processedEvents = append(processedEvents, pendingEvent)
diff --git a/challenger/eventhandler/db.go b/challenger/eventhandler/db.go
index d8bcf72..0cc2775 100644
--- a/challenger/eventhandler/db.go
+++ b/challenger/eventhandler/db.go
@@ -5,22 +5,34 @@ import (
 	"github.com/initia-labs/opinit-bots/types"
 )
 
-func (ch *ChallengeEventHandler) PendingEventsToRawKV(events []challengertypes.ChallengeEvent, delete bool) ([]types.RawKV, error) {
-	kvs := make([]types.RawKV, 0, len(events))
+func SavePendingEvents(db types.BasicDB, events []challengertypes.ChallengeEvent) error {
 	for _, event := range events {
-		var data []byte
-		var err error
+		err := SavePendingEvent(db, event)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
 
-		if !delete {
-			data, err = event.Marshal()
-			if err != nil {
-				return nil, err
-			}
+func SavePendingEvent(db types.BasicDB, event challengertypes.ChallengeEvent) error {
+	data, err := event.Marshal()
+	if err != nil {
+		return err
+	}
+	return db.Set(challengertypes.PrefixedPendingEvent(event.Id()), data)
+}
+
+func DeletePendingEvents(db types.BasicDB, events []challengertypes.ChallengeEvent) error {
+	for _, event := range events {
+		err := DeletePendingEvent(db, event)
+		if err != nil {
+			return err
 		}
-		kvs = append(kvs, types.RawKV{
-			Key:   ch.db.PrefixedKey(challengertypes.PrefixedPendingEvent(event.Id())),
-			Value: data,
-		})
 	}
-	return kvs, nil
+	return nil
+}
+
+func DeletePendingEvent(db types.BasicDB, event challengertypes.ChallengeEvent) error {
+	return db.Delete(challengertypes.PrefixedPendingEvent(event.Id()))
 }
diff --git a/challenger/eventhandler/event_handler.go b/challenger/eventhandler/event_handler.go
index 416461b..971f917 100644
--- a/challenger/eventhandler/event_handler.go
+++ b/challenger/eventhandler/event_handler.go
@@ -6,21 +6,18 @@ import (
 
 	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
 	"github.com/initia-labs/opinit-bots/types"
-	"go.uber.org/zap"
 )
 
 type ChallengeEventHandler struct {
 	db              types.DB
-	logger          *zap.Logger
 	pendingEventsMu *sync.Mutex
 	pendingEvents   map[challengertypes.ChallengeId]challengertypes.ChallengeEvent
 	timeoutDuration time.Duration
 }
 
-func NewChallengeEventHandler(db types.DB, logger *zap.Logger) *ChallengeEventHandler {
+func NewChallengeEventHandler(db types.DB) *ChallengeEventHandler {
 	return &ChallengeEventHandler{
 		db:              db,
-		logger:          logger,
 		pendingEventsMu: &sync.Mutex{},
 		pendingEvents:   make(map[challengertypes.ChallengeId]challengertypes.ChallengeEvent),
 	}
diff --git a/challenger/eventhandler/pending_events.go b/challenger/eventhandler/pending_events.go
index c849bf0..fd3965a 100644
--- a/challenger/eventhandler/pending_events.go
+++ b/challenger/eventhandler/pending_events.go
@@ -5,6 +5,7 @@ import (
 	"sort"
 
 	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
+	"github.com/pkg/errors"
 )
 
 func (ch *ChallengeEventHandler) GetPendingEvent(id challengertypes.ChallengeId) (challengertypes.ChallengeEvent, bool) {
@@ -96,15 +97,15 @@ func (ch *ChallengeEventHandler) SetPendingEvents(events []challengertypes.Chall
 }
 
 func (ch *ChallengeEventHandler) loadPendingEvents() (events []challengertypes.ChallengeEvent, err error) {
-	iterErr := ch.db.PrefixedIterate(challengertypes.PendingEventKey, nil, func(key, value []byte) (stop bool, err error) {
+	iterErr := ch.db.Iterate(challengertypes.PendingEventKey, nil, func(key, value []byte) (stop bool, err error) {
 		id, err := challengertypes.ParsePendingEvent(key)
 		if err != nil {
-			return true, err
+			return true, errors.Wrap(err, "failed to parse pending event key")
 		}
 
 		event, err := challengertypes.UnmarshalChallengeEvent(id.Type, value)
 		if err != nil {
-			return true, err
+			return true, errors.Wrap(err, "failed to unmarshal challenge event")
 		}
 		events = append(events, event)
 		return false, nil
diff --git a/challenger/handler.go b/challenger/handler.go
index 4963f30..fab3204 100644
--- a/challenger/handler.go
+++ b/challenger/handler.go
@@ -1,40 +1,41 @@
 package challenger
 
 import (
-	"context"
 	"sort"
 
+	challengerdb "github.com/initia-labs/opinit-bots/challenger/db"
 	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
 	"github.com/initia-labs/opinit-bots/types"
 
 	"go.uber.org/zap"
 )
 
-func (c *Challenger) challengeHandler(ctx context.Context) error {
+func (c *Challenger) challengeHandler(ctx types.Context) error {
 	defer close(c.challengeChStopped)
 	for {
 		select {
 		case <-ctx.Done():
 			return nil
 		case challenge := <-c.challengeCh:
-			kvs := make([]types.RawKV, 0)
-			kv := c.deletePendingChallenge(challenge)
-			kvs = append(kvs, kv)
-			kv, err := c.saveChallenge(challenge)
+			c.stage.Reset()
+			// Remove the pending challenge that was stored by the child or host
+			err := challengerdb.DeletePendingChallenge(c.stage, challenge)
 			if err != nil {
 				return err
 			}
-			kvs = append(kvs, kv)
-
-			err = c.handleChallenge(challenge)
+			err = challengerdb.SaveChallenge(c.stage, challenge)
 			if err != nil {
 				return err
 			}
 
-			err = c.db.RawBatchSet(kvs...)
+			err = c.handleChallenge(ctx, challenge)
 			if err != nil {
 				return err
 			}
+			err = c.stage.Commit()
+			if err != nil {
+				return err
+			}
 			c.insertLatestChallenges(challenge)
 		}
 	}
@@ -68,9 +69,9 @@ func (c *Challenger) getLatestChallenges() []challengertypes.Challenge {
 	return res
 }
 
-func (c *Challenger) handleChallenge(challenge challengertypes.Challenge) error {
+func (c *Challenger) handleChallenge(ctx types.Context, challenge challengertypes.Challenge) error {
 	// TODO: warning log or send to alerting system
-	c.logger.Error("challenge", zap.Any("challenge", challenge))
+	ctx.Logger().Error("challenge", zap.Any("challenge", challenge))
 	return nil
 }
diff --git a/challenger/host/deposit.go b/challenger/host/deposit.go
index 2fadf2a..90ad30b 100644
--- a/challenger/host/deposit.go
+++ b/challenger/host/deposit.go
@@ -1,23 +1,23 @@
 package host
 
 import (
-	"context"
-	"errors"
 	"time"
 
 	"cosmossdk.io/math"
 
 	nodetypes "github.com/initia-labs/opinit-bots/node/types"
 	hostprovider "github.com/initia-labs/opinit-bots/provider/host"
+	"github.com/initia-labs/opinit-bots/types"
 
 	sdk "github.com/cosmos/cosmos-sdk/types"
 
 	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
+	"github.com/pkg/errors"
 )
 
-func (h *Host) initiateDepositHandler(_ context.Context, args nodetypes.EventHandlerArgs) error {
+func (h *Host) initiateDepositHandler(_ types.Context, args nodetypes.EventHandlerArgs) error {
 	bridgeId, l1Sequence, from, to, l1Denom, l2Denom, amount, _, err := hostprovider.ParseMsgInitiateDeposit(args.EventAttributes)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to parse initiate deposit event")
 	}
 	if bridgeId != h.BridgeId() {
 		// pass other bridge deposit event
diff --git a/challenger/host/handler.go b/challenger/host/handler.go
index 8cb0e7f..bcbb9ec 100644
--- a/challenger/host/handler.go
+++ b/challenger/host/handler.go
@@ -1,31 +1,33 @@
 package host
 
 import (
-	"context"
-
+	challengerdb "github.com/initia-labs/opinit-bots/challenger/db"
+	"github.com/initia-labs/opinit-bots/challenger/eventhandler"
 	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
+	"github.com/initia-labs/opinit-bots/node"
 	nodetypes "github.com/initia-labs/opinit-bots/node/types"
 	"github.com/initia-labs/opinit-bots/types"
+	"github.com/pkg/errors"
 )
 
-func (h *Host) beginBlockHandler(_ context.Context, args nodetypes.BeginBlockArgs) error {
+func (h *Host) beginBlockHandler(_ types.Context, args nodetypes.BeginBlockArgs) error {
 	h.eventQueue = h.eventQueue[:0]
 	h.outputPendingEventQueue = h.outputPendingEventQueue[:0]
+	h.stage.Reset()
 	return nil
 }
 
-func (h *Host) endBlockHandler(_ context.Context, args nodetypes.EndBlockArgs) error {
-	blockHeight := args.Block.Header.Height
-	batchKVs := []types.RawKV{
-		h.Node().SyncInfoToRawKV(blockHeight),
+func (h *Host) endBlockHandler(_ types.Context, args nodetypes.EndBlockArgs) error {
+	err := node.SetSyncedHeight(h.stage, args.Block.Header.Height)
+	if err != nil {
+		return errors.Wrap(err, "failed to set synced height")
 	}
 
 	// save all pending events to child db
-	eventKVs, err := h.child.PendingEventsToRawKV(h.eventQueue, false)
+	err = eventhandler.SavePendingEvents(h.stage.WithPrefixedKey(h.child.DB().PrefixedKey), h.eventQueue)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to save pending events on child db")
 	}
-	batchKVs = append(batchKVs, eventKVs...)
 
 	// save all pending events to host db
 	// currently, only output event is considered as pending event
@@ -33,11 +35,10 @@ func (h *Host) endBlockHandler(_ context.Context, args nodetypes.EndBlockArgs) e
 		panic("must not happen, outputPendingEventQueue should have only one output event")
 	}
 
-	eventKVs, err = h.eventHandler.PendingEventsToRawKV(h.outputPendingEventQueue, false)
+	err = eventhandler.SavePendingEvents(h.stage, h.outputPendingEventQueue)
 	if err != nil {
 		return err
 	}
-	batchKVs = append(batchKVs, eventKVs...)
 
 	prevEvents := make([]challengertypes.ChallengeEvent, 0)
 	for _, pendingEvent := range h.outputPendingEventQueue {
@@ -47,35 +48,33 @@ func (h *Host) endBlockHandler(_ context.Context, args nodetypes.EndBlockArgs) e
 		}
 	}
 	unprocessedEvents := h.eventHandler.GetUnprocessedPendingEvents(prevEvents)
-	pendingChallenges, precessedEvents := h.eventHandler.CheckTimeout(args.Block.Header.Time, unprocessedEvents)
-	precessedEvents = append(precessedEvents, prevEvents...)
+	pendingChallenges, processedEvents := h.eventHandler.CheckTimeout(args.Block.Header.Time, unprocessedEvents)
+	processedEvents = append(processedEvents, prevEvents...)
 
 	// delete processed events
-	eventKVs, err = h.eventHandler.PendingEventsToRawKV(precessedEvents, true)
+	err = eventhandler.DeletePendingEvents(h.stage, processedEvents)
 	if err != nil {
 		return err
 	}
-	batchKVs = append(batchKVs, eventKVs...)
 
-	challengesKVs, err := h.challenger.PendingChallengeToRawKVs(pendingChallenges, false)
+	err = challengerdb.SavePendingChallenges(h.stage.WithPrefixedKey(h.challenger.DB().PrefixedKey), pendingChallenges)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to save pending challenges")
 	}
-	batchKVs = append(batchKVs, challengesKVs...)
 
-	err = h.DB().RawBatchSet(batchKVs...)
+	err = h.stage.Commit()
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to commit stage")
 	}
 
 	h.child.SetPendingEvents(h.eventQueue)
-	h.eventHandler.DeletePendingEvents(precessedEvents)
+	h.eventHandler.DeletePendingEvents(processedEvents)
 	h.eventHandler.SetPendingEvents(h.outputPendingEventQueue)
 	h.challenger.SendPendingChallenges(pendingChallenges)
 	return nil
 }
 
-func (h *Host) txHandler(_ context.Context, args nodetypes.TxHandlerArgs) error {
+func (h *Host) txHandler(_ types.Context, args nodetypes.TxHandlerArgs) error {
 	if args.TxIndex == 0 {
 		h.oracleTxHandler(args.BlockHeight, args.BlockTime, args.Tx)
 	}
diff --git a/challenger/host/host.go b/challenger/host/host.go
index f948631..a0b40bd 100644
--- a/challenger/host/host.go
+++ b/challenger/host/host.go
@@ -4,9 +4,8 @@ import (
 	"context"
 	"time"
 
-	"go.uber.org/zap"
-
 	ophosttypes "github.com/initia-labs/OPinit/x/ophost/types"
+	"github.com/pkg/errors"
 
 	nodetypes "github.com/initia-labs/opinit-bots/node/types"
 	"github.com/initia-labs/opinit-bots/types"
@@ -18,12 +17,12 @@ import (
 )
 
 type challenger interface {
-	PendingChallengeToRawKVs([]challengertypes.Challenge, bool) ([]types.RawKV, error)
+	DB() types.DB
 	SendPendingChallenges([]challengertypes.Challenge)
 }
 
 type childNode interface {
-	PendingEventsToRawKV([]challengertypes.ChallengeEvent, bool) ([]types.RawKV, error)
+	DB() types.DB
 	SetPendingEvents([]challengertypes.ChallengeEvent)
 }
 
@@ -40,24 +39,27 @@ type Host struct {
 	// status info
 	lastOutputIndex uint64
 	lastOutputTime  time.Time
+
+	stage types.CommitDB
 }
 
 func NewHostV1(
 	cfg nodetypes.NodeConfig,
-	db types.DB, logger *zap.Logger,
+	db types.DB,
 ) *Host {
 	return &Host{
-		BaseHost:                hostprovider.NewBaseHostV1(cfg, db, logger),
-		eventHandler:            eventhandler.NewChallengeEventHandler(db, logger),
+		BaseHost:                hostprovider.NewBaseHostV1(cfg, db),
+		eventHandler:            eventhandler.NewChallengeEventHandler(db),
 		eventQueue:              make([]challengertypes.ChallengeEvent, 0),
 		outputPendingEventQueue: make([]challengertypes.ChallengeEvent, 0),
+		stage:                   db.NewStage(),
 	}
 }
 
-func (h *Host) Initialize(ctx context.Context, processedHeight int64, child childNode, bridgeInfo ophosttypes.QueryBridgeResponse, challenger challenger) (time.Time, error) {
+func (h *Host) Initialize(ctx types.Context, processedHeight int64, child childNode, bridgeInfo ophosttypes.QueryBridgeResponse, challenger challenger) (time.Time, error) {
 	err := h.BaseHost.Initialize(ctx, processedHeight, bridgeInfo, nil)
 	if err != nil {
-		return time.Time{}, err
+		return time.Time{}, errors.Wrap(err, "failed to initialize base host")
 	}
 	h.child = child
 	h.challenger = challenger
@@ -66,7 +68,7 @@ func (h *Host) Initialize(ctx context.Context, processedHeight int64, child chil
 
 	err = h.eventHandler.Initialize(bridgeInfo.BridgeConfig.SubmissionInterval)
 	if err != nil {
-		return time.Time{}, err
+		return time.Time{}, errors.Wrap(err, "failed to initialize event handler")
 	}
 
 	var blockTime time.Time
@@ -75,7 +77,7 @@ func (h *Host) Initialize(ctx context.Context, processedHeight int64, child chil
 	if h.Node().HeightInitialized() {
 		blockTime, err = h.Node().QueryBlockTime(ctx, h.Node().GetHeight())
 		if err != nil {
-			return time.Time{}, err
+			return time.Time{}, errors.Wrap(err, "failed to query block time")
 		}
 	}
 	return blockTime, nil
diff --git a/challenger/host/output.go b/challenger/host/output.go
index 8636df7..76a0e66 100644
--- a/challenger/host/output.go
+++ b/challenger/host/output.go
@@ -1,30 +1,31 @@
 package host
 
 import (
-	"context"
 	"encoding/base64"
 	"time"
 
 	nodetypes "github.com/initia-labs/opinit-bots/node/types"
 	hostprovider "github.com/initia-labs/opinit-bots/provider/host"
+	"github.com/initia-labs/opinit-bots/types"
+	"github.com/pkg/errors"
 	"go.uber.org/zap"
 
 	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
 )
 
-func (h *Host) proposeOutputHandler(_ context.Context, args nodetypes.EventHandlerArgs) error {
+func (h *Host) proposeOutputHandler(ctx types.Context, args nodetypes.EventHandlerArgs) error {
 	bridgeId, l2BlockNumber, outputIndex, proposer, outputRoot, err := hostprovider.ParseMsgProposeOutput(args.EventAttributes)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to parse propose output event")
 	}
 	if bridgeId != h.BridgeId() {
 		// pass other bridge output proposal event
 		return nil
 	}
-	return h.handleProposeOutput(bridgeId, proposer, outputIndex, l2BlockNumber, outputRoot, args.BlockTime)
+	return h.handleProposeOutput(ctx, bridgeId, proposer, outputIndex, l2BlockNumber, outputRoot, args.BlockTime)
 }
 
-func (h *Host) handleProposeOutput(bridgeId uint64, proposer string, outputIndex uint64, l2BlockNumber int64, outputRoot []byte, blockTime time.Time) error {
+func (h *Host) handleProposeOutput(ctx types.Context, bridgeId uint64, proposer string, outputIndex uint64, l2BlockNumber int64, outputRoot []byte, blockTime time.Time) error {
 	output := challengertypes.NewOutput(l2BlockNumber, outputIndex, outputRoot[:], blockTime)
 	h.lastOutputIndex = outputIndex
 
@@ -32,7 +33,7 @@ func (h *Host) handleProposeOutput(bridgeId uint64, proposer string, outputIndex
 	h.eventQueue = append(h.eventQueue, output)
 	h.outputPendingEventQueue = append(h.outputPendingEventQueue, output)
 
-	h.Logger().Info("propose output",
+	ctx.Logger().Info("propose output",
 		zap.Uint64("bridge_id", bridgeId),
 		zap.String("proposer", proposer),
 		zap.Uint64("output_index", outputIndex),
diff --git a/challenger/host/status.go b/challenger/host/status.go
index 2010ab1..eb82331 100644
--- a/challenger/host/status.go
+++ b/challenger/host/status.go
@@ -1,11 +1,11 @@
 package host
 
 import (
-	"errors"
 	"time"
 
 	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
 	nodetypes "github.com/initia-labs/opinit-bots/node/types"
+	"github.com/pkg/errors"
 )
 
 type Status struct {
@@ -18,7 +18,7 @@ type Status struct {
 func (h Host) GetStatus() (Status, error) {
 	nodeStatus, err := h.GetNodeStatus()
 	if err != nil {
-		return Status{}, err
+		return Status{}, errors.Wrap(err, "failed to get node status")
 	}
 	if h.eventHandler == nil {
 		return Status{}, errors.New("event handler is not initialized")
diff --git a/challenger/query.go b/challenger/query.go
index 4c42c39..49605cb 100644
--- a/challenger/query.go
+++ b/challenger/query.go
@@ -1,27 +1,56 @@
 package challenger
 
-import challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
+import (
+	"encoding/base64"
 
-func (c *Challenger) QueryChallenges(page uint64) (challenges []challengertypes.Challenge, err error) {
-	i := uint64(0)
-	iterErr := c.db.PrefixedIterate(challengertypes.ChallengeKey, nil, func(_, value []byte) (stop bool, err error) {
-		i++
-		if i >= (page+1)*100 {
+	dbtypes "github.com/initia-labs/opinit-bots/db/types"
+
+	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
+)
+
+func (c *Challenger) QueryChallenges(from string, limit uint64, descOrder bool) (res challengertypes.QueryChallengesResponse, err error) {
+	challenges := []challengertypes.Challenge{}
+	next := ""
+
+	count := uint64(0)
+	fetchFn := func(key, value []byte) (bool, error) {
+		if count >= limit {
+			next = base64.StdEncoding.EncodeToString(key)
 			return true, nil
 		}
-		if i < page*100 {
-			return false, nil
-		}
 		challenge := challengertypes.Challenge{}
 		err = challenge.Unmarshal(value)
 		if err != nil {
 			return true, err
 		}
+		count++
 		challenges = append(challenges, challenge)
 		return false, nil
-	})
-	if iterErr != nil {
-		return nil, iterErr
 	}
-	return
+
+	var startKey []byte
+	if from != "" {
+		startKey, err = base64.StdEncoding.DecodeString(from)
+		if err != nil {
+			return challengertypes.QueryChallengesResponse{}, err
+		}
+	}
+
+	if descOrder {
+		err = c.db.ReverseIterate(dbtypes.AppendSplitter(challengertypes.ChallengeKey), startKey, fetchFn)
+		if err != nil {
+			return challengertypes.QueryChallengesResponse{}, err
+		}
+	} else {
+		err = c.db.Iterate(dbtypes.AppendSplitter(challengertypes.ChallengeKey), startKey, fetchFn)
+		if err != nil {
+			return challengertypes.QueryChallengesResponse{}, err
+		}
+	}
+
+	if next != "" {
+		res.Next = &next
+	}
+	res.Challenges = challenges
+	return res, nil
 }
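QueryChallenges above switches from page numbers to cursor pagination: the caller passes the opaque base64-encoded key returned as Next, a limit capped at 100, and order=asc|desc, and receives the cursor for the following page. A hypothetical client loop against the new /challenges endpoint (the base URL, function name, and error handling are illustrative, not part of the codebase):

package example // illustration only; not part of the diff

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"

	challengertypes "github.com/initia-labs/opinit-bots/challenger/types"
)

// fetchAllChallenges pages through /challenges until the server stops
// returning a cursor. baseURL is a placeholder for the bot's API address.
func fetchAllChallenges(baseURL string) ([]challengertypes.Challenge, error) {
	var all []challengertypes.Challenge
	next := ""
	for {
		endpoint := fmt.Sprintf("%s/challenges?limit=100&order=desc", baseURL)
		if next != "" {
			// next is the opaque base64 key cursor from the previous page.
			endpoint += "&next=" + url.QueryEscape(next)
		}

		resp, err := http.Get(endpoint)
		if err != nil {
			return nil, err
		}
		var page challengertypes.QueryChallengesResponse
		err = json.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close()
		if err != nil {
			return nil, err
		}

		all = append(all, page.Challenges...)
		if page.Next == nil {
			// No cursor: the last page has been reached.
			return all, nil
		}
		next = *page.Next
	}
}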
} @@ -71,5 +71,5 @@ func ParseChallenge(key []byte) (time.Time, ChallengeId, error) { typeBz := key[cursor : cursor+1] cursor += 1 + 1 // u8 + splitter idBz := key[cursor:] - return time.Unix(0, types.MustUint64ToInt64(dbtypes.ToUint64Key(timeBz))), ChallengeId{Type: EventType(typeBz[0]), Id: dbtypes.ToUint64Key(idBz)}, nil + return time.Unix(0, types.MustUint64ToInt64(dbtypes.ToUint64Key(timeBz))).UTC(), ChallengeId{Type: EventType(typeBz[0]), Id: dbtypes.ToUint64Key(idBz)}, nil } diff --git a/challenger/types/query.go b/challenger/types/query.go new file mode 100644 index 0000000..35396c1 --- /dev/null +++ b/challenger/types/query.go @@ -0,0 +1,6 @@ +package types + +type QueryChallengesResponse struct { + Challenges []Challenge `json:"challenges"` + Next *string `json:"next,omitempty"` +} diff --git a/client/client.go b/client/client.go index 8c408f1..f7a5434 100644 --- a/client/client.go +++ b/client/client.go @@ -19,6 +19,7 @@ import ( ctypes "github.com/cometbft/cometbft/rpc/core/types" jsonrpcclient "github.com/cometbft/cometbft/rpc/jsonrpc/client" "github.com/cometbft/cometbft/types" + clienttypes "github.com/initia-labs/opinit-bots/client/types" ) /* @@ -147,6 +148,13 @@ func NewWithClient(remote, wsEndpoint string, client *http.Client) (*HTTP, error return httpClient, nil } +// NewWithCaller allows for setting a custom caller for testing purposes. +func NewWithCaller(caller jsonrpcclient.Caller) *HTTP { + return &HTTP{ + baseRPCClient: &baseRPCClient{caller: caller}, + } +} + var _ rpcclient.Client = (*HTTP)(nil) // SetLogger sets a logger. @@ -412,7 +420,7 @@ func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*ctypes.Resul } func (c *baseRPCClient) BlockBulk(ctx context.Context, start *int64, end *int64) ([][]byte, error) { - result := new(ResultBlockBulk) + result := new(clienttypes.ResultBlockBulk) params := make(map[string]interface{}) if start != nil { params["start"] = start @@ -494,7 +502,7 @@ func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.Resu } func (c *baseRPCClient) RawCommit(ctx context.Context, height *int64) ([]byte, error) { - result := new(ResultRawCommit) + result := new(clienttypes.ResultRawCommit) params := make(map[string]interface{}) if height != nil { params["height"] = height diff --git a/client/mock/client.go b/client/mock/client.go new file mode 100644 index 0000000..1ecef01 --- /dev/null +++ b/client/mock/client.go @@ -0,0 +1,68 @@ +package mockclient + +import ( + "context" + "errors" + + ctypes "github.com/cometbft/cometbft/rpc/core/types" + jsonrpcclient "github.com/cometbft/cometbft/rpc/jsonrpc/client" + clienttypes "github.com/initia-labs/opinit-bots/client/types" +) + +type MockCaller struct { + latestHeight int64 + resultStatus ctypes.ResultStatus + rawCommits map[int64][]byte +} + +func NewMockCaller() *MockCaller { + return &MockCaller{ + rawCommits: make(map[int64][]byte), + } +} + +var _ jsonrpcclient.Caller = (*MockCaller)(nil) + +func (m *MockCaller) Call(ctx context.Context, method string, params map[string]interface{}, result interface{}) (interface{}, error) { + switch method { + case "status": + return m.status(params, result.(*ctypes.ResultStatus)) + case "raw_commit": + return m.rawCommit(params, result.(*clienttypes.ResultRawCommit)) + } + return nil, errors.New("not supported method") +} + +func (m *MockCaller) SetLatestHeight(height int64) { + m.latestHeight = height +} + +func (m *MockCaller) SetResultStatus(result ctypes.ResultStatus) { + m.resultStatus = result +} + +func (m 
*MockCaller) status(_ map[string]interface{}, result *ctypes.ResultStatus) (interface{}, error) { + *result = m.resultStatus + return nil, nil +} + +func (m *MockCaller) SetRawCommit(height int64, commitBytes []byte) { + m.rawCommits[height] = commitBytes +} + +func (m *MockCaller) rawCommit(params map[string]interface{}, result *clienttypes.ResultRawCommit) (interface{}, error) { + h := params["height"].(*int64) + height := m.latestHeight + if h != nil { + height = *h + } + + commitBytes, ok := m.rawCommits[height] + if !ok { + return nil, errors.New("commit not found") + } + *result = clienttypes.ResultRawCommit{ + Commit: commitBytes, + } + return nil, nil +} diff --git a/client/types.go b/client/types/types.go similarity index 92% rename from client/types.go rename to client/types/types.go index 22ef120..633905b 100644 --- a/client/types.go +++ b/client/types/types.go @@ -1,4 +1,4 @@ -package http +package types // Result of block bulk type ResultBlockBulk struct { diff --git a/cmd/opinitd/db.go b/cmd/opinitd/db.go index a3c8d50..56eb322 100644 --- a/cmd/opinitd/db.go +++ b/cmd/opinitd/db.go @@ -33,21 +33,21 @@ v0.1.9-2: Fill block hash of finalized tree switch version { case "v0.1.5": // Run migration for v0.1.5 - db, err := db.NewDB(bot.GetDBPath(ctx.homePath, bottypes.BotTypeExecutor)) + db, err := db.NewDB(GetDBPath(ctx.homePath, bottypes.BotTypeExecutor)) if err != nil { return err } return executor.Migration015(db) case "v0.1.9-1": // Run migration for v0.1.9-1 - db, err := db.NewDB(bot.GetDBPath(ctx.homePath, bottypes.BotTypeExecutor)) + db, err := db.NewDB(GetDBPath(ctx.homePath, bottypes.BotTypeExecutor)) if err != nil { return err } - return executor.Migration0191(db) + return executor.Migration019_1(db) case "v0.1.9-2": // Run migration for v0.1.9-2 - db, err := db.NewDB(bot.GetDBPath(ctx.homePath, bottypes.BotTypeExecutor)) + db, err := db.NewDB(GetDBPath(ctx.homePath, bottypes.BotTypeExecutor)) if err != nil { return err } @@ -57,7 +57,9 @@ v0.1.9-2: Fill block hash of finalized tree if err != nil { return err } - cmdCtx = types.WithPollingInterval(cmdCtx, interval) + + baseCtx := types.NewContext(cmdCtx, ctx.logger.Named(string(bottypes.BotTypeExecutor)), ctx.homePath). 
+ WithPollingInterval(interval) configPath, err := getConfigPath(cmd, ctx.homePath, string(bottypes.BotTypeExecutor)) if err != nil { @@ -70,7 +72,7 @@ v0.1.9-2: Fill block hash of finalized tree return err } - l2Config := cfg.L2NodeConfig(ctx.homePath) + l2Config := cfg.L2NodeConfig() broadcasterConfig := l2Config.BroadcasterConfig cdc, _, err := child.GetCodec(broadcasterConfig.Bech32Prefix) if err != nil { @@ -82,14 +84,21 @@ v0.1.9-2: Fill block hash of finalized tree return err } - return executor.Migration0192(cmdCtx, db, rpcClient) + return executor.Migration019_2(baseCtx, db, rpcClient) case "v0.1.10": // Run migration for v0.1.10 - db, err := db.NewDB(bot.GetDBPath(ctx.homePath, bottypes.BotTypeExecutor)) + db, err := db.NewDB(GetDBPath(ctx.homePath, bottypes.BotTypeExecutor)) if err != nil { return err } return executor.Migration0110(db) + case "v0.1.11": + // Run migration for v0.1.11 + db, err := db.NewDB(GetDBPath(ctx.homePath, bottypes.BotTypeExecutor)) + if err != nil { + return err + } + return executor.Migration0111(db) default: return fmt.Errorf("unknown migration version: %s", version) } diff --git a/cmd/opinitd/key.go b/cmd/opinitd/key.go index 5632455..7991fc8 100644 --- a/cmd/opinitd/key.go +++ b/cmd/opinitd/key.go @@ -151,7 +151,7 @@ $ keys add l2 key2 --output json`), outputFormat, _ := cmd.Flags().GetString(flagOutput) var output string switch outputFormat { - case "json": + case "json": //nolint jsonOutput := make(keyJsonOutput) jsonOutput[account.Name] = keyJsonOutputElem{ Address: addrString, diff --git a/cmd/opinitd/reset.go b/cmd/opinitd/reset.go index 82d84b2..9a044be 100644 --- a/cmd/opinitd/reset.go +++ b/cmd/opinitd/reset.go @@ -7,9 +7,8 @@ import ( "github.com/spf13/cobra" - "github.com/initia-labs/opinit-bots/bot" bottypes "github.com/initia-labs/opinit-bots/bot/types" - "github.com/initia-labs/opinit-bots/challenger" + challengerdb "github.com/initia-labs/opinit-bots/challenger/db" "github.com/initia-labs/opinit-bots/db" "github.com/initia-labs/opinit-bots/executor" ) @@ -58,7 +57,7 @@ func resetHeightsCmd(ctx *cmdContext) *cobra.Command { return err } - db, err := db.NewDB(bot.GetDBPath(ctx.homePath, botType)) + db, err := db.NewDB(GetDBPath(ctx.homePath, botType)) if err != nil { return err } @@ -67,7 +66,7 @@ func resetHeightsCmd(ctx *cmdContext) *cobra.Command { case bottypes.BotTypeExecutor: return executor.ResetHeights(db) case bottypes.BotTypeChallenger: - return challenger.ResetHeights(db) + return challengerdb.ResetHeights(db) } return errors.New("unknown bot type") }, @@ -96,16 +95,17 @@ Challenger node types: return err } - db, err := db.NewDB(bot.GetDBPath(ctx.homePath, botType)) + db, err := db.NewDB(GetDBPath(ctx.homePath, botType)) if err != nil { return err } + defer db.Close() switch botType { case bottypes.BotTypeExecutor: return executor.ResetHeight(db, args[1]) case bottypes.BotTypeChallenger: - return challenger.ResetHeight(db, args[1]) + return challengerdb.ResetHeight(db, args[1]) } return errors.New("unknown bot type") }, diff --git a/cmd/opinitd/root.go b/cmd/opinitd/root.go index b2919ce..ce8ef7a 100644 --- a/cmd/opinitd/root.go +++ b/cmd/opinitd/root.go @@ -20,7 +20,7 @@ func NewRootCmd() *cobra.Command { } rootCmd.PersistentPreRunE = func(cmd *cobra.Command, _ []string) (err error) { - ctx.logger, err = getLogger(ctx.v.GetString("log-level")) + ctx.logger, err = getLogger(ctx.v.GetString("log-level"), ctx.v.GetString("log-format")) if err != nil { return err } @@ -41,6 +41,11 @@ func NewRootCmd() *cobra.Command { panic(err) } 
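For reference, a minimal sketch (not part of the diff) of driving the cursor-based QueryChallenges API added in challenger/query.go above. It assumes a fully constructed *Challenger; `Next` is an opaque base64-encoded key, so callers feed it back unchanged:

```go
package main

import (
	"fmt"

	"github.com/initia-labs/opinit-bots/challenger"
)

// pageChallenges walks all stored challenges, newest first, 100 per page.
// Sketch only: c is assumed to be a fully initialized Challenger.
func pageChallenges(c *challenger.Challenger) error {
	next := ""
	for {
		res, err := c.QueryChallenges(next, 100, true)
		if err != nil {
			return err
		}
		for _, ch := range res.Challenges {
			fmt.Printf("%+v\n", ch)
		}
		if res.Next == nil {
			return nil // last page reached
		}
		next = *res.Next // opaque base64 cursor, fed back verbatim
	}
}
```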
+ rootCmd.PersistentFlags().String("log-format", "plain", "log format (plain or json)") + if err := ctx.v.BindPFlag("log-format", rootCmd.PersistentFlags().Lookup("log-format")); err != nil { + panic(err) + } + rootCmd.AddCommand( initCmd(ctx), startCmd(ctx), @@ -55,7 +60,7 @@ func NewRootCmd() *cobra.Command { return rootCmd } -func getLogger(logLevel string) (*zap.Logger, error) { +func getLogger(logLevel string, logFormat string) (*zap.Logger, error) { level := zap.InfoLevel switch logLevel { case "debug": @@ -70,10 +75,15 @@ func getLogger(logLevel string) (*zap.Logger, error) { level = zap.FatalLevel } - config := zap.NewDevelopmentConfig() + encoding := "console" + if logFormat == "json" { + encoding = "json" + } + + config := zap.NewProductionConfig() config.Level = zap.NewAtomicLevelAt(level) config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder - + config.Encoding = encoding return config.Build() } diff --git a/cmd/opinitd/start.go b/cmd/opinitd/start.go index 34d33af..f685077 100644 --- a/cmd/opinitd/start.go +++ b/cmd/opinitd/start.go @@ -13,6 +13,7 @@ import ( "github.com/initia-labs/opinit-bots/bot" bottypes "github.com/initia-labs/opinit-bots/bot/types" + "github.com/initia-labs/opinit-bots/db" "github.com/initia-labs/opinit-bots/types" ) @@ -20,7 +21,7 @@ const ( flagPollingInterval = "polling-interval" ) -func startCmd(ctx *cmdContext) *cobra.Command { +func startCmd(cmdCtx *cmdContext) *cobra.Command { cmd := &cobra.Command{ Use: "start [bot-name]", Args: cobra.ExactArgs(1), @@ -36,35 +37,43 @@ Currently supported bots: return err } - configPath, err := getConfigPath(cmd, ctx.homePath, args[0]) + configPath, err := getConfigPath(cmd, cmdCtx.homePath, args[0]) if err != nil { return err } - bot, err := bot.NewBot(botType, ctx.logger, ctx.homePath, configPath) + db, err := db.NewDB(GetDBPath(cmdCtx.homePath, botType)) if err != nil { return err } + defer db.Close() - cmdCtx, botDone := context.WithCancel(cmd.Context()) + bot, err := bot.NewBot(botType, db, configPath) + if err != nil { + return err + } + + ctx, botDone := context.WithCancel(cmd.Context()) gracefulShutdown(botDone) - errGrp, ctx := errgroup.WithContext(cmdCtx) - ctx = types.WithErrGrp(ctx, errGrp) + errGrp, ctx := errgroup.WithContext(ctx) interval, err := cmd.Flags().GetDuration(flagPollingInterval) if err != nil { return err } - ctx = types.WithPollingInterval(ctx, interval) - err = bot.Initialize(ctx) + + baseCtx := types.NewContext(ctx, cmdCtx.logger.Named(string(botType)), cmdCtx.homePath). + WithErrGrp(errGrp). 
+ WithPollingInterval(interval) + err = bot.Initialize(baseCtx) if err != nil { return err } - return bot.Start(ctx) + return bot.Start(baseCtx) }, } - cmd = configFlag(ctx.v, cmd) + cmd = configFlag(cmdCtx.v, cmd) cmd.Flags().Duration(flagPollingInterval, 100*time.Millisecond, "Polling interval in milliseconds") return cmd } @@ -78,3 +87,7 @@ func gracefulShutdown(done context.CancelFunc) { done() }() } + +func GetDBPath(homePath string, botName bottypes.BotType) string { + return fmt.Sprintf(homePath+"/%s.db", botName) +} diff --git a/cmd/opinitd/tx.go b/cmd/opinitd/tx.go index ebca4cc..bdfeefa 100644 --- a/cmd/opinitd/tx.go +++ b/cmd/opinitd/tx.go @@ -50,13 +50,16 @@ func txGrantOracleCmd(baseCtx *cmdContext) *cobra.Command { gracefulShutdown(botDone) errGrp, ctx := errgroup.WithContext(cmdCtx) - ctx = types.WithErrGrp(ctx, errGrp) + + baseCtx := types.NewContext(ctx, baseCtx.logger.Named(string(bottypes.BotTypeExecutor)), baseCtx.homePath). + WithErrGrp(errGrp) account, err := l2BroadcasterAccount(baseCtx, cmd) if err != nil { return err } - err = account.Load(ctx) + + err = account.Load(baseCtx) if err != nil { return err } @@ -81,12 +84,12 @@ func txGrantOracleCmd(baseCtx *cmdContext) *cobra.Command { return err } - txBytes, _, err := account.BuildTxWithMessages(ctx, []sdk.Msg{grantMsg, feegrantMsg}) + txBytes, _, err := account.BuildTxWithMsgs(ctx, []sdk.Msg{grantMsg, feegrantMsg}) if err != nil { return errors.Wrapf(err, "simulation failed") } - res, err := account.BroadcastTxSync(ctx, txBytes) + res, err := account.BroadcastTxSync(baseCtx, txBytes) if err != nil { // TODO: handle error, may repeat sending tx return fmt.Errorf("broadcast txs: %w", err) @@ -104,8 +107,8 @@ func txGrantOracleCmd(baseCtx *cmdContext) *cobra.Command { return cmd } -func l2BroadcasterAccount(ctx *cmdContext, cmd *cobra.Command) (*broadcaster.BroadcasterAccount, error) { - configPath, err := getConfigPath(cmd, ctx.homePath, string(bottypes.BotTypeExecutor)) +func l2BroadcasterAccount(ctx types.Context, cmd *cobra.Command) (*broadcaster.BroadcasterAccount, error) { + configPath, err := getConfigPath(cmd, ctx.HomePath(), string(bottypes.BotTypeExecutor)) if err != nil { return nil, err } @@ -116,7 +119,7 @@ func l2BroadcasterAccount(ctx *cmdContext, cmd *cobra.Command) (*broadcaster.Bro return nil, err } - l2Config := cfg.L2NodeConfig(ctx.homePath) + l2Config := cfg.L2NodeConfig() broadcasterConfig := l2Config.BroadcasterConfig cdc, txConfig, err := child.GetCodec(broadcasterConfig.Bech32Prefix) if err != nil { @@ -132,5 +135,5 @@ func l2BroadcasterAccount(ctx *cmdContext, cmd *cobra.Command) (*broadcaster.Bro Name: cfg.BridgeExecutor, } - return broadcaster.NewBroadcasterAccount(*broadcasterConfig, cdc, txConfig, rpcClient, keyringConfig) + return broadcaster.NewBroadcasterAccount(ctx, *broadcasterConfig, cdc, txConfig, rpcClient, keyringConfig) } diff --git a/db/db.go b/db/db.go index 1590c1c..939d8c4 100644 --- a/db/db.go +++ b/db/db.go @@ -3,6 +3,7 @@ package db import ( "bytes" + "github.com/pkg/errors" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/util" @@ -14,40 +15,20 @@ var _ types.DB = (*LevelDB)(nil) type LevelDB struct { db *leveldb.DB - path string prefix []byte } -func NewDB(path string) (types.DB, error) { +func NewDB(path string) (*LevelDB, error) { db, err := leveldb.OpenFile(path, nil) if err != nil { return nil, err } return &LevelDB{ - db: db, - path: path, + db: db, }, nil } -// RawBatchSet sets the key-value pairs in the database without prefixing the 
keys. -// -// @dev: `LevelDB.prefix“ is not used as the prefix for the keys. -func (db *LevelDB) RawBatchSet(kvs ...types.RawKV) error { - if len(kvs) == 0 { - return nil - } - batch := new(leveldb.Batch) - for _, kv := range kvs { - if kv.Value == nil { - batch.Delete(kv.Key) - } else { - batch.Put(kv.Key, kv.Value) - } - } - return db.db.Write(batch, nil) -} - // BatchSet sets the key-value pairs in the database with prefixing the keys. func (db *LevelDB) BatchSet(kvs ...types.KV) error { if len(kvs) == 0 { @@ -71,7 +52,11 @@ func (db *LevelDB) Set(key []byte, value []byte) error { // Get gets the value of the key in the database with prefixing the key. func (db *LevelDB) Get(key []byte) ([]byte, error) { - return db.db.Get(db.PrefixedKey(key), nil) + v, err := db.db.Get(db.PrefixedKey(key), nil) + if errors.Is(err, leveldb.ErrNotFound) { + return nil, errors.Wrapf(err, "key: %v", key) + } + return v, err } // Delete deletes the key in the database with prefixing the key. @@ -84,10 +69,10 @@ func (db *LevelDB) Close() error { return db.db.Close() } -// PrefixedIterate iterates over the key-value pairs in the database with prefixing the keys. +// Iterate iterates over the key-value pairs in the database with prefixing the keys. // // @dev: `LevelDB.prefix + prefix` is used as the prefix for the iteration. -func (db *LevelDB) PrefixedIterate(prefix []byte, start []byte, cb func(key, value []byte) (stop bool, err error)) (iterErr error) { +func (db *LevelDB) Iterate(prefix []byte, start []byte, cb func(key, value []byte) (stop bool, err error)) (iterErr error) { iter := db.db.NewIterator(util.BytesPrefix(db.PrefixedKey(prefix)), nil) defer func() { iter.Release() @@ -114,7 +99,8 @@ func (db *LevelDB) PrefixedIterate(prefix []byte, start []byte, cb func(key, val return } -func (db *LevelDB) PrefixedReverseIterate(prefix []byte, start []byte, cb func(key, value []byte) (stop bool, err error)) (iterErr error) { +// ReverseIterate iterates over the key-value pairs in the database with prefixing the keys in reverse order. +func (db *LevelDB) ReverseIterate(prefix []byte, start []byte, cb func(key, value []byte) (stop bool, err error)) (iterErr error) { iter := db.db.NewIterator(util.BytesPrefix(db.PrefixedKey(prefix)), nil) defer func() { iter.Release() @@ -150,6 +136,7 @@ func (db *LevelDB) PrefixedReverseIterate(prefix []byte, start []byte, cb func(k } // SeekPrevInclusiveKey seeks the previous key-value pair in the database with prefixing the keys. +// If the key is found, it returns the key-value pair. // // @dev: `LevelDB.prefix + prefix` is used as the prefix for the iteration. 
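A short sketch of the renamed iterator API (PrefixedIterate becomes Iterate, PrefixedReverseIterate becomes ReverseIterate); the semantics are unchanged, with `LevelDB.prefix + prefix` as the iteration prefix and `start` as an optional starting key. It uses NewMemDB from db/testutils.go below:

```go
package main

import (
	"fmt"

	"github.com/initia-labs/opinit-bots/db"
)

func main() {
	mdb, err := db.NewMemDB()
	if err != nil {
		panic(err)
	}
	_ = mdb.Set([]byte("challenge/1"), []byte("a"))
	_ = mdb.Set([]byte("challenge/2"), []byte("b"))

	// Visits every key under the prefix; returning stop=true ends the walk early.
	err = mdb.Iterate([]byte("challenge"), nil, func(key, value []byte) (bool, error) {
		fmt.Printf("%s => %s\n", key, value)
		return false, nil
	})
	if err != nil {
		panic(err)
	}
}
```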
func (db *LevelDB) SeekPrevInclusiveKey(prefix []byte, key []byte) (k []byte, v []byte, err error) { @@ -194,14 +181,10 @@ func (db LevelDB) UnprefixedKey(key []byte) []byte { return bytes.TrimPrefix(key, append(db.prefix, dbtypes.Splitter)) } -func (db LevelDB) GetPath() string { - return db.path +func (db LevelDB) GetPrefix() []byte { + return db.prefix } -func (db LevelDB) GetPrefix() []byte { - splits := bytes.Split(db.prefix, []byte{dbtypes.Splitter}) - if len(splits) == 0 { - return nil - } - return splits[len(splits)-1] +func (db *LevelDB) NewStage() types.CommitDB { + return newStage(db) } diff --git a/db/db_test.go b/db/db_test.go new file mode 100644 index 0000000..92cad6d --- /dev/null +++ b/db/db_test.go @@ -0,0 +1,372 @@ +package db + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + + dbtypes "github.com/initia-labs/opinit-bots/db/types" + "github.com/initia-labs/opinit-bots/types" +) + +func makeTestSet() []types.KV { + pairs := make([]types.KV, 0) + + pairs = append(pairs, types.KV{Key: []byte("key1"), Value: []byte("value1")}) + pairs = append(pairs, types.KV{Key: []byte("key2"), Value: []byte("value2")}) + pairs = append(pairs, types.KV{Key: []byte("key3"), Value: []byte("value3")}) + pairs = append(pairs, types.KV{Key: []byte("key4"), Value: []byte("value4")}) + pairs = append(pairs, types.KV{Key: []byte("key5"), Value: []byte("value5")}) + + for i := range 1000 { + pairs = append(pairs, types.KV{Key: append([]byte("key3"), dbtypes.FromUint64Key(uint64(i))...), Value: dbtypes.FromInt64(int64(i))}) + } + + for i := 5; i <= 5000; i += 5 { + pairs = append(pairs, types.KV{Key: append([]byte("key4"), dbtypes.FromUint64Key(uint64(i))...), Value: dbtypes.FromInt64(int64(i))}) + } + return pairs +} + +func CreateTestDB(t *testing.T, pairs []types.KV) *LevelDB { + db, err := NewMemDB() + require.NoError(t, err) + + for _, pair := range pairs { + err := db.Set(pair.Key, pair.Value) + require.NoError(t, err) + } + return db +} + +func TestNewDB(t *testing.T) { + db, err := NewMemDB() + require.NoError(t, err) + require.NotNil(t, db) +} + +func TestClose(t *testing.T) { + db, err := NewMemDB() + require.NoError(t, err) + + err = db.Close() + require.NoError(t, err) + + err = db.Close() + require.Error(t, err) +} + +func TestPrefix(t *testing.T) { + db, err := NewMemDB() + require.NoError(t, err) + + require.Equal(t, db.prefix, []byte(nil)) + require.Equal(t, db.GetPrefix(), []byte(nil)) + + db.prefix = []byte("abc") + require.Equal(t, db.GetPrefix(), []byte("abc")) +} + +func TestPrefixedKey(t *testing.T) { + db, err := NewMemDB() + require.NoError(t, err) + + prefixedKey := db.PrefixedKey([]byte("key1")) + require.Equal(t, prefixedKey, []byte("/key1")) + + db.prefix = []byte("abc") + prefixedKey = db.PrefixedKey([]byte("key1")) + require.Equal(t, prefixedKey, []byte("abc/key1")) + + db.prefix = []byte("abc/def") + prefixedKey = db.PrefixedKey([]byte("key2")) + require.Equal(t, prefixedKey, []byte("abc/def/key2")) +} + +func TestUnprefixedKey(t *testing.T) { + db, err := NewMemDB() + require.NoError(t, err) + + unprefixedKey := db.UnprefixedKey([]byte("key1")) + require.Equal(t, unprefixedKey, []byte("key1")) + + unprefixedKey = db.UnprefixedKey([]byte("/key1")) + require.Equal(t, unprefixedKey, []byte("key1")) + + db.prefix = []byte("abc") + unprefixedKey = db.UnprefixedKey([]byte("abc/key1")) + require.Equal(t, unprefixedKey, []byte("key1")) + + db.prefix = []byte("abc/def") + unprefixedKey = db.UnprefixedKey([]byte("abc/def/key2")) + require.Equal(t, 
unprefixedKey, []byte("key2")) +} + +func TestWithPrefix(t *testing.T) { + db, err := NewMemDB() + require.NoError(t, err) + + newDB := db.WithPrefix([]byte("abc")) + require.Equal(t, newDB.GetPrefix(), []byte("/abc")) + + newDB2 := newDB.WithPrefix([]byte("abc")) + require.Equal(t, newDB2.GetPrefix(), []byte("/abc/abc")) + + newDB3 := newDB2.WithPrefix([]byte("abc/def")) + require.Equal(t, newDB3.GetPrefix(), []byte("/abc/abc/abc/def")) +} + +func TestSet(t *testing.T) { + db, err := NewMemDB() + require.NoError(t, err) + + cases := []struct { + title string + key []byte + value []byte + expected bool + }{ + {"simple set", []byte("key1"), []byte("value1"), true}, + {"duplicated key", []byte("key1"), []byte("value2"), true}, + {"empty key", []byte(""), []byte("value1"), true}, + {"empty value", []byte("key2"), []byte(""), true}, + } + + for _, tc := range cases { + t.Run(tc.title, func(t *testing.T) { + err := db.Set(tc.key, tc.value) + if tc.expected { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + +func TestGet(t *testing.T) { + db, err := NewMemDB() + require.NoError(t, err) + + cases := []struct { + title string + key []byte + value []byte + expected []bool + }{ + {"simple get", []byte("key1"), []byte("value1"), []bool{false, true, true}}, + {"empty key", []byte(""), []byte("value1"), []bool{false, true, true}}, + {"empty value", []byte("key2"), nil, []bool{false, true, true}}, + } + + for _, tc := range cases { + t.Run(tc.title, func(t *testing.T) { + _, err := db.Get(tc.key) + if tc.expected[0] { + require.NoError(t, err) + } else { + require.Error(t, err) + } + + err = db.Set(tc.key, tc.value) + if tc.expected[1] { + require.NoError(t, err) + } else { + require.Error(t, err) + } + + value, err := db.Get(tc.key) + if tc.expected[2] { + require.NoError(t, err) + require.Equal(t, tc.value, value) + } else { + require.Error(t, err) + } + }) + } +} + +func TestDelete(t *testing.T) { + db, err := NewMemDB() + require.NoError(t, err) + + cases := []struct { + title string + key []byte + value []byte + expected bool + }{ + {"simple delete", []byte("key1"), []byte("value1"), true}, + {"not existing key", []byte("key1"), nil, true}, + } + + for _, tc := range cases { + t.Run(tc.title, func(t *testing.T) { + if tc.value != nil { + err = db.Set(tc.key, tc.value) + require.NoError(t, err) + } + + err := db.Delete(tc.key) + if tc.expected { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + +func TestBatchSet(t *testing.T) { + db, err := NewMemDB() + require.NoError(t, err) + + pairs := makeTestSet() + + err = db.BatchSet(pairs...) + require.NoError(t, err) + + for _, pair := range pairs { + value, err := db.Get(pair.Key) + require.NoError(t, err) + require.Equal(t, pair.Value, value) + } + + err = db.BatchSet([]types.KV{}...) 
+ require.NoError(t, err) +} + +func TestIterate(t *testing.T) { + pairs := makeTestSet() + db := CreateTestDB(t, pairs) + + cases := []struct { + title string + prefix []byte + start []byte + expected int + }{ + {"empty prefix", nil, nil, 2005}, + {"prefix key3", []byte("key3"), nil, 1001}, + {"prefix key3 start 1", []byte("key3"), append([]byte("key3"), dbtypes.FromUint64Key(1)...), 999}, + {"prefix key3 start 500", []byte("key3"), append([]byte("key3"), dbtypes.FromUint64Key(500)...), 500}, + {"prefix key3 start 999", []byte("key3"), append([]byte("key3"), dbtypes.FromUint64Key(999)...), 1}, + {"prefix key3 start 1000", []byte("key3"), append([]byte("key3"), dbtypes.FromUint64Key(1000)...), 0}, + {"prefix key4 start 1001", []byte("key4"), append([]byte("key4"), dbtypes.FromUint64Key(1001)...), 800}, + } + + for _, tc := range cases { + t.Run(tc.title, func(t *testing.T) { + i := 0 + err := db.Iterate(tc.prefix, tc.start, func(key, value []byte) (stop bool, err error) { + i++ + return false, nil + }) + require.NoError(t, err) + require.Equal(t, tc.expected, i) + }) + } +} + +func TestIterateError(t *testing.T) { + pairs := makeTestSet() + db := CreateTestDB(t, pairs) + + err := db.Iterate(nil, nil, func(key, value []byte) (stop bool, err error) { + return false, nil + }) + require.NoError(t, err) + + err = db.Iterate(nil, nil, func(key, value []byte) (stop bool, err error) { + return true, nil + }) + require.NoError(t, err) + + err = db.Iterate(nil, nil, func(key, value []byte) (stop bool, err error) { + return false, errors.New("simple error") + }) + require.Error(t, err) +} + +func TestReverseIterate(t *testing.T) { + pairs := makeTestSet() + db := CreateTestDB(t, pairs) + + cases := []struct { + title string + prefix []byte + start []byte + expected int + }{ + {"empty prefix", nil, nil, 2005}, + {"prefix key3", []byte("key3"), nil, 1001}, + {"prefix key3 start 1", []byte("key3"), append([]byte("key3"), dbtypes.FromUint64Key(1)...), 3}, + {"prefix key3 start 500", []byte("key3"), append([]byte("key3"), dbtypes.FromUint64Key(500)...), 502}, + {"prefix key3 start 999", []byte("key3"), append([]byte("key3"), dbtypes.FromUint64Key(999)...), 1001}, + {"prefix key3 start 1000", []byte("key3"), append([]byte("key3"), dbtypes.FromUint64Key(1000)...), 1001}, + {"prefix key4 start 3", []byte("key4"), append([]byte("key4"), dbtypes.FromUint64Key(3)...), 1}, + {"prefix key4 start 4997", []byte("key4"), append([]byte("key4"), dbtypes.FromUint64Key(4997)...), 1000}, + {"prefix key4 start 1000", []byte("key4"), append([]byte("key4"), dbtypes.FromUint64Key(1000)...), 201}, + } + + for _, tc := range cases { + t.Run(tc.title, func(t *testing.T) { + i := 0 + err := db.ReverseIterate(tc.prefix, tc.start, func(key, value []byte) (stop bool, err error) { + i++ + return false, nil + }) + require.NoError(t, err) + require.Equal(t, tc.expected, i) + }) + } +} + +func TestSeekPrevInclusiveKey(t *testing.T) { + pairs := makeTestSet() + db := CreateTestDB(t, pairs) + + cases := []struct { + title string + prefix []byte + start []byte + expected []byte + err bool + }{ + {"prefix key0", []byte("key0"), nil, []byte("key0"), true}, + {"prefix key3 start 1", []byte("key3"), append([]byte("key3"), dbtypes.FromUint64Key(1)...), append([]byte("key3"), dbtypes.FromUint64Key(1)...), false}, + {"prefix key3 start 500", []byte("key3"), append([]byte("key3"), dbtypes.FromUint64Key(500)...), append([]byte("key3"), dbtypes.FromUint64Key(500)...), false}, + {"prefix key3 start 1000", []byte("key3"), append([]byte("key3"), 
dbtypes.FromUint64Key(1000)...), append([]byte("key3"), dbtypes.FromUint64Key(999)...), false}, + {"prefix key4 start 0", []byte("key4"), append([]byte("key4"), dbtypes.FromUint64Key(0)...), []byte("key4"), false}, + {"prefix key4 start 4997", []byte("key4"), append([]byte("key4"), dbtypes.FromUint64Key(4997)...), append([]byte("key4"), dbtypes.FromUint64Key(4995)...), false}, + {"prefix key4 start 1000", []byte("key4"), append([]byte("key4"), dbtypes.FromUint64Key(1000)...), append([]byte("key4"), dbtypes.FromUint64Key(1000)...), false}, + {"prefix key4 start 5005", []byte("key4"), append([]byte("key4"), dbtypes.FromUint64Key(5005)...), append([]byte("key4"), dbtypes.FromUint64Key(5000)...), false}, + } + + for _, tc := range cases { + t.Run(tc.title, func(t *testing.T) { + key, _, err := db.SeekPrevInclusiveKey(tc.prefix, tc.start) + if tc.err { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tc.expected, key) + } + }) + } +} + +func TestNewStage(t *testing.T) { + db, err := NewMemDB() + require.NoError(t, err) + + tstage := db.NewStage() + require.NotNil(t, tstage) + + stage, ok := tstage.(Stage) + require.True(t, ok) + require.Equal(t, stage.batch.Len(), 0) + require.Equal(t, len(stage.kvmap), 0) + require.Equal(t, stage.parent, db) +} diff --git a/db/stage.go b/db/stage.go new file mode 100644 index 0000000..f1bbf89 --- /dev/null +++ b/db/stage.go @@ -0,0 +1,76 @@ +package db + +import ( + "github.com/initia-labs/opinit-bots/types" + "github.com/syndtr/goleveldb/leveldb" + "golang.org/x/exp/maps" +) + +type Stage struct { + batch *leveldb.Batch + kvmap map[string][]byte + parent *LevelDB + + prefixedKey func(key []byte) []byte +} + +func newStage(parent *LevelDB) Stage { + return Stage{ + batch: new(leveldb.Batch), + kvmap: make(map[string][]byte), + parent: parent, + + prefixedKey: parent.PrefixedKey, + } +} + +func (s Stage) WithPrefixedKey(prefixedKey func(key []byte) []byte) types.CommitDB { + s.prefixedKey = prefixedKey + return s +} + +var _ types.CommitDB = Stage{} + +func (s Stage) Set(key []byte, value []byte) error { + prefixedKey := s.prefixedKey(key) + s.batch.Put(prefixedKey, value) + s.kvmap[string(prefixedKey)] = value + return nil +} + +func (s Stage) Get(key []byte) ([]byte, error) { + prefixedKey := s.prefixedKey(key) + value, ok := s.kvmap[string(prefixedKey)] + if ok { + return value, nil + } + return s.parent.Get(key) +} + +func (s Stage) Delete(key []byte) error { + prefixedKey := s.prefixedKey(key) + s.batch.Delete(prefixedKey) + s.kvmap[string(prefixedKey)] = nil + return nil +} + +func (s Stage) Commit() error { + err := s.parent.db.Write(s.batch, nil) + if err != nil { + return err + } + return nil +} + +func (s Stage) Len() int { + return s.batch.Len() +} + +func (s Stage) Reset() { + s.batch.Reset() + maps.Clear(s.kvmap) +} + +func (s Stage) All() map[string][]byte { + return maps.Clone(s.kvmap) +} diff --git a/db/stage_test.go b/db/stage_test.go new file mode 100644 index 0000000..346dfc7 --- /dev/null +++ b/db/stage_test.go @@ -0,0 +1,256 @@ +package db + +import ( + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" +) + +func CreateTestStage(t *testing.T, db *LevelDB) (*LevelDB, Stage, error) { + var err error + if db == nil { + db, err = NewMemDB() + require.NoError(t, err) + } + tstage := db.NewStage() + stage, ok := tstage.(Stage) + require.True(t, ok) + return db, stage, nil +} + +func TestStageReset(t *testing.T) { + _, stage, err := CreateTestStage(t, nil) + require.NoError(t, err) + + 
stage.Reset() + require.Equal(t, stage.batch.Len(), 0) + require.Equal(t, len(stage.kvmap), 0) + + err = stage.Set([]byte("key"), []byte("value")) + require.NoError(t, err) + require.Equal(t, stage.batch.Len(), 1) + require.Equal(t, len(stage.kvmap), 1) + + stage.Reset() + require.Equal(t, stage.batch.Len(), 0) + require.Equal(t, len(stage.kvmap), 0) +} + +func TestStageLen(t *testing.T) { + _, stage, err := CreateTestStage(t, nil) + require.NoError(t, err) + + stage.Reset() + require.Equal(t, stage.Len(), 0) + + err = stage.Set([]byte("key"), []byte("value")) + require.NoError(t, err) + require.Equal(t, stage.Len(), 1) + + stage.Reset() + require.Equal(t, stage.Len(), 0) +} + +func TestStageSet(t *testing.T) { + _, stage, err := CreateTestStage(t, nil) + require.NoError(t, err) + + cases := []struct { + title string + key []byte + value []byte + expected bool + }{ + {"simple set", []byte("key1"), []byte("value1"), true}, + {"duplicated key", []byte("key1"), []byte("value2"), true}, + {"empty key", []byte(""), []byte("value1"), true}, + {"empty value", []byte("key2"), []byte(""), true}, + } + + for _, tc := range cases { + t.Run(tc.title, func(t *testing.T) { + err := stage.Set(tc.key, tc.value) + if tc.expected { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + +func TestStageGet(t *testing.T) { + _, stage, err := CreateTestStage(t, nil) + require.NoError(t, err) + + cases := []struct { + title string + key []byte + value []byte + expected []bool + }{ + {"simple get", []byte("key1"), []byte("value1"), []bool{false, true, true}}, + {"empty key", []byte(""), []byte("value1"), []bool{false, true, true}}, + {"empty value", []byte("key2"), nil, []bool{false, true, true}}, + } + + for _, tc := range cases { + t.Run(tc.title, func(t *testing.T) { + _, err := stage.Get(tc.key) + if tc.expected[0] { + require.NoError(t, err) + } else { + require.Error(t, err) + } + + err = stage.Set(tc.key, tc.value) + if tc.expected[1] { + require.NoError(t, err) + } else { + require.Error(t, err) + } + + value, err := stage.Get(tc.key) + if tc.expected[2] { + require.NoError(t, err) + require.Equal(t, tc.value, value) + } else { + require.Error(t, err) + } + }) + } +} + +func TestStageDelete(t *testing.T) { + _, stage, err := CreateTestStage(t, nil) + require.NoError(t, err) + + cases := []struct { + title string + key []byte + value []byte + expected bool + }{ + {"simple delete", []byte("key1"), []byte("value1"), true}, + {"not existing key", []byte("key1"), nil, true}, + } + + for _, tc := range cases { + t.Run(tc.title, func(t *testing.T) { + if tc.value != nil { + err = stage.Set(tc.key, tc.value) + require.NoError(t, err) + } + + err := stage.Delete(tc.key) + if tc.expected { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + +func TestStageCommit(t *testing.T) { + db := CreateTestDB(t, makeTestSet()) + _, stage, err := CreateTestStage(t, db) + require.NoError(t, err) + + err = stage.Delete([]byte("key1")) + require.NoError(t, err) + + err = stage.Delete([]byte("key1")) + require.NoError(t, err) + + err = stage.Set([]byte("key2"), []byte("new_value2")) + require.NoError(t, err) + + err = stage.Set([]byte("key10"), []byte("value10")) + require.NoError(t, err) + + err = stage.Commit() + require.NoError(t, err) + stage.Reset() + + _, err = db.Get([]byte("key1")) + require.Error(t, err) + value, err := db.Get([]byte("key2")) + require.NoError(t, err) + require.Equal(t, []byte("new_value2"), value) + + _, err = stage.Get([]byte("key1")) + require.Error(t, 
err) + value, err = stage.Get([]byte("key2")) + require.NoError(t, err) + require.Equal(t, []byte("new_value2"), value) + + value, err = stage.Get([]byte("key10")) + require.NoError(t, err) + require.Equal(t, []byte("value10"), value) + + err = stage.Delete([]byte("key11")) + require.NoError(t, err) + + err = stage.Commit() + require.NoError(t, err) + + _, err = db.Get([]byte("key11")) + require.Error(t, err) +} + +func TestWithPrefixedKey(t *testing.T) { + db, err := NewMemDB() + require.NoError(t, err) + + tdb1 := db.WithPrefix([]byte("123")) + db1 := tdb1.(*LevelDB) + db2 := db.WithPrefix([]byte("456")) + + _, stage1, err := CreateTestStage(t, db1) + require.NoError(t, err) + + err = stage1.Set([]byte("key1"), []byte("value1")) + require.NoError(t, err) + + err = stage1.WithPrefixedKey(db2.PrefixedKey).Set([]byte("key1"), []byte("value2")) + require.NoError(t, err) + + // previous WithPrefixedKey should not affect the new one + err = stage1.Set([]byte("key2"), []byte("value2")) + require.NoError(t, err) + + require.Equal(t, stage1.kvmap[string(db1.PrefixedKey([]byte("key1")))], []byte("value1")) + require.Equal(t, stage1.kvmap[string(db1.PrefixedKey([]byte("key2")))], []byte("value2")) + require.Equal(t, stage1.kvmap[string(db2.PrefixedKey([]byte("key1")))], []byte("value2")) +} + +func TestStageAll(t *testing.T) { + db, err := NewMemDB() + require.NoError(t, err) + + tdb1 := db.WithPrefix([]byte("123")) + db1 := tdb1.(*LevelDB) + db2 := db.WithPrefix([]byte("456")) + + _, stage1, err := CreateTestStage(t, db1) + require.NoError(t, err) + + err = stage1.Set([]byte("key1"), []byte("value1")) + require.NoError(t, err) + + err = stage1.WithPrefixedKey(db2.PrefixedKey).Set([]byte("key1"), []byte("value2")) + require.NoError(t, err) + + allKVs := stage1.All() + require.Equal(t, len(allKVs), 2) + require.Equal(t, allKVs["/123/key1"], []byte("value1")) + require.Equal(t, allKVs["/456/key1"], []byte("value2")) + + maps.Clear(allKVs) + require.Empty(t, allKVs) + + allKVs = stage1.All() + require.Equal(t, len(allKVs), 2) +} diff --git a/db/testutils.go b/db/testutils.go new file mode 100644 index 0000000..0218495 --- /dev/null +++ b/db/testutils.go @@ -0,0 +1,19 @@ +package db + +import ( + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +func NewMemDB() (*LevelDB, error) { + s := storage.NewMemStorage() + + db, err := leveldb.Open(s, nil) + if err != nil { + return nil, err + } + + return &LevelDB{ + db: db, + }, nil +} diff --git a/db/types/const.go b/db/types/const.go new file mode 100644 index 0000000..03a5ffc --- /dev/null +++ b/db/types/const.go @@ -0,0 +1,3 @@ +package types + +const Splitter = '/' diff --git a/db/types/errors.go b/db/types/errors.go index dd6a5f7..fa25ff1 100644 --- a/db/types/errors.go +++ b/db/types/errors.go @@ -1,5 +1,10 @@ package types -import "github.com/syndtr/goleveldb/leveldb" +import ( + "errors" + + "github.com/syndtr/goleveldb/leveldb" +) var ErrNotFound = leveldb.ErrNotFound +var ErrInvalidParentDBType = errors.New("invalid parent DB type") diff --git a/db/types/utils.go b/db/types/utils.go index e2771a5..cfe97e6 100644 --- a/db/types/utils.go +++ b/db/types/utils.go @@ -1,13 +1,12 @@ package types import ( + "bytes" "encoding/binary" "fmt" "strconv" ) -const Splitter = '/' - func FromInt64(v int64) []byte { return []byte(fmt.Sprintf("%d", v)) } @@ -15,7 +14,7 @@ func FromInt64(v int64) []byte { func ToInt64(v []byte) (int64, error) { data, err := strconv.ParseInt(string(v), 10, 64) if err != nil { - return 0, 
fmt.Errorf("failed to parse uint64 from %s: %w", string(v), err) + return 0, fmt.Errorf("failed to parse int64 from %s: %w", string(v), err) } return data, nil } @@ -42,3 +41,11 @@ func FromUint64Key(v uint64) []byte { func ToUint64Key(data []byte) (v uint64) { return binary.BigEndian.Uint64(data) } + +func GenerateKey(parts [][]byte) []byte { + return bytes.Join(parts, []byte{Splitter}) +} + +func AppendSplitter(data []byte) []byte { + return append(data, Splitter) +} diff --git a/db/types/utils_test.go b/db/types/utils_test.go index 0fea785..25373e9 100644 --- a/db/types/utils_test.go +++ b/db/types/utils_test.go @@ -8,7 +8,11 @@ import ( "github.com/stretchr/testify/require" ) -func TestUint64Key(t *testing.T) { +func TestFromToUint64Key(t *testing.T) { + bytes0 := types.FromUint64Key(0) + res0 := types.ToUint64Key(bytes0) + require.Equal(t, uint64(0), res0) + bytes10 := types.FromUint64Key(10) res10 := types.ToUint64Key(bytes10) require.Equal(t, uint64(10), res10) @@ -19,3 +23,47 @@ func TestUint64Key(t *testing.T) { require.True(t, bytes.Compare(bytes10, bytes100) < 0) } + +func TestFromToInt64(t *testing.T) { + bytesm10 := types.FromInt64(-10) + require.Equal(t, []byte("-10"), bytesm10) + resm10, err := types.ToInt64(bytesm10) + require.NoError(t, err) + require.Equal(t, int64(-10), resm10) + + bytes0 := types.FromInt64(0) + require.Equal(t, []byte("0"), bytes0) + res0, err := types.ToInt64(bytes0) + require.NoError(t, err) + require.Equal(t, int64(0), res0) + + bytes10 := types.FromInt64(10) + require.Equal(t, []byte("10"), bytes10) + res10, err := types.ToInt64(bytes10) + require.NoError(t, err) + require.Equal(t, int64(10), res10) +} + +func TestFromToUInt64(t *testing.T) { + bytes0 := types.FromUint64(0) + require.Equal(t, []byte("0"), bytes0) + res0, err := types.ToUint64(bytes0) + require.NoError(t, err) + require.Equal(t, uint64(0), res0) + + bytes10 := types.FromUint64(10) + require.Equal(t, []byte("10"), bytes10) + res10, err := types.ToUint64(bytes10) + require.NoError(t, err) + require.Equal(t, uint64(10), res10) +} + +func TestGenerateKey(t *testing.T) { + key := types.GenerateKey([][]byte{{1}, {2}, {3}}) + require.Equal(t, []byte{1, types.Splitter, 2, types.Splitter, 3}, key) +} + +func TestAppendSplitter(t *testing.T) { + key := types.AppendSplitter([]byte{1, 2, 3}) + require.Equal(t, []byte{1, 2, 3, types.Splitter}, key) +} diff --git a/executor/batch/db.go b/executor/batch/db.go deleted file mode 100644 index c77b54b..0000000 --- a/executor/batch/db.go +++ /dev/null @@ -1,40 +0,0 @@ -package batch - -import ( - "encoding/json" - - dbtypes "github.com/initia-labs/opinit-bots/db/types" - "github.com/initia-labs/opinit-bots/types" -) - -var LocalBatchInfoKey = []byte("local_batch_info") - -func (bs *BatchSubmitter) loadLocalBatchInfo() error { - val, err := bs.db.Get(LocalBatchInfoKey) - if err != nil { - if err == dbtypes.ErrNotFound { - return nil - } - return err - } - return json.Unmarshal(val, &bs.localBatchInfo) -} - -func (bs *BatchSubmitter) localBatchInfoToRawKV() (types.RawKV, error) { - value, err := json.Marshal(bs.localBatchInfo) - if err != nil { - return types.RawKV{}, err - } - return types.RawKV{ - Key: bs.db.PrefixedKey(LocalBatchInfoKey), - Value: value, - }, nil -} - -func (bs *BatchSubmitter) saveLocalBatchInfo() error { - value, err := json.Marshal(bs.localBatchInfo) - if err != nil { - return err - } - return bs.db.Set(LocalBatchInfoKey, value) -} diff --git a/executor/batch/handler.go b/executor/batch/handler.go deleted file mode 100644 index 
a86ab6f..0000000 --- a/executor/batch/handler.go +++ /dev/null @@ -1,339 +0,0 @@ -package batch - -import ( - "bytes" - "context" - "fmt" - "io" - "time" - - "github.com/pkg/errors" - "go.uber.org/zap" - - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - - "github.com/cosmos/gogoproto/proto" - - sdk "github.com/cosmos/cosmos-sdk/types" - - ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" - - executortypes "github.com/initia-labs/opinit-bots/executor/types" - btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" - nodetypes "github.com/initia-labs/opinit-bots/node/types" - "github.com/initia-labs/opinit-bots/types" -) - -func (bs *BatchSubmitter) rawBlockHandler(ctx context.Context, args nodetypes.RawBlockArgs) error { - // clear processed messages - bs.processedMsgs = bs.processedMsgs[:0] - - pbb := new(cmtproto.Block) - err := proto.Unmarshal(args.BlockBytes, pbb) - if err != nil { - return errors.Wrap(err, "failed to unmarshal block") - } - - err = bs.prepareBatch(args.BlockHeight) - if err != nil { - return errors.Wrap(err, "failed to prepare batch") - } - - blockBytes, err := bs.emptyOracleData(pbb) - if err != nil { - return err - } - - _, err = bs.handleBatch(blockBytes) - if err != nil { - return errors.Wrap(err, "failed to handle batch") - } - - err = bs.checkBatch(ctx, args.BlockHeight, args.LatestHeight, pbb.Header.Time) - if err != nil { - return errors.Wrap(err, "failed to check batch") - } - - // store the processed state into db with batch operation - batchKVs := make([]types.RawKV, 0) - batchKVs = append(batchKVs, bs.node.SyncInfoToRawKV(args.BlockHeight)) - batchMsgKVs, err := bs.da.ProcessedMsgsToRawKV(bs.processedMsgs, false) - if err != nil { - return errors.Wrap(err, "failed to convert processed messages to raw key value") - } - batchKVs = append(batchKVs, batchMsgKVs...) - - kv, err := bs.localBatchInfoToRawKV() - if err != nil { - return err - } - batchKVs = append(batchKVs, kv) - - err = bs.db.RawBatchSet(batchKVs...) - if err != nil { - return errors.Wrap(err, "failed to set raw batch") - } - // broadcast processed messages - for _, processedMsg := range bs.processedMsgs { - bs.da.BroadcastMsgs(processedMsg) - } - return nil -} - -func (bs *BatchSubmitter) prepareBatch(blockHeight int64) error { - err := bs.loadLocalBatchInfo() - if err != nil { - return err - } - - // check whether the requested block height is reached to the l2 block number of the next batch info. - if nextBatchInfo := bs.NextBatchInfo(); nextBatchInfo != nil && types.MustUint64ToInt64(nextBatchInfo.Output.L2BlockNumber) < blockHeight { - // if the next batch info is reached, finalize the current batch and update the batch info. 
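The RawKV batching being removed here appears to be superseded by the Stage type added in db/stage.go above: writes accumulate in an in-memory leveldb.Batch plus a kvmap for read-your-writes, and reach the parent database only on Commit. A minimal sketch, using the same type assertion as db/stage_test.go:

```go
package main

import (
	"fmt"

	"github.com/initia-labs/opinit-bots/db"
)

func main() {
	parent, err := db.NewMemDB()
	if err != nil {
		panic(err)
	}
	stage := parent.NewStage().(db.Stage)

	// Staged writes are visible through the stage before commit...
	if err := stage.Set([]byte("synced_height"), []byte("42")); err != nil {
		panic(err)
	}
	v, _ := stage.Get([]byte("synced_height"))
	fmt.Printf("staged: %s\n", v)

	// ...but hit the parent database only when the whole batch is committed.
	if err := stage.Commit(); err != nil {
		panic(err)
	}
	v, err = parent.Get([]byte("synced_height"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("committed: %s\n", v)
}
```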
- if bs.batchWriter != nil { - err := bs.batchWriter.Close() - if err != nil { - return errors.Wrap(err, "failed to close batch writer") - } - } - err := bs.batchFile.Truncate(0) - if err != nil { - return errors.Wrap(err, "failed to truncate batch file") - } - _, err = bs.batchFile.Seek(0, 0) - if err != nil { - return errors.Wrap(err, "failed to seek batch file") - } - - // save sync info - err = bs.node.SaveSyncInfo(types.MustUint64ToInt64(nextBatchInfo.Output.L2BlockNumber)) - if err != nil { - return errors.Wrap(err, "failed to save sync info") - } - bs.localBatchInfo.Start = types.MustUint64ToInt64(nextBatchInfo.Output.L2BlockNumber) + 1 - bs.localBatchInfo.End = 0 - bs.localBatchInfo.BatchFileSize = 0 - err = bs.saveLocalBatchInfo() - if err != nil { - return err - } - // set last processed block height to l2 block number - bs.node.SetSyncInfo(types.MustUint64ToInt64(nextBatchInfo.Output.L2BlockNumber)) - bs.DequeueBatchInfo() - - // error will restart block process from nextBatchInfo.Output.L2BlockNumber + 1 - panic(fmt.Errorf("batch info updated: reset from %d", nextBatchInfo.Output.L2BlockNumber)) - } - - if bs.localBatchInfo.End != 0 { - // reset batch file - err := bs.batchFile.Truncate(0) - if err != nil { - return err - } - _, err = bs.batchFile.Seek(0, 0) - if err != nil { - return err - } - - bs.localBatchInfo.BatchFileSize = 0 - bs.localBatchInfo.Start = blockHeight - bs.localBatchInfo.End = 0 - - bs.batchWriter.Reset(bs.batchFile) - } - return nil -} - -// write block bytes to batch file -func (bs *BatchSubmitter) handleBatch(blockBytes []byte) (int, error) { - return bs.batchWriter.Write(prependLength(blockBytes)) -} - -// finalize batch and create batch messages -func (bs *BatchSubmitter) finalizeBatch(ctx context.Context, blockHeight int64) error { - // write last block's commit to batch file - rawCommit, err := bs.node.GetRPCClient().QueryRawCommit(ctx, blockHeight) - if err != nil { - return errors.Wrap(err, "failed to query raw commit") - } - _, err = bs.batchWriter.Write(prependLength(rawCommit)) - if err != nil { - return errors.Wrap(err, "failed to write raw commit") - } - err = bs.batchWriter.Close() - if err != nil { - return errors.Wrap(err, "failed to close batch writer") - } - fileSize, err := bs.batchFileSize(false) - if err != nil { - return err - } - bs.localBatchInfo.BatchFileSize = fileSize - - batchBuffer := make([]byte, bs.batchCfg.MaxChunkSize) - checksums := make([][]byte, 0) - - // TODO: improve this logic to avoid hold all the batch data in memory - chunks := make([][]byte, 0) - for offset := int64(0); ; { - readLength, err := bs.batchFile.ReadAt(batchBuffer, offset) - if err != nil && err != io.EOF { - return err - } else if readLength == 0 { - break - } - - // trim the buffer to the actual read length - chunk := bytes.Clone(batchBuffer[:readLength]) - chunks = append(chunks, chunk) - - checksum := executortypes.GetChecksumFromChunk(chunk) - checksums = append(checksums, checksum[:]) - if int64(readLength) < bs.batchCfg.MaxChunkSize { - break - } - offset += int64(readLength) - } - - headerData := executortypes.MarshalBatchDataHeader( - types.MustInt64ToUint64(bs.localBatchInfo.Start), - types.MustInt64ToUint64(bs.localBatchInfo.End), - checksums, - ) - - msg, sender, err := bs.da.CreateBatchMsg(headerData) - if err != nil { - return err - } else if msg != nil { - bs.processedMsgs = append(bs.processedMsgs, btypes.ProcessedMsgs{ - Sender: sender, - Msgs: []sdk.Msg{msg}, - Timestamp: time.Now().UnixNano(), - Save: true, - }) - } - - for i, chunk 
:= range chunks { - chunkData := executortypes.MarshalBatchDataChunk( - types.MustInt64ToUint64(bs.localBatchInfo.Start), - types.MustInt64ToUint64(bs.localBatchInfo.End), - types.MustInt64ToUint64(int64(i)), - types.MustInt64ToUint64(int64(len(checksums))), - chunk, - ) - msg, sender, err := bs.da.CreateBatchMsg(chunkData) - if err != nil { - return err - } else if msg != nil { - bs.processedMsgs = append(bs.processedMsgs, btypes.ProcessedMsgs{ - Sender: sender, - Msgs: []sdk.Msg{msg}, - Timestamp: time.Now().UnixNano(), - Save: true, - }) - } - } - - bs.logger.Info("finalize batch", - zap.Int64("height", blockHeight), - zap.Int64("batch start", bs.localBatchInfo.Start), - zap.Int64("batch end", bs.localBatchInfo.End), - zap.Int64("batch file size ", bs.localBatchInfo.BatchFileSize), - zap.Int("chunks", len(checksums)), - zap.Int("txs", len(bs.processedMsgs)), - ) - return nil -} - -func (bs *BatchSubmitter) checkBatch(ctx context.Context, blockHeight int64, latestHeight int64, blockTime time.Time) error { - fileSize, err := bs.batchFileSize(true) - if err != nil { - return err - } - - bs.localBatchInfo.BatchFileSize = fileSize - // if the block time is after the last submission time + submission interval * 2/3 - // or the block time is after the last submission time + max submission time - // or the batch file size is greater than (max chunks - 1) * max chunk size - // then finalize the batch - if (blockHeight == latestHeight && blockTime.After(bs.localBatchInfo.LastSubmissionTime.Add(bs.bridgeInfo.BridgeConfig.SubmissionInterval*2/3))) || - (blockHeight == latestHeight && blockTime.After(bs.localBatchInfo.LastSubmissionTime.Add(time.Duration(bs.batchCfg.MaxSubmissionTime)*time.Second))) || - fileSize > (bs.batchCfg.MaxChunks-1)*bs.batchCfg.MaxChunkSize { - - // finalize the batch - bs.LastBatchEndBlockNumber = blockHeight - bs.localBatchInfo.LastSubmissionTime = blockTime - bs.localBatchInfo.End = blockHeight - - err := bs.finalizeBatch(ctx, blockHeight) - if err != nil { - return errors.Wrap(err, "failed to finalize batch") - } - } - return nil -} - -func (bs *BatchSubmitter) batchFileSize(flush bool) (int64, error) { - if bs.batchFile == nil { - return 0, errors.New("batch file is not initialized") - } - if flush { - err := bs.batchWriter.Flush() - if err != nil { - return 0, errors.Wrap(err, "failed to flush batch writer") - } - } - - info, err := bs.batchFile.Stat() - if err != nil { - return 0, errors.Wrap(err, "failed to get batch file stat") - } - return info.Size(), nil -} - -// UpdateBatchInfo appends the batch info with the given chain, submitter, output index, and l2 block number -func (bs *BatchSubmitter) UpdateBatchInfo(chain string, submitter string, outputIndex uint64, l2BlockNumber int64) { - bs.batchInfoMu.Lock() - defer bs.batchInfoMu.Unlock() - - // check if the batch info is already updated - if types.MustUint64ToInt64(bs.batchInfos[len(bs.batchInfos)-1].Output.L2BlockNumber) >= l2BlockNumber { - return - } - - bs.batchInfos = append(bs.batchInfos, ophosttypes.BatchInfoWithOutput{ - BatchInfo: ophosttypes.BatchInfo{ - ChainType: ophosttypes.BatchInfo_ChainType(ophosttypes.BatchInfo_ChainType_value["CHAIN_TYPE_"+chain]), - Submitter: submitter, - }, - Output: ophosttypes.Output{ - L2BlockNumber: types.MustInt64ToUint64(l2BlockNumber), - }, - }) -} - -// BatchInfo returns the current batch info -func (bs *BatchSubmitter) BatchInfo() *ophosttypes.BatchInfoWithOutput { - bs.batchInfoMu.Lock() - defer bs.batchInfoMu.Unlock() - - return &bs.batchInfos[0] -} - -// 
NextBatchInfo returns the next batch info in the queue -func (bs *BatchSubmitter) NextBatchInfo() *ophosttypes.BatchInfoWithOutput { - bs.batchInfoMu.Lock() - defer bs.batchInfoMu.Unlock() - if len(bs.batchInfos) == 1 { - return nil - } - return &bs.batchInfos[1] -} - -// DequeueBatchInfo removes the first batch info from the queue -func (bs *BatchSubmitter) DequeueBatchInfo() { - bs.batchInfoMu.Lock() - defer bs.batchInfoMu.Unlock() - - bs.batchInfos = bs.batchInfos[1:] -} diff --git a/executor/batch/noop_da.go b/executor/batch/noop_da.go deleted file mode 100644 index 6a03be6..0000000 --- a/executor/batch/noop_da.go +++ /dev/null @@ -1,30 +0,0 @@ -package batch - -import ( - "context" - - executortypes "github.com/initia-labs/opinit-bots/executor/types" - btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" - nodetypes "github.com/initia-labs/opinit-bots/node/types" - "github.com/initia-labs/opinit-bots/types" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -var _ executortypes.DANode = &NoopDA{} - -type NoopDA struct { -} - -func NewNoopDA() *NoopDA { - return &NoopDA{} -} - -func (n NoopDA) Start(_ context.Context) {} -func (n NoopDA) HasKey() bool { return false } -func (n NoopDA) CreateBatchMsg(_ []byte) (sdk.Msg, string, error) { return nil, "", nil } -func (n NoopDA) BroadcastMsgs(nil btypes.ProcessedMsgs) {} -func (n NoopDA) ProcessedMsgsToRawKV(_ []btypes.ProcessedMsgs, _ bool) ([]types.RawKV, error) { - return nil, nil -} -func (n NoopDA) GetNodeStatus() (nodetypes.Status, error) { return nodetypes.Status{}, nil } diff --git a/executor/batch/utils.go b/executor/batch/utils.go deleted file mode 100644 index ab2301b..0000000 --- a/executor/batch/utils.go +++ /dev/null @@ -1,58 +0,0 @@ -package batch - -import ( - "encoding/binary" - - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - "github.com/cosmos/gogoproto/proto" - - sdk "github.com/cosmos/cosmos-sdk/types" - - opchildtypes "github.com/initia-labs/OPinit/x/opchild/types" - "github.com/initia-labs/opinit-bots/txutils" -) - -// prependLength prepends the length of the data to the data. -func prependLength(data []byte) []byte { - lengthBytes := make([]byte, 8) - binary.LittleEndian.PutUint64(lengthBytes, uint64(len(data))) - return append(lengthBytes, data...) -} - -// emptyOracleData converts the MsgUpdateOracle messages's data field to empty -// to decrease the size of the batch. 
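prependLength above defines the framing the batch file relies on: every block (and the trailing raw commit) is stored as an 8-byte little-endian length followed by the payload. A sketch of the round trip; readFrame is a hypothetical inverse for illustration, not part of the codebase:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// prependLength mirrors the helper above: 8-byte little-endian length, then data.
func prependLength(data []byte) []byte {
	lengthBytes := make([]byte, 8)
	binary.LittleEndian.PutUint64(lengthBytes, uint64(len(data)))
	return append(lengthBytes, data...)
}

// readFrame (hypothetical) splits one length-prefixed frame off the stream.
func readFrame(buf []byte) (frame, rest []byte) {
	n := binary.LittleEndian.Uint64(buf[:8])
	return buf[8 : 8+n], buf[8+n:]
}

func main() {
	stream := append(prependLength([]byte("block1")), prependLength([]byte("block2"))...)
	f1, rest := readFrame(stream)
	f2, _ := readFrame(rest)
	fmt.Printf("%s %s\n", f1, f2) // block1 block2
}
```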
-func (bs *BatchSubmitter) emptyOracleData(pbb *cmtproto.Block) ([]byte, error) { - for i, txBytes := range pbb.Data.GetTxs() { - txConfig := bs.node.GetTxConfig() - tx, err := txutils.DecodeTx(txConfig, txBytes) - if err != nil { - // ignore not registered tx in codec - continue - } - - msgs := tx.GetMsgs() - if len(msgs) != 1 { - continue - } - - if msg, ok := msgs[0].(*opchildtypes.MsgUpdateOracle); ok { - msg.Data = []byte{} - tx, err := txutils.ChangeMsgsFromTx(txConfig, tx, []sdk.Msg{msg}) - if err != nil { - return nil, err - } - convertedTxBytes, err := txutils.EncodeTx(txConfig, tx) - if err != nil { - return nil, err - } - pbb.Data.Txs[i] = convertedTxBytes - } - } - - // convert block to bytes - blockBytes, err := proto.Marshal(pbb) - if err != nil { - return nil, err - } - return blockBytes, nil -} diff --git a/executor/batchsubmitter/batch.go b/executor/batchsubmitter/batch.go new file mode 100644 index 0000000..3a934ab --- /dev/null +++ b/executor/batchsubmitter/batch.go @@ -0,0 +1,235 @@ +package batchsubmitter + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "time" + + "github.com/pkg/errors" + "go.uber.org/zap" + + sdk "github.com/cosmos/cosmos-sdk/types" + + executortypes "github.com/initia-labs/opinit-bots/executor/types" + "github.com/initia-labs/opinit-bots/node" + btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" + "github.com/initia-labs/opinit-bots/types" +) + +// prepareBatch prepares the batch info for the given block height. +// if there is more than one batch info in the queue and the start block height of the local batch info is greater than the l2 block number of the next batch info, +// it finalizes the current batch and panics to restart the block process from the next batch info's l2 block number + 1. +func (bs *BatchSubmitter) prepareBatch(blockHeight int64) error { + localBatchInfo, err := GetLocalBatchInfo(bs.DB()) + if err != nil { + return errors.Wrap(err, "failed to get local batch info") + } + bs.localBatchInfo = &localBatchInfo + + // check whether the requested block height has reached the l2 block number of the next batch info. + if nextBatchInfo := bs.NextBatchInfo(); nextBatchInfo != nil && + types.MustUint64ToInt64(nextBatchInfo.Output.L2BlockNumber) < bs.localBatchInfo.Start { + // if the next batch info is reached, finalize the current batch and update the batch info.
+		if bs.batchWriter != nil {
+			err := bs.batchWriter.Close()
+			if err != nil {
+				return errors.Wrap(err, "failed to close batch writer")
+			}
+		}
+		err = bs.emptyBatchFile()
+		if err != nil {
+			return errors.Wrap(err, "failed to empty batch file")
+		}
+
+		bs.localBatchInfo.Start = types.MustUint64ToInt64(nextBatchInfo.Output.L2BlockNumber) + 1
+		bs.localBatchInfo.End = 0
+		bs.localBatchInfo.BatchSize = 0
+		err = SaveLocalBatchInfo(bs.DB(), *bs.localBatchInfo)
+		if err != nil {
+			return errors.Wrap(err, "failed to save local batch info")
+		}
+
+		err = node.SetSyncedHeight(bs.DB(), types.MustUint64ToInt64(nextBatchInfo.Output.L2BlockNumber))
+		if err != nil {
+			return errors.Wrap(err, "failed to set synced height")
+		}
+
+		// the panic restarts the block process from nextBatchInfo.Output.L2BlockNumber + 1
+		panic(fmt.Errorf("batch info updated: reset from %d", nextBatchInfo.Output.L2BlockNumber))
+	}
+
+	// if the current batch is finalized, reset the batch file and batch info
+	if bs.localBatchInfo.End != 0 {
+		err = bs.emptyBatchFile()
+		if err != nil {
+			return errors.Wrap(err, "failed to empty batch file")
+		}
+
+		bs.localBatchInfo.Start = blockHeight
+		bs.localBatchInfo.End = 0
+		bs.localBatchInfo.BatchSize = 0
+		bs.batchWriter.Reset(bs.batchFile)
+	}
+	return nil
+}
+
+// handleBatch writes the block bytes to the batch file.
+func (bs *BatchSubmitter) handleBatch(blockBytes []byte) (int, error) {
+	if len(blockBytes) == 0 {
+		return 0, errors.New("block bytes are empty")
+	}
+	return bs.batchWriter.Write(prependLength(blockBytes))
+}
+
+// finalizeBatch finalizes the batch by writing the last block's commit to the batch file.
+// it creates a header message carrying the chunk checksums, then splits the batch data
+// into chunks and appends one batch message per chunk to the processed messages.
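The header/chunk layout produced below lets a consumer verify each chunk independently against the checksums carried in the header. A sketch of that check, assuming the same executortypes.GetChecksumFromChunk helper used in finalizeBatch; verifyChunk itself is hypothetical and not part of this PR:

import (
	"bytes"
	"fmt"

	executortypes "github.com/initia-labs/opinit-bots/executor/types"
)

// verifyChunk checks that the i-th received chunk hashes to the i-th checksum
// carried in the batch data header.
func verifyChunk(chunk []byte, checksums [][]byte, i int) error {
	sum := executortypes.GetChecksumFromChunk(chunk)
	if !bytes.Equal(sum[:], checksums[i]) {
		return fmt.Errorf("chunk %d: checksum mismatch", i)
	}
	return nil
}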
+func (bs *BatchSubmitter) finalizeBatch(ctx types.Context, blockHeight int64) error {
+	// write last block's commit to batch file
+	rawCommit, err := bs.node.GetRPCClient().QueryRawCommit(ctx, blockHeight)
+	if err != nil {
+		return errors.Wrap(err, "failed to query raw commit")
+	}
+	_, err = bs.batchWriter.Write(prependLength(rawCommit))
+	if err != nil {
+		return errors.Wrap(err, "failed to write raw commit")
+	}
+	err = bs.batchWriter.Close()
+	if err != nil {
+		return errors.Wrap(err, "failed to close batch writer")
+	}
+	fileSize, err := bs.batchFileSize(false)
+	if err != nil {
+		return errors.Wrap(err, "failed to get batch file size")
+	}
+	bs.localBatchInfo.BatchSize = fileSize
+
+	batchBuffer := make([]byte, bs.batchCfg.MaxChunkSize)
+	checksums := make([][]byte, 0)
+
+	// TODO: improve this logic to avoid holding all the batch data in memory
+	chunks := make([][]byte, 0)
+	for offset := int64(0); ; {
+		readLength, err := bs.batchFile.ReadAt(batchBuffer, offset)
+		if err != nil && err != io.EOF {
+			return errors.Wrap(err, "failed to read batch file")
+		} else if readLength == 0 {
+			break
+		}
+
+		// trim the buffer to the actual read length
+		chunk := bytes.Clone(batchBuffer[:readLength])
+		chunks = append(chunks, chunk)
+
+		checksum := executortypes.GetChecksumFromChunk(chunk)
+		checksums = append(checksums, checksum[:])
+		if int64(readLength) < bs.batchCfg.MaxChunkSize {
+			break
+		}
+		offset += int64(readLength)
+	}
+
+	headerData := executortypes.MarshalBatchDataHeader(
+		types.MustInt64ToUint64(bs.localBatchInfo.Start),
+		types.MustInt64ToUint64(bs.localBatchInfo.End),
+		checksums,
+	)
+
+	msg, sender, err := bs.da.CreateBatchMsg(headerData)
+	if err != nil {
+		return errors.Wrap(err, "failed to create batch msg")
+	} else if msg != nil {
+		bs.processedMsgs = append(bs.processedMsgs, btypes.ProcessedMsgs{
+			Sender:    sender,
+			Msgs:      []sdk.Msg{msg},
+			Timestamp: types.CurrentNanoTimestamp(),
+			Save:      true,
+		})
+	}
+
+	for i, chunk := range chunks {
+		chunkData := executortypes.MarshalBatchDataChunk(
+			types.MustInt64ToUint64(bs.localBatchInfo.Start),
+			types.MustInt64ToUint64(bs.localBatchInfo.End),
+			types.MustInt64ToUint64(int64(i)),
+			types.MustInt64ToUint64(int64(len(checksums))),
+			chunk,
+		)
+		msg, sender, err := bs.da.CreateBatchMsg(chunkData)
+		if err != nil {
+			return errors.Wrap(err, "failed to create batch msg")
+		} else if msg != nil {
+			bs.processedMsgs = append(bs.processedMsgs, btypes.ProcessedMsgs{
+				Sender:    sender,
+				Msgs:      []sdk.Msg{msg},
+				Timestamp: types.CurrentNanoTimestamp(),
+				Save:      true,
+			})
+		}
+	}
+
+	ctx.Logger().Info("finalize batch",
+		zap.Int64("height", blockHeight),
+		zap.Int64("batch start", bs.localBatchInfo.Start),
+		zap.Int64("batch end", bs.localBatchInfo.End),
+		zap.Int64("batch size", bs.localBatchInfo.BatchSize),
+		zap.Int("chunks", len(checksums)),
+		zap.Int("txs", len(bs.processedMsgs)),
+	)
+	return nil
+}
+
+// checkBatch checks whether the batch should be finalized or not.
+// the batch is finalized when the block is the latest block and its time is after
+// the last submission time + submission interval * 2/3,
+// or the block is the latest block and its time is after the last submission time + max submission time,
+// or the batch file size is greater than (max chunks - 1) * max chunk size.
+func (bs *BatchSubmitter) checkBatch(blockHeight int64, latestHeight int64, blockTime time.Time) bool {
+	if (blockHeight == latestHeight && blockTime.After(bs.localBatchInfo.LastSubmissionTime.Add(bs.bridgeInfo.BridgeConfig.SubmissionInterval*2/3))) ||
+		(blockHeight == latestHeight && blockTime.After(bs.localBatchInfo.LastSubmissionTime.Add(time.Duration(bs.batchCfg.MaxSubmissionTime)*time.Second))) ||
+		bs.localBatchInfo.BatchSize > (bs.batchCfg.MaxChunks-1)*bs.batchCfg.MaxChunkSize {
+		return true
+	}
+	return false
+}
+
+// batchFileSize returns the size of the batch file.
+func (bs *BatchSubmitter) batchFileSize(flush bool) (int64, error) {
+	if bs.batchFile == nil {
+		return 0, errors.New("batch file is not initialized")
+	}
+	if flush {
+		err := bs.batchWriter.Flush()
+		if err != nil {
+			return 0, errors.Wrap(err, "failed to flush batch writer")
+		}
+	}
+
+	info, err := bs.batchFile.Stat()
+	if err != nil {
+		return 0, errors.Wrap(err, "failed to get batch file stat")
+	}
+	return info.Size(), nil
+}
+
+func (bs *BatchSubmitter) emptyBatchFile() error {
+	// reset batch file
+	err := bs.batchFile.Truncate(0)
+	if err != nil {
+		return errors.Wrap(err, "failed to truncate batch file")
+	}
+	_, err = bs.batchFile.Seek(0, 0)
+	if err != nil {
+		return errors.Wrap(err, "failed to seek batch file")
+	}
+	return nil
+}
+
+// prependLength prepends the length of the data to the data.
+func prependLength(data []byte) []byte {
+	lengthBytes := make([]byte, 8)
+	binary.LittleEndian.PutUint64(lengthBytes, uint64(len(data)))
+	return append(lengthBytes, data...)
+} diff --git a/executor/batchsubmitter/batch_info.go b/executor/batchsubmitter/batch_info.go new file mode 100644 index 0000000..3e01106 --- /dev/null +++ b/executor/batchsubmitter/batch_info.go @@ -0,0 +1,59 @@ +package batchsubmitter + +import ( + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/initia-labs/opinit-bots/types" +) + +// UpdateBatchInfo appends the batch info with the given chain, submitter, output index, and l2 block number +func (bs *BatchSubmitter) UpdateBatchInfo(chain string, submitter string, outputIndex uint64, l2BlockNumber int64) { + bs.batchInfoMu.Lock() + defer bs.batchInfoMu.Unlock() + + if len(bs.batchInfos) == 0 { + panic("batch info must be set before starting the batch submitter") + } + + // check if the batch info is already updated + if types.MustUint64ToInt64(bs.batchInfos[len(bs.batchInfos)-1].Output.L2BlockNumber) >= l2BlockNumber { + return + } + + bs.batchInfos = append(bs.batchInfos, ophosttypes.BatchInfoWithOutput{ + BatchInfo: ophosttypes.BatchInfo{ + ChainType: ophosttypes.BatchInfo_ChainType(ophosttypes.BatchInfo_ChainType_value["CHAIN_TYPE_"+chain]), + Submitter: submitter, + }, + Output: ophosttypes.Output{ + L2BlockNumber: types.MustInt64ToUint64(l2BlockNumber), + }, + }) +} + +// BatchInfo returns the current batch info +// There is always at least one batch info in the queue +func (bs *BatchSubmitter) BatchInfo() *ophosttypes.BatchInfoWithOutput { + bs.batchInfoMu.Lock() + defer bs.batchInfoMu.Unlock() + + return &bs.batchInfos[0] +} + +// NextBatchInfo returns the next batch info in the queue +func (bs *BatchSubmitter) NextBatchInfo() *ophosttypes.BatchInfoWithOutput { + bs.batchInfoMu.Lock() + defer bs.batchInfoMu.Unlock() + if len(bs.batchInfos) == 1 { + return nil + } + return &bs.batchInfos[1] +} + +// DequeueBatchInfo removes the first batch info from the queue +// There is always at least one batch info in the queue +func (bs *BatchSubmitter) DequeueBatchInfo() { + bs.batchInfoMu.Lock() + defer bs.batchInfoMu.Unlock() + + bs.batchInfos = bs.batchInfos[1:] +} diff --git a/executor/batchsubmitter/batch_info_test.go b/executor/batchsubmitter/batch_info_test.go new file mode 100644 index 0000000..31783a7 --- /dev/null +++ b/executor/batchsubmitter/batch_info_test.go @@ -0,0 +1,135 @@ +package batchsubmitter + +import ( + "sync" + "testing" + + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/initia-labs/opinit-bots/db" + "github.com/initia-labs/opinit-bots/node" + "github.com/initia-labs/opinit-bots/node/types" + childprovider "github.com/initia-labs/opinit-bots/provider/child" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestUpdateBatchInfo(t *testing.T) { + baseDB, err := db.NewMemDB() + require.NoError(t, err) + + batchDB := baseDB.WithPrefix([]byte("test_batch")) + appCodec, txConfig, err := childprovider.GetCodec("init") + require.NoError(t, err) + batchNode := node.NewTestNode(types.NodeConfig{}, batchDB, appCodec, txConfig, nil, nil) + + cases := []struct { + name string + existingBatchInfos []ophosttypes.BatchInfoWithOutput + chain string + submitter string + outputIndex uint64 + l2BlockNumber int64 + expectedBatchInfos []ophosttypes.BatchInfoWithOutput + panic bool + }{ + { + name: "success", + existingBatchInfos: []ophosttypes.BatchInfoWithOutput{ + { + BatchInfo: ophosttypes.BatchInfo{ + ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_INITIA, + Submitter: "submitter0", + }, + Output: ophosttypes.Output{ + L2BlockNumber: 100, + }, + }, + }, 
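+			// the new batch info carries a higher l2 block number (110 > 100),
+			// so it is appended after the existing entry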
+ chain: "INITIA", + submitter: "submitter1", + outputIndex: 1, + l2BlockNumber: 110, + expectedBatchInfos: []ophosttypes.BatchInfoWithOutput{ + { + BatchInfo: ophosttypes.BatchInfo{ + ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_INITIA, + Submitter: "submitter0", + }, + Output: ophosttypes.Output{ + L2BlockNumber: 100, + }, + }, + { + BatchInfo: ophosttypes.BatchInfo{ + ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_INITIA, + Submitter: "submitter1", + }, + Output: ophosttypes.Output{ + L2BlockNumber: 110, + }, + }, + }, + panic: false, + }, + { + name: "past l2 block number", + existingBatchInfos: []ophosttypes.BatchInfoWithOutput{ + { + BatchInfo: ophosttypes.BatchInfo{ + ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_INITIA, + Submitter: "submitter0", + }, + Output: ophosttypes.Output{ + L2BlockNumber: 100, + }, + }, + }, + chain: "INITIA", + submitter: "submitter1", + outputIndex: 1, + l2BlockNumber: 90, + expectedBatchInfos: []ophosttypes.BatchInfoWithOutput{ + { + BatchInfo: ophosttypes.BatchInfo{ + ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_INITIA, + Submitter: "submitter0", + }, + Output: ophosttypes.Output{ + L2BlockNumber: 100, + }, + }, + }, + panic: false, + }, + { + name: "empty batch infos", + existingBatchInfos: []ophosttypes.BatchInfoWithOutput{}, + chain: "test_chain", + submitter: "test_submitter", + outputIndex: 1, + l2BlockNumber: 1, + expectedBatchInfos: []ophosttypes.BatchInfoWithOutput{}, + panic: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + batchSubmitter := BatchSubmitter{ + node: batchNode, + batchInfoMu: &sync.Mutex{}, + batchInfos: tc.existingBatchInfos, + } + + if tc.panic { + require.Panics(t, func() { + batchSubmitter.UpdateBatchInfo(tc.chain, tc.submitter, tc.outputIndex, tc.l2BlockNumber) + }) + return + } + + batchSubmitter.UpdateBatchInfo(tc.chain, tc.submitter, tc.outputIndex, tc.l2BlockNumber) + assert.Equal(t, tc.expectedBatchInfos, batchSubmitter.batchInfos) + }) + } +} diff --git a/executor/batch/batch.go b/executor/batchsubmitter/batch_submitter.go similarity index 72% rename from executor/batch/batch.go rename to executor/batchsubmitter/batch_submitter.go index dd05045..b9c6c0f 100644 --- a/executor/batch/batch.go +++ b/executor/batchsubmitter/batch_submitter.go @@ -1,15 +1,13 @@ -package batch +package batchsubmitter import ( "compress/gzip" "context" - "errors" "os" "sync" "go.uber.org/zap" - opchildtypes "github.com/initia-labs/OPinit/x/opchild/types" ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" executortypes "github.com/initia-labs/opinit-bots/executor/types" @@ -18,6 +16,7 @@ import ( nodetypes "github.com/initia-labs/opinit-bots/node/types" childprovider "github.com/initia-labs/opinit-bots/provider/child" "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" ) type hostNode interface { @@ -35,10 +34,6 @@ type BatchSubmitter struct { cfg nodetypes.NodeConfig batchCfg executortypes.BatchConfig - db types.DB - logger *zap.Logger - - opchildQueryClient opchildtypes.QueryClient batchInfoMu *sync.Mutex batchInfos []ophosttypes.BatchInfoWithOutput @@ -48,8 +43,9 @@ type BatchSubmitter struct { processedMsgs []btypes.ProcessedMsgs - chainID string - homePath string + chainID string + + stage types.CommitDB // status info LastBatchEndBlockNumber int64 @@ -58,8 +54,8 @@ type BatchSubmitter struct { func NewBatchSubmitterV1( cfg nodetypes.NodeConfig, batchCfg executortypes.BatchConfig, - db types.DB, logger *zap.Logger, - chainID, homePath string, + db types.DB, + chainID string, ) 
*BatchSubmitter { appCodec, txConfig, err := childprovider.GetCodec(cfg.Bech32Prefix) if err != nil { @@ -68,9 +64,9 @@ func NewBatchSubmitterV1( cfg.BroadcasterConfig = nil cfg.ProcessType = nodetypes.PROCESS_TYPE_RAW - node, err := node.NewNode(cfg, db, logger, appCodec, txConfig) + node, err := node.NewNode(cfg, db, appCodec, txConfig) if err != nil { - panic(err) + panic(errors.Wrap(err, "failed to create node")) } ch := &BatchSubmitter{ @@ -81,32 +77,28 @@ func NewBatchSubmitterV1( cfg: cfg, batchCfg: batchCfg, - db: db, - logger: logger, - - opchildQueryClient: opchildtypes.NewQueryClient(node.GetRPCClient()), - batchInfoMu: &sync.Mutex{}, localBatchInfo: &executortypes.LocalBatchInfo{}, processedMsgs: make([]btypes.ProcessedMsgs, 0), - homePath: homePath, chainID: chainID, + + stage: db.NewStage(), } return ch } -func (bs *BatchSubmitter) Initialize(ctx context.Context, processedHeight int64, host hostNode, bridgeInfo ophosttypes.QueryBridgeResponse) error { - err := bs.node.Initialize(ctx, processedHeight, nil) +func (bs *BatchSubmitter) Initialize(ctx types.Context, syncedHeight int64, host hostNode, bridgeInfo ophosttypes.QueryBridgeResponse) error { + err := bs.node.Initialize(ctx, syncedHeight, nil) if err != nil { - return err + return errors.Wrap(err, "failed to initialize node") } bs.host = host bs.bridgeInfo = bridgeInfo res, err := bs.host.QueryBatchInfos(ctx, bridgeInfo.BridgeId) if err != nil { - return err + return errors.Wrap(err, "failed to query batch infos") } bs.batchInfos = res.BatchInfos if len(bs.batchInfos) == 0 { @@ -120,34 +112,30 @@ func (bs *BatchSubmitter) Initialize(ctx context.Context, processedHeight int64, } fileFlag := os.O_CREATE | os.O_RDWR | os.O_APPEND - bs.batchFile, err = os.OpenFile(bs.homePath+"/batch", fileFlag, 0640) + bs.batchFile, err = os.OpenFile(ctx.HomePath()+"/batch", fileFlag, 0640) if err != nil { - return err + return errors.Wrap(err, "failed to open batch file") } if bs.node.HeightInitialized() { bs.localBatchInfo.Start = bs.node.GetHeight() bs.localBatchInfo.End = 0 - bs.localBatchInfo.BatchFileSize = 0 + bs.localBatchInfo.BatchSize = 0 - err = bs.saveLocalBatchInfo() + err = SaveLocalBatchInfo(bs.DB(), *bs.localBatchInfo) if err != nil { - return err + return errors.Wrap(err, "failed to save local batch info") } // reset batch file - err := bs.batchFile.Truncate(0) - if err != nil { - return err - } - _, err = bs.batchFile.Seek(0, 0) + err = bs.emptyBatchFile() if err != nil { - return err + return errors.Wrap(err, "failed to empty batch file") } } // linux command gzip use level 6 as default bs.batchWriter, err = gzip.NewWriterLevel(bs.batchFile, 6) if err != nil { - return err + return errors.Wrap(err, "failed to create gzip writer") } bs.node.RegisterRawBlockHandler(bs.rawBlockHandler) @@ -158,8 +146,8 @@ func (bs *BatchSubmitter) SetDANode(da executortypes.DANode) { bs.da = da } -func (bs *BatchSubmitter) Start(ctx context.Context) { - bs.logger.Info("batch start", zap.Int64("height", bs.node.GetHeight())) +func (bs *BatchSubmitter) Start(ctx types.Context) { + ctx.Logger().Info("batch start", zap.Int64("height", bs.node.GetHeight())) bs.node.Start(ctx) } @@ -187,3 +175,7 @@ func (bs *BatchSubmitter) DA() executortypes.DANode { func (bs BatchSubmitter) Node() *node.Node { return bs.node } + +func (bs BatchSubmitter) DB() types.DB { + return bs.node.DB() +} diff --git a/executor/batchsubmitter/batch_test.go b/executor/batchsubmitter/batch_test.go new file mode 100644 index 0000000..99aeb57 --- /dev/null +++ 
b/executor/batchsubmitter/batch_test.go @@ -0,0 +1,740 @@ +package batchsubmitter + +import ( + "compress/gzip" + "context" + "fmt" + "os" + "sync" + "testing" + "time" + + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + client "github.com/initia-labs/opinit-bots/client" + mockclient "github.com/initia-labs/opinit-bots/client/mock" + "github.com/initia-labs/opinit-bots/db" + executortypes "github.com/initia-labs/opinit-bots/executor/types" + "github.com/initia-labs/opinit-bots/node" + btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" + "github.com/initia-labs/opinit-bots/node/rpcclient" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + childprovider "github.com/initia-labs/opinit-bots/provider/child" + hostprovider "github.com/initia-labs/opinit-bots/provider/host" + "github.com/initia-labs/opinit-bots/types" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func TestPrepareBatch(t *testing.T) { + appCodec, txConfig, err := childprovider.GetCodec("init") + require.NoError(t, err) + + cases := []struct { + name string + existingLocalBatchInfo executortypes.LocalBatchInfo + batchInfoQueue []ophosttypes.BatchInfoWithOutput + blockHeight int64 + expectedLocalBatchInfo executortypes.LocalBatchInfo + expectedChanges []types.KV + err bool + panic bool + }{ + { + name: "not finalized batch info", + existingLocalBatchInfo: executortypes.LocalBatchInfo{ + Start: 1, + End: 0, + LastSubmissionTime: time.Time{}, + BatchSize: 100, + }, + batchInfoQueue: []ophosttypes.BatchInfoWithOutput{ + { + BatchInfo: ophosttypes.BatchInfo{ + ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_INITIA, + Submitter: "submitter0", + }, + }, + }, + blockHeight: 110, + expectedLocalBatchInfo: executortypes.LocalBatchInfo{ + Start: 1, + End: 0, + LastSubmissionTime: time.Time{}, + BatchSize: 100, + }, + expectedChanges: []types.KV{}, + err: false, + panic: false, + }, + { + name: "finalized batch info", + existingLocalBatchInfo: executortypes.LocalBatchInfo{ + Start: 1, + End: 100, + LastSubmissionTime: time.Unix(0, 10000).UTC(), + BatchSize: 100, + }, + batchInfoQueue: []ophosttypes.BatchInfoWithOutput{ + { + BatchInfo: ophosttypes.BatchInfo{ + ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_INITIA, + Submitter: "submitter0", + }, + }, + }, + blockHeight: 101, + expectedLocalBatchInfo: executortypes.LocalBatchInfo{ + Start: 101, + End: 0, + LastSubmissionTime: time.Unix(0, 10000).UTC(), + BatchSize: 0, + }, + expectedChanges: []types.KV{}, + err: false, + panic: false, + }, + { + name: "existing next batch info, not reached to the l2 block number of the next batch info", + existingLocalBatchInfo: executortypes.LocalBatchInfo{ + Start: 1, + End: 0, + LastSubmissionTime: time.Time{}, + BatchSize: 100, + }, + batchInfoQueue: []ophosttypes.BatchInfoWithOutput{ + { + BatchInfo: ophosttypes.BatchInfo{ + ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_INITIA, + Submitter: "submitter0", + }, + }, + { + BatchInfo: ophosttypes.BatchInfo{ + ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_CELESTIA, + Submitter: "submitter0", + }, + Output: ophosttypes.Output{ + L2BlockNumber: 200, + }, + }, + }, + blockHeight: 101, + expectedLocalBatchInfo: executortypes.LocalBatchInfo{ + Start: 1, + End: 0, + LastSubmissionTime: time.Time{}, + BatchSize: 100, + }, + expectedChanges: []types.KV{}, + err: false, + panic: false, + }, + { + name: "existing next batch info, reached to the l2 block number of the next batch info", + 
existingLocalBatchInfo: executortypes.LocalBatchInfo{ + Start: 51, + End: 0, + LastSubmissionTime: time.Unix(0, 10000).UTC(), + BatchSize: 100, + }, + batchInfoQueue: []ophosttypes.BatchInfoWithOutput{ + { + BatchInfo: ophosttypes.BatchInfo{ + ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_INITIA, + Submitter: "submitter0", + }, + }, + { + BatchInfo: ophosttypes.BatchInfo{ + ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_CELESTIA, + Submitter: "submitter0", + }, + Output: ophosttypes.Output{ + L2BlockNumber: 40, + }, + }, + }, + blockHeight: 110, + expectedLocalBatchInfo: executortypes.LocalBatchInfo{ + Start: 41, + End: 0, + LastSubmissionTime: time.Unix(0, 10000).UTC(), + BatchSize: 0, + }, + expectedChanges: []types.KV{ + { + Key: []byte("test_batch/local_batch_info"), + Value: []byte(`{"start":41,"end":0,"last_submission_time":"1970-01-01T00:00:00.00001Z","batch_size":0}`), + }, + { + Key: []byte("test_batch/synced_height"), + Value: []byte("40"), + }, + }, + err: false, + panic: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + baseDB, err := db.NewMemDB() + require.NoError(t, err) + + batchDB := baseDB.WithPrefix([]byte("test_batch")) + batchNode := node.NewTestNode(nodetypes.NodeConfig{}, batchDB, appCodec, txConfig, nil, nil) + + batchSubmitter := BatchSubmitter{ + node: batchNode, + batchInfoMu: &sync.Mutex{}, + batchInfos: tc.batchInfoQueue, + } + + err = SaveLocalBatchInfo(batchDB, tc.existingLocalBatchInfo) + require.NoError(t, err) + + batchSubmitter.batchFile, err = os.CreateTemp("", "batchfile") + require.NoError(t, err) + defer os.Remove(batchSubmitter.batchFile.Name()) + + batchSubmitter.batchWriter, err = gzip.NewWriterLevel(batchSubmitter.batchFile, 6) + require.NoError(t, err) + defer batchSubmitter.batchWriter.Close() + + if tc.panic { + require.Panics(t, func() { + batchSubmitter.prepareBatch(tc.blockHeight) //nolint:errcheck + }) + for _, expectedKV := range tc.expectedChanges { + value, err := baseDB.Get(expectedKV.Key) + require.NoError(t, err) + require.Equal(t, expectedKV.Value, value) + } + require.Equal(t, tc.expectedLocalBatchInfo, *batchSubmitter.localBatchInfo) + fileSize, err := batchSubmitter.batchFileSize(true) + require.NoError(t, err) + require.Equal(t, int64(0), fileSize) + } else { + err := batchSubmitter.prepareBatch(tc.blockHeight) + if tc.err { + require.Error(t, err) + } else { + require.NoError(t, err) + for _, expectedKV := range tc.expectedChanges { + value, err := baseDB.Get(expectedKV.Key) + require.NoError(t, err) + require.Equal(t, expectedKV.Value, value) + } + require.Equal(t, tc.expectedLocalBatchInfo, *batchSubmitter.localBatchInfo) + } + } + }) + } +} + +func TestHandleBatch(t *testing.T) { + var err error + + batchSubmitter := BatchSubmitter{} + batchSubmitter.batchFile, err = os.CreateTemp("", "batchfile") + require.NoError(t, err) + defer os.Remove(batchSubmitter.batchFile.Name()) + + batchSubmitter.batchWriter, err = gzip.NewWriterLevel(batchSubmitter.batchFile, 6) + require.NoError(t, err) + defer batchSubmitter.batchWriter.Close() + + blockBytes := []byte("block_bytes") + n, err := batchSubmitter.handleBatch(blockBytes) + require.NoError(t, err) + + require.Equal(t, n, 11+8) // len(block_bytes) + len(length_prefix) + + blockBytes = []byte("") + _, err = batchSubmitter.handleBatch(blockBytes) + require.Error(t, err) +} + +func TestFinalizeBatch(t *testing.T) { + baseDB, err := db.NewMemDB() + require.NoError(t, err) + + batchDB := baseDB.WithPrefix([]byte("test_batch")) + daDB := 
baseDB.WithPrefix([]byte("test_da")) + + appCodec, txConfig, err := childprovider.GetCodec("init") + require.NoError(t, err) + + mockCaller := mockclient.NewMockCaller() + rpcClient := rpcclient.NewRPCClientWithClient(appCodec, client.NewWithCaller(mockCaller)) + batchNode := node.NewTestNode(nodetypes.NodeConfig{}, batchDB, appCodec, txConfig, rpcClient, nil) + + hostCdc, _, err := hostprovider.GetCodec("init") + require.NoError(t, err) + + mockDA := NewMockDA(daDB, hostCdc, 1, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0") + + batchConfig := executortypes.BatchConfig{ + MaxChunks: 100, + MaxChunkSize: 10, + } + + batchSubmitter := BatchSubmitter{ + node: batchNode, + da: mockDA, + batchCfg: batchConfig, + localBatchInfo: &executortypes.LocalBatchInfo{ + Start: 1, + End: 10, + }, + processedMsgs: make([]btypes.ProcessedMsgs, 0), + } + batchSubmitter.batchFile, err = os.CreateTemp("", "batchfile") + require.NoError(t, err) + defer os.Remove(batchSubmitter.batchFile.Name()) + + batchSubmitter.batchWriter, err = gzip.NewWriterLevel(batchSubmitter.batchFile, 6) + require.NoError(t, err) + defer batchSubmitter.batchWriter.Close() + + mockCaller.SetRawCommit(10, []byte("commit_bytes")) + + logger, observedLogs := logCapturer() + ctx := types.NewContext(context.TODO(), logger, "") + + for i := 0; i < 10; i++ { + _, err := batchSubmitter.batchWriter.Write([]byte(fmt.Sprintf("block_bytes%d", i))) + if err != nil { + require.NoError(t, err) + } + } + + mockCount := int64(0) + mockTimestampFetcher := func() int64 { + mockCount++ + return mockCount + } + types.CurrentNanoTimestamp = mockTimestampFetcher + + err = batchSubmitter.finalizeBatch(ctx, 10) + require.NoError(t, err) + + logs := observedLogs.TakeAll() + require.Len(t, logs, 1) + + require.Equal(t, "finalize batch", logs[0].Message) + require.Equal(t, []zapcore.Field{ + zap.Int64("height", 10), + zap.Int64("batch start", 1), + zap.Int64("batch end", 10), + zap.Int64("batch size", 73), + zap.Int("chunks", 8), + zap.Int("txs", 9), + }, logs[0].Context) + + require.Len(t, batchSubmitter.processedMsgs, 9) + + require.Equal(t, btypes.ProcessedMsgs{ + Sender: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + Msgs: []sdk.Msg{ + &ophosttypes.MsgRecordBatch{ + Submitter: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + BridgeId: 1, + BatchBytes: []byte{ + 0x00, // type header + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // start 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, // end 10 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, // chunks length 8 + 0x21, 0x7f, 0xeb, 0x1e, 0x74, 0x90, 0x01, 0x5d, 0xd0, 0xa2, 0xb2, 0x31, 0xb9, 0xce, 0xa4, 0x58, 0x04, 0xdf, 0x3d, 0x2a, 0x9b, 0x37, 0x28, 0x7a, 0xc8, 0x61, 0xbb, 0x45, 0xb8, 0xc0, 0xde, 0x55, // checksum[0] + 0x08, 0x95, 0x3b, 0xa5, 0xb6, 0x25, 0xa4, 0x7b, 0x3e, 0xc0, 0xbe, 0xe6, 0x71, 0xab, 0xa1, 0x05, 0x62, 0x01, 0x8a, 0x03, 0xf3, 0xb8, 0xed, 0x84, 0x22, 0xa0, 0x22, 0x78, 0x61, 0xff, 0xdd, 0x7e, // checksum[1] + 0x1c, 0x65, 0xb4, 0x2e, 0x46, 0x1f, 0xb4, 0x37, 0x96, 0xa8, 0x12, 0x2a, 0x7e, 0xf7, 0xaf, 0x4d, 0xa4, 0x15, 0x8c, 0xe7, 0x92, 0x87, 0x2e, 0xbc, 0xc6, 0xd5, 0xe7, 0x61, 0x38, 0x1e, 0x92, 0xea, // checksum[2] + 0xad, 0x11, 0x06, 0x84, 0x21, 0x2f, 0xd0, 0x05, 0xe7, 0xaa, 0xc9, 0x04, 0x22, 0x7e, 0x1b, 0xc3, 0x04, 0x57, 0x52, 0x64, 0x34, 0xdf, 0x02, 0x56, 0x8d, 0x60, 0x09, 0x4b, 0xa9, 0x5e, 0x3c, 0x70, // checksum[3] + 0xb0, 0x60, 0x0f, 0x49, 0xee, 0x80, 0xc2, 0x63, 0xec, 0xc4, 0x4e, 0xfa, 0x76, 0x2f, 0x87, 0xde, 0x40, 0x6c, 0xd5, 0x4e, 0x68, 0x17, 0xbc, 0x6c, 0x46, 0x1c, 0x74, 0x44, 
0x4f, 0xc2, 0x15, 0xb8, // checksum[4] + 0xbe, 0x47, 0xc7, 0x55, 0xab, 0xe2, 0x47, 0x27, 0x22, 0x01, 0xc0, 0xc4, 0xbd, 0xe3, 0xe6, 0xb8, 0xf3, 0x84, 0x13, 0x51, 0x71, 0x10, 0x99, 0x96, 0xab, 0x05, 0x19, 0xca, 0xa0, 0x0e, 0x47, 0xfc, // checksum[5] + 0x18, 0x1b, 0xef, 0x86, 0x5a, 0x29, 0x8e, 0xc0, 0xc9, 0xcb, 0xb6, 0xe0, 0x87, 0xe0, 0x3c, 0x7d, 0x5f, 0x0c, 0xf1, 0xa3, 0x32, 0x0e, 0x11, 0xce, 0x3f, 0x27, 0xb2, 0x30, 0x8f, 0x41, 0x72, 0x51, // checksum[6] + 0x70, 0x9e, 0x80, 0xc8, 0x84, 0x87, 0xa2, 0x41, 0x1e, 0x1e, 0xe4, 0xdf, 0xb9, 0xf2, 0x2a, 0x86, 0x14, 0x92, 0xd2, 0x0c, 0x47, 0x65, 0x15, 0x0c, 0x0c, 0x79, 0x4a, 0xbd, 0x70, 0xf8, 0x14, 0x7c, // checksum[7] + }, + }, + }, + Timestamp: 1, + Save: true, + }, batchSubmitter.processedMsgs[0]) + + require.Equal(t, btypes.ProcessedMsgs{ + Sender: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + Msgs: []sdk.Msg{ + &ophosttypes.MsgRecordBatch{ + Submitter: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + BridgeId: 1, + BatchBytes: []byte{ + 0x01, // type chunk + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // start 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, // end 10 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // index 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, // chunks length 8 + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, // chunkdata + }, + }, + }, + Timestamp: 2, + Save: true, + }, batchSubmitter.processedMsgs[1]) + + require.Equal(t, btypes.ProcessedMsgs{ + Sender: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + Msgs: []sdk.Msg{ + &ophosttypes.MsgRecordBatch{ + Submitter: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + BridgeId: 1, + BatchBytes: []byte{ + 0x01, // type chunk + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // start 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, // end 10 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // index 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, // chunks length 8 + 0x4a, 0xca, 0xc9, 0x4f, 0xce, 0x8e, 0x4f, 0xaa, 0x2c, 0x49, // chunkdata + }, + }, + }, + Timestamp: 3, + Save: true, + }, batchSubmitter.processedMsgs[2]) + + require.Equal(t, btypes.ProcessedMsgs{ + Sender: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + Msgs: []sdk.Msg{ + &ophosttypes.MsgRecordBatch{ + Submitter: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + BridgeId: 1, + BatchBytes: []byte{ + 0x01, // type chunk + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // start 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, // end 10 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, // index 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, // chunks length 8 + 0x2d, 0x36, 0x40, 0x62, 0x1b, 0x22, 0xb1, 0x8d, 0x90, 0xd8, // chunkdata + }, + }, + }, + Timestamp: 4, + Save: true, + }, batchSubmitter.processedMsgs[3]) + + require.Equal(t, btypes.ProcessedMsgs{ + Sender: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + Msgs: []sdk.Msg{ + &ophosttypes.MsgRecordBatch{ + Submitter: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + BridgeId: 1, + BatchBytes: []byte{ + 0x01, // type chunk + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // start 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, // end 10 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, // index 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, // chunks length 8 + 0xc6, 0x48, 0x6c, 0x13, 0x24, 0xb6, 0x29, 0x12, 0xdb, 0x0c, // chunkdata + }, + }, + }, + Timestamp: 5, + Save: true, + }, batchSubmitter.processedMsgs[4]) + + require.Equal(t, btypes.ProcessedMsgs{ + Sender: 
"init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + Msgs: []sdk.Msg{ + &ophosttypes.MsgRecordBatch{ + Submitter: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + BridgeId: 1, + BatchBytes: []byte{ + 0x01, // type chunk + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // start 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, // end 10 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, // index 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, // chunks length 8 + 0x89, 0x6d, 0x8e, 0xc4, 0xb6, 0x40, 0x62, 0x5b, 0xf2, 0x30, // chunkdata + }, + }, + }, + Timestamp: 6, + Save: true, + }, batchSubmitter.processedMsgs[5]) + + require.Equal(t, btypes.ProcessedMsgs{ + Sender: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + Msgs: []sdk.Msg{ + &ophosttypes.MsgRecordBatch{ + Submitter: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + BridgeId: 1, + BatchBytes: []byte{ + 0x01, // type chunk + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // start 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, // end 10 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, // index 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, // chunks length 8 + 0x40, 0x40, 0x72, 0x7e, 0x6e, 0x6e, 0x66, 0x09, 0x44, 0x10, // chunkdata + }, + }, + }, + Timestamp: 7, + Save: true, + }, batchSubmitter.processedMsgs[6]) + + require.Equal(t, btypes.ProcessedMsgs{ + Sender: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + Msgs: []sdk.Msg{ + &ophosttypes.MsgRecordBatch{ + Submitter: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + BridgeId: 1, + BatchBytes: []byte{ + 0x01, // type chunk + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // start 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, // end 10 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, // index 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, // chunks length 8 + 0x10, 0x00, 0x00, 0xff, 0xff, 0x92, 0x7b, 0x8a, 0x85, 0x8c, // chunkdata + }, + }, + }, + Timestamp: 8, + Save: true, + }, batchSubmitter.processedMsgs[7]) + + require.Equal(t, btypes.ProcessedMsgs{ + Sender: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + Msgs: []sdk.Msg{ + &ophosttypes.MsgRecordBatch{ + Submitter: "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", + BridgeId: 1, + BatchBytes: []byte{ + 0x01, // type chunk + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // start 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, // end 10 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, // index 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, // chunks length 8 + 0x00, 0x00, 0x00, // chunkdata + }, + }, + }, + Timestamp: 9, + Save: true, + }, batchSubmitter.processedMsgs[8]) +} + +func TestCheckBatch(t *testing.T) { + batchSubmitter := BatchSubmitter{ + bridgeInfo: ophosttypes.QueryBridgeResponse{ + BridgeConfig: ophosttypes.BridgeConfig{ + SubmissionInterval: 15000, + }, + }, + } + + cases := []struct { + name string + localBatchInfo *executortypes.LocalBatchInfo + batchConfig executortypes.BatchConfig + blockHeight int64 + latestHeight int64 + blockTime time.Time + expected bool + }{ + { + name: "block time >= last submission time + 2/3 interval, block height == latest height", + localBatchInfo: &executortypes.LocalBatchInfo{ + Start: 1, + LastSubmissionTime: time.Unix(0, 0).UTC(), + }, + batchConfig: executortypes.BatchConfig{ + MaxChunks: 100, + MaxChunkSize: 10, + MaxSubmissionTime: 1, // time.Second + }, + blockHeight: 10, + latestHeight: 10, + blockTime: time.Unix(0, 10001).UTC(), + expected: true, + }, + { + name: "block time >= last submission time + 2/3 interval, block height < latest 
height", + localBatchInfo: &executortypes.LocalBatchInfo{ + Start: 1, + LastSubmissionTime: time.Unix(0, 0), + }, + batchConfig: executortypes.BatchConfig{ + MaxChunks: 100, + MaxChunkSize: 10, + MaxSubmissionTime: 1, + }, + blockHeight: 10, + latestHeight: 20, + blockTime: time.Unix(0, 10001).UTC(), + expected: false, + }, + { + name: "block time < last submission time + 2/3 interval, block height == latest height", + localBatchInfo: &executortypes.LocalBatchInfo{ + Start: 1, + LastSubmissionTime: time.Unix(0, 10000).UTC(), + }, + batchConfig: executortypes.BatchConfig{ + MaxChunks: 100, + MaxChunkSize: 10, + MaxSubmissionTime: 1, + }, + blockHeight: 10, + latestHeight: 10, + blockTime: time.Unix(0, 10001).UTC(), + expected: false, + }, + { + name: "block time > last submission time + max submission time, block height == latest height", + localBatchInfo: &executortypes.LocalBatchInfo{ + Start: 1, + LastSubmissionTime: time.Unix(0, 0).UTC(), + }, + batchConfig: executortypes.BatchConfig{ + MaxChunks: 100, + MaxChunkSize: 10, + MaxSubmissionTime: 1, + }, + blockHeight: 10, + latestHeight: 10, + blockTime: time.Unix(0, 1000*1000*1000+1).UTC(), + expected: true, + }, + { + name: "block time > last submission time + max submission time, block height != latest height", + localBatchInfo: &executortypes.LocalBatchInfo{ + Start: 1, + LastSubmissionTime: time.Unix(0, 0).UTC(), + }, + batchConfig: executortypes.BatchConfig{ + MaxChunks: 100, + MaxChunkSize: 10, + MaxSubmissionTime: 1, + }, + blockHeight: 10, + latestHeight: 20, + blockTime: time.Unix(0, 1000*1000*1000+1).UTC(), + expected: false, + }, + { + name: "block time < last submission time + max submission time, block height == latest height", + localBatchInfo: &executortypes.LocalBatchInfo{ + Start: 1, + LastSubmissionTime: time.Unix(0, 0).UTC(), + }, + batchConfig: executortypes.BatchConfig{ + MaxChunks: 100, + MaxChunkSize: 10, + MaxSubmissionTime: 1, + }, + blockHeight: 10, + latestHeight: 10, + blockTime: time.Unix(0, 1000*1000*1000).UTC(), + expected: true, + }, + { + name: "batch size >= (max chunks - 1) * max chunk size", + localBatchInfo: &executortypes.LocalBatchInfo{ + Start: 1, + LastSubmissionTime: time.Unix(0, 0).UTC(), + BatchSize: 1000, + }, + batchConfig: executortypes.BatchConfig{ + MaxChunks: 100, + MaxChunkSize: 10, + MaxSubmissionTime: 1, + }, + blockHeight: 10, + latestHeight: 20, + blockTime: time.Unix(0, 500).UTC(), + expected: true, + }, + { + name: "batch size < (max chunks - 1) * max chunk size", + localBatchInfo: &executortypes.LocalBatchInfo{ + Start: 1, + LastSubmissionTime: time.Unix(0, 0), + BatchSize: 10, + }, + batchConfig: executortypes.BatchConfig{ + MaxChunks: 100, + MaxChunkSize: 10, + MaxSubmissionTime: 1, + }, + blockHeight: 10, + latestHeight: 20, + blockTime: time.Unix(0, 500).UTC(), + expected: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + batchSubmitter.localBatchInfo = tc.localBatchInfo + batchSubmitter.batchCfg = tc.batchConfig + actual := batchSubmitter.checkBatch(tc.blockHeight, tc.latestHeight, tc.blockTime) + require.Equal(t, tc.expected, actual) + }) + } +} + +func TestBatchFileSize(t *testing.T) { + var err error + + batchSubmitter := BatchSubmitter{} + batchSubmitter.batchFile, err = os.CreateTemp("", "batchfile") + require.NoError(t, err) + defer os.Remove(batchSubmitter.batchFile.Name()) + + batchSubmitter.batchWriter, err = gzip.NewWriterLevel(batchSubmitter.batchFile, 6) + require.NoError(t, err) + defer batchSubmitter.batchWriter.Close() + + 
fileSize, err := batchSubmitter.batchFileSize(false) + require.NoError(t, err) + require.Equal(t, int64(0), fileSize) + + n, err := batchSubmitter.batchFile.Write([]byte("batch_bytes")) + require.NoError(t, err) + + require.Equal(t, 11, n) + fileSize, err = batchSubmitter.batchFileSize(false) + require.NoError(t, err) + require.Equal(t, int64(11), fileSize) +} + +func TestEmptyBatchFile(t *testing.T) { + var err error + + batchSubmitter := BatchSubmitter{} + batchSubmitter.batchFile, err = os.CreateTemp("", "batchfile") + require.NoError(t, err) + defer os.Remove(batchSubmitter.batchFile.Name()) + + _, err = batchSubmitter.batchFile.Write([]byte("batch_bytes")) + require.NoError(t, err) + + fileSize, err := batchSubmitter.batchFileSize(false) + require.NoError(t, err) + require.Equal(t, int64(11), fileSize) + + err = batchSubmitter.emptyBatchFile() + require.NoError(t, err) + + fileSize, err = batchSubmitter.batchFileSize(false) + require.NoError(t, err) + require.Equal(t, int64(0), fileSize) +} + +func TestPrependLength(t *testing.T) { + batchBytes := []byte("batch_bytes") + lengthPrefixed := prependLength(batchBytes) + + require.Equal(t, append([]byte{0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, batchBytes...), lengthPrefixed) +} diff --git a/executor/batchsubmitter/common_test.go b/executor/batchsubmitter/common_test.go new file mode 100644 index 0000000..b0d84b2 --- /dev/null +++ b/executor/batchsubmitter/common_test.go @@ -0,0 +1,90 @@ +package batchsubmitter + +import ( + "context" + + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + executortypes "github.com/initia-labs/opinit-bots/executor/types" + btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + "github.com/initia-labs/opinit-bots/types" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type mockHost struct { + batchInfos []ophosttypes.BatchInfoWithOutput +} + +func NewMockHost(batchInfos []ophosttypes.BatchInfoWithOutput) *mockHost { + return &mockHost{ + batchInfos: batchInfos, + } +} + +func (m *mockHost) QueryBatchInfos(ctx context.Context, bridgeId uint64) (*ophosttypes.QueryBatchInfosResponse, error) { + return &ophosttypes.QueryBatchInfosResponse{ + BatchInfos: m.batchInfos, + }, nil +} + +var _ hostNode = (*mockHost)(nil) + +type mockDA struct { + db types.DB + cdc codec.Codec + bridgeId uint64 + baseAccount string + processedMsgs []btypes.ProcessedMsgs +} + +func NewMockDA(db types.DB, cdc codec.Codec, bridgeId uint64, baseAccount string) *mockDA { + return &mockDA{ + db: db, + cdc: cdc, + bridgeId: bridgeId, + baseAccount: baseAccount, + } +} +func (m mockDA) Start(_ types.Context) {} + +func (m *mockDA) DB() types.DB { + return m.db +} + +func (m *mockDA) Codec() codec.Codec { + return m.cdc +} + +func (m mockDA) HasBroadcaster() bool { + return m.baseAccount != "" +} +func (m mockDA) CreateBatchMsg(batchBytes []byte) (sdk.Msg, string, error) { + if m.baseAccount == "" { + return nil, "", nil + } + + msg := ophosttypes.NewMsgRecordBatch( + m.baseAccount, + m.bridgeId, + batchBytes, + ) + return msg, m.baseAccount, nil +} +func (m mockDA) GetNodeStatus() (nodetypes.Status, error) { + return nodetypes.Status{}, nil +} + +func (m *mockDA) BroadcastProcessedMsgs(msgs ...btypes.ProcessedMsgs) { + m.processedMsgs = append(m.processedMsgs, msgs...) 
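+	// the mock only records the messages, so tests can assert on what would
+	// have been broadcast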
+} + +var _ executortypes.DANode = (*mockDA)(nil) + +func logCapturer() (*zap.Logger, *observer.ObservedLogs) { + core, logs := observer.New(zap.DebugLevel) + return zap.New(core), logs +} diff --git a/executor/batchsubmitter/db.go b/executor/batchsubmitter/db.go new file mode 100644 index 0000000..9604d5e --- /dev/null +++ b/executor/batchsubmitter/db.go @@ -0,0 +1,33 @@ +package batchsubmitter + +import ( + dbtypes "github.com/initia-labs/opinit-bots/db/types" + executortypes "github.com/initia-labs/opinit-bots/executor/types" + "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" +) + +// GetLocalBatchInfo returns the local batch info from the given db. +// If the local batch info is not found, it returns an empty struct. +func GetLocalBatchInfo(db types.BasicDB) (executortypes.LocalBatchInfo, error) { + val, err := db.Get(executortypes.LocalBatchInfoKey) + if err != nil { + if errors.Is(err, dbtypes.ErrNotFound) { + return executortypes.LocalBatchInfo{}, nil + } + return executortypes.LocalBatchInfo{}, err + } + + localBatchInfo := executortypes.LocalBatchInfo{} + err = localBatchInfo.Unmarshal(val) + return localBatchInfo, err +} + +// SaveLocalBatchInfo saves the local batch info to the given db. +func SaveLocalBatchInfo(db types.BasicDB, localBatchInfo executortypes.LocalBatchInfo) error { + bz, err := localBatchInfo.Value() + if err != nil { + return err + } + return db.Set(localBatchInfo.Key(), bz) +} diff --git a/executor/batchsubmitter/db_test.go b/executor/batchsubmitter/db_test.go new file mode 100644 index 0000000..b37e612 --- /dev/null +++ b/executor/batchsubmitter/db_test.go @@ -0,0 +1,33 @@ +package batchsubmitter + +import ( + "testing" + "time" + + "github.com/initia-labs/opinit-bots/db" + executortypes "github.com/initia-labs/opinit-bots/executor/types" + "github.com/stretchr/testify/require" +) + +func TestSaveGetLocalBatchInfo(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + + info, err := GetLocalBatchInfo(db) + require.NoError(t, err) + require.Equal(t, executortypes.LocalBatchInfo{}, info) + + localBatchInfo := executortypes.LocalBatchInfo{ + Start: 1, + End: 2, + LastSubmissionTime: time.Unix(0, 10000).UTC(), + BatchSize: 1000, + } + + err = SaveLocalBatchInfo(db, localBatchInfo) + require.NoError(t, err) + + info, err = GetLocalBatchInfo(db) + require.NoError(t, err) + require.Equal(t, localBatchInfo, info) +} diff --git a/executor/batchsubmitter/handler.go b/executor/batchsubmitter/handler.go new file mode 100644 index 0000000..b801c6f --- /dev/null +++ b/executor/batchsubmitter/handler.go @@ -0,0 +1,92 @@ +package batchsubmitter + +import ( + "github.com/pkg/errors" + + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + + "github.com/cosmos/gogoproto/proto" + + "github.com/initia-labs/opinit-bots/node" + "github.com/initia-labs/opinit-bots/node/broadcaster" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + "github.com/initia-labs/opinit-bots/types" +) + +func (bs *BatchSubmitter) rawBlockHandler(ctx types.Context, args nodetypes.RawBlockArgs) error { + // clear processed messages + bs.processedMsgs = bs.processedMsgs[:0] + bs.stage.Reset() + + err := bs.prepareBatch(args.BlockHeight) + if err != nil { + return errors.Wrap(err, "failed to prepare batch") + } + + pbb := new(cmtproto.Block) + err = proto.Unmarshal(args.BlockBytes, pbb) + if err != nil { + return errors.Wrap(err, "failed to unmarshal block") + } + + pbb, err = bs.emptyOracleData(pbb) + if err != nil { + return errors.Wrap(err, "failed 
to empty oracle data") + } + + // convert block to bytes + blockBytes, err := proto.Marshal(pbb) + if err != nil { + return errors.Wrap(err, "failed to marshal block") + } + + _, err = bs.handleBatch(blockBytes) + if err != nil { + return errors.Wrap(err, "failed to handle batch") + } + + fileSize, err := bs.batchFileSize(true) + if err != nil { + return errors.Wrap(err, "failed to get batch file size") + } + bs.localBatchInfo.BatchSize = fileSize + + if bs.checkBatch(args.BlockHeight, args.LatestHeight, pbb.Header.Time) { + // finalize the batch + bs.LastBatchEndBlockNumber = args.BlockHeight + bs.localBatchInfo.LastSubmissionTime = pbb.Header.Time + bs.localBatchInfo.End = args.BlockHeight + + err := bs.finalizeBatch(ctx, args.BlockHeight) + if err != nil { + return errors.Wrap(err, "failed to finalize batch") + } + } + + // store the processed state into db with batch operation + err = node.SetSyncedHeight(bs.stage, args.BlockHeight) + if err != nil { + return errors.Wrap(err, "failed to set synced height") + } + if bs.da.HasBroadcaster() { + // save processed msgs to stage using host db + err := broadcaster.SaveProcessedMsgsBatch(bs.stage.WithPrefixedKey(bs.da.DB().PrefixedKey), bs.da.Codec(), bs.processedMsgs) + if err != nil { + return errors.Wrap(err, "failed to save processed msgs") + } + } else { + bs.processedMsgs = bs.processedMsgs[:0] + } + err = SaveLocalBatchInfo(bs.stage, *bs.localBatchInfo) + if err != nil { + return errors.Wrap(err, "failed to save local batch info") + } + + err = bs.stage.Commit() + if err != nil { + return errors.Wrap(err, "failed to commit stage") + } + // broadcast processed messages + bs.da.BroadcastProcessedMsgs(bs.processedMsgs...) + return nil +} diff --git a/executor/batchsubmitter/handler_test.go b/executor/batchsubmitter/handler_test.go new file mode 100644 index 0000000..8b43249 --- /dev/null +++ b/executor/batchsubmitter/handler_test.go @@ -0,0 +1,195 @@ +package batchsubmitter + +import ( + "compress/gzip" + "context" + "os" + "sync" + "testing" + "time" + + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cosmos/gogoproto/proto" + opchildtypes "github.com/initia-labs/OPinit/x/opchild/types" + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + client "github.com/initia-labs/opinit-bots/client" + mockclient "github.com/initia-labs/opinit-bots/client/mock" + "github.com/initia-labs/opinit-bots/db" + executortypes "github.com/initia-labs/opinit-bots/executor/types" + "github.com/initia-labs/opinit-bots/node" + btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" + "github.com/initia-labs/opinit-bots/node/rpcclient" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + childprovider "github.com/initia-labs/opinit-bots/provider/child" + hostprovider "github.com/initia-labs/opinit-bots/provider/host" + "github.com/initia-labs/opinit-bots/txutils" + "github.com/initia-labs/opinit-bots/types" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/cosmos/cosmos-sdk/client/tx" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/authz" +) + +func TestRawBlockHandler(t *testing.T) { + baseDB, err := db.NewMemDB() + require.NoError(t, err) + + batchDB := baseDB.WithPrefix([]byte("test_batch")) + daDB := baseDB.WithPrefix([]byte("test_da")) + + appCodec, txConfig, err := childprovider.GetCodec("init") + require.NoError(t, err) + + mockCaller := mockclient.NewMockCaller() + rpcClient := 
rpcclient.NewRPCClientWithClient(appCodec, client.NewWithCaller(mockCaller)) + batchNode := node.NewTestNode(nodetypes.NodeConfig{}, batchDB, appCodec, txConfig, rpcClient, nil) + + hostCdc, _, err := hostprovider.GetCodec("init") + require.NoError(t, err) + + mockDA := NewMockDA(daDB, hostCdc, 1, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0") + + batchConfig := executortypes.BatchConfig{ + MaxChunks: 100, + MaxChunkSize: 10, + MaxSubmissionTime: 1000, + } + + batchSubmitter := BatchSubmitter{ + node: batchNode, + da: mockDA, + batchCfg: batchConfig, + bridgeInfo: ophosttypes.QueryBridgeResponse{ + BridgeConfig: ophosttypes.BridgeConfig{ + SubmissionInterval: 150, + }, + }, + batchInfoMu: &sync.Mutex{}, + batchInfos: []ophosttypes.BatchInfoWithOutput{ + { + BatchInfo: ophosttypes.BatchInfo{ + ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_INITIA, + Submitter: "submitter0", + }, + }, + }, + processedMsgs: make([]btypes.ProcessedMsgs, 0), + stage: batchDB.NewStage(), + } + batchSubmitter.batchFile, err = os.CreateTemp("", "batchfile") + require.NoError(t, err) + defer os.Remove(batchSubmitter.batchFile.Name()) + + batchSubmitter.batchWriter, err = gzip.NewWriterLevel(batchSubmitter.batchFile, 6) + require.NoError(t, err) + defer batchSubmitter.batchWriter.Close() + + createAuthzMsg := func(t *testing.T, sender string, msgs []sdk.Msg) *authz.MsgExec { + msgsAny := make([]*cdctypes.Any, 0) + for _, msg := range msgs { + any, err := cdctypes.NewAnyWithValue(msg) + require.NoError(t, err) + msgsAny = append(msgsAny, any) + } + return &authz.MsgExec{ + Grantee: sender, + Msgs: msgsAny, + } + } + + mockCaller.SetRawCommit(1, []byte("commit_bytes")) + authzMsg := createAuthzMsg(t, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", []sdk.Msg{&opchildtypes.MsgUpdateOracle{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte("oracle_data2"), Height: 4}}) + txf := tx.Factory{}.WithChainID("test_chain").WithTxConfig(txConfig) + pbb := &cmtproto.Block{ + Header: cmtproto.Header{ + Height: 1, + Time: time.Unix(0, 10).UTC(), + }, + Data: cmtproto.Data{ + Txs: [][]byte{}, + }, + } + + txb, err := txf.BuildUnsignedTx(authzMsg) + require.NoError(t, err) + txBytes, err := txutils.EncodeTx(txConfig, txb.GetTx()) + require.NoError(t, err) + pbb.Data.Txs = append(pbb.Data.Txs, txBytes) + + blockBytes, err := proto.Marshal(pbb) + require.NoError(t, err) + + ctx := types.NewContext(context.TODO(), zap.NewNop(), "") + + err = SaveLocalBatchInfo(batchDB, executortypes.LocalBatchInfo{ + Start: 1, + End: 0, + LastSubmissionTime: time.Unix(0, 0).UTC(), + }) + require.NoError(t, err) + + err = batchSubmitter.rawBlockHandler(ctx, nodetypes.RawBlockArgs{ + BlockHeight: 1, + LatestHeight: 1, + BlockBytes: blockBytes, + }) + require.NoError(t, err) + require.Len(t, mockDA.processedMsgs, 0) + + syncedHeight, err := node.GetSyncInfo(batchDB) + require.NoError(t, err) + require.Equal(t, int64(1), syncedHeight) + localBatchInfo, err := GetLocalBatchInfo(batchDB) + require.NoError(t, err) + require.Equal(t, executortypes.LocalBatchInfo{ + Start: 1, + End: 0, + LastSubmissionTime: time.Unix(0, 0).UTC(), + BatchSize: localBatchInfo.BatchSize, + }, localBatchInfo) + + mockCaller.SetRawCommit(2, []byte("commit_bytes")) + authzMsg = createAuthzMsg(t, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", []sdk.Msg{&opchildtypes.MsgUpdateOracle{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte("oracle_data2"), Height: 5}}) + pbb = &cmtproto.Block{ + Header: cmtproto.Header{ + Height: 2, + Time: time.Unix(0, 
110).UTC(),
+		},
+		Data: cmtproto.Data{
+			Txs: [][]byte{},
+		},
+	}
+
+	txb, err = txf.BuildUnsignedTx(authzMsg)
+	require.NoError(t, err)
+	txBytes, err = txutils.EncodeTx(txConfig, txb.GetTx())
+	require.NoError(t, err)
+	pbb.Data.Txs = append(pbb.Data.Txs, txBytes)
+
+	blockBytes, err = proto.Marshal(pbb)
+	require.NoError(t, err)
+
+	err = batchSubmitter.rawBlockHandler(ctx, nodetypes.RawBlockArgs{
+		BlockHeight:  2,
+		LatestHeight: 2,
+		BlockBytes:   blockBytes,
+	})
+	require.NoError(t, err)
+	require.GreaterOrEqual(t, len(mockDA.processedMsgs), 1)
+
+	syncedHeight, err = node.GetSyncInfo(batchDB)
+	require.NoError(t, err)
+	require.Equal(t, int64(2), syncedHeight)
+
+	localBatchInfo, err = GetLocalBatchInfo(batchDB)
+	require.NoError(t, err)
+	require.Equal(t, executortypes.LocalBatchInfo{
+		Start:              1,
+		End:                2,
+		LastSubmissionTime: time.Unix(0, 110).UTC(),
+		BatchSize:          batchSubmitter.localBatchInfo.BatchSize,
+	}, localBatchInfo)
+}
diff --git a/executor/batchsubmitter/noop_da.go b/executor/batchsubmitter/noop_da.go
new file mode 100644
index 0000000..cf4a5f4
--- /dev/null
+++ b/executor/batchsubmitter/noop_da.go
@@ -0,0 +1,30 @@
+package batchsubmitter
+
+import (
+	executortypes "github.com/initia-labs/opinit-bots/executor/types"
+	btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types"
+	nodetypes "github.com/initia-labs/opinit-bots/node/types"
+	"github.com/initia-labs/opinit-bots/types"
+
+	"github.com/cosmos/cosmos-sdk/codec"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+var _ executortypes.DANode = &NoopDA{}
+
+type NoopDA struct {
+}
+
+func NewNoopDA() *NoopDA {
+	return &NoopDA{}
+}
+
+func (n NoopDA) Start(_ types.Context) {}
+func (n NoopDA) DB() types.DB          { return nil }
+func (n NoopDA) Codec() codec.Codec    { return nil }
+
+func (n NoopDA) HasBroadcaster() bool                             { return false }
+func (n NoopDA) CreateBatchMsg(_ []byte) (sdk.Msg, string, error) { return nil, "", nil }
+func (n NoopDA) BroadcastProcessedMsgs(_ ...btypes.ProcessedMsgs) {}
+func (n NoopDA) GetNodeStatus() (nodetypes.Status, error)         { return nodetypes.Status{}, nil }
diff --git a/executor/batchsubmitter/oracle.go b/executor/batchsubmitter/oracle.go
new file mode 100644
index 0000000..34e13bf
--- /dev/null
+++ b/executor/batchsubmitter/oracle.go
@@ -0,0 +1,67 @@
+package batchsubmitter
+
+import (
+	cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/cosmos/cosmos-sdk/x/authz"
+
+	opchildtypes "github.com/initia-labs/OPinit/x/opchild/types"
+	childprovider "github.com/initia-labs/opinit-bots/provider/child"
+	"github.com/initia-labs/opinit-bots/txutils"
+	"github.com/pkg/errors"
+)
+
+// emptyOracleData converts the MsgUpdateOracle message's data field to empty
+// to decrease the size of the batch.
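+// Only the block's first tx is inspected: when present, the oracle update is
+// expected to be the 0th tx, either as a bare MsgUpdateOracle or wrapped in an
+// authz MsgExec.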
+func (bs *BatchSubmitter) emptyOracleData(pbb *cmtproto.Block) (*cmtproto.Block, error) {
+	txs := pbb.Data.GetTxs()
+	if len(txs) == 0 {
+		return pbb, nil
+	}
+	txBytes := txs[0]
+
+	txConfig := bs.node.GetTxConfig()
+	tx, err := txutils.DecodeTx(txConfig, txBytes)
+	if err != nil {
+		// ignore txs that are not registered in the codec
+		return pbb, nil
+	}
+
+	msgs := tx.GetMsgs()
+	// an oracle tx has only one message
+	if len(msgs) != 1 {
+		return pbb, nil
+	}
+
+	switch msg := msgs[0].(type) {
+	case *opchildtypes.MsgUpdateOracle:
+		msg.Data = []byte{}
+	case *authz.MsgExec:
+		if len(msg.Msgs) != 1 || msg.Msgs[0].TypeUrl != "/opinit.opchild.v1.MsgUpdateOracle" {
+			return pbb, nil
+		}
+		oracleMsg := &opchildtypes.MsgUpdateOracle{}
+		err = bs.node.Codec().UnpackAny(msg.Msgs[0], &oracleMsg)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to unpack oracle msg from authz msg")
+		}
+		oracleMsg.Data = []byte{}
+		msgs[0], err = childprovider.CreateAuthzMsg(msg.Grantee, oracleMsg)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to create authz msg")
+		}
+	}
+
+	tx, err = txutils.ChangeMsgsFromTx(txConfig, tx, []sdk.Msg{msgs[0]})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to change msgs from tx")
+	}
+	convertedTxBytes, err := txutils.EncodeTx(txConfig, tx)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to encode tx")
+	}
+	pbb.Data.Txs[0] = convertedTxBytes
+
+	return pbb, nil
+}
diff --git a/executor/batchsubmitter/oracle_test.go b/executor/batchsubmitter/oracle_test.go
new file mode 100644
index 0000000..39e2afe
--- /dev/null
+++ b/executor/batchsubmitter/oracle_test.go
@@ -0,0 +1,169 @@
+package batchsubmitter
+
+import (
+	"testing"
+
+	cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+	opchildtypes "github.com/initia-labs/OPinit/x/opchild/types"
+	"github.com/initia-labs/opinit-bots/db"
+	"github.com/initia-labs/opinit-bots/node"
+	"github.com/initia-labs/opinit-bots/node/types"
+	childprovider "github.com/initia-labs/opinit-bots/provider/child"
+	"github.com/initia-labs/opinit-bots/txutils"
+	"github.com/stretchr/testify/require"
+
+	"github.com/cosmos/cosmos-sdk/client/tx"
+	cdctypes "github.com/cosmos/cosmos-sdk/codec/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/cosmos/cosmos-sdk/x/authz"
+)
+
+func TestEmptyOracleData(t *testing.T) {
+	baseDB, err := db.NewMemDB()
+	require.NoError(t, err)
+
+	batchDB := baseDB.WithPrefix([]byte("test_batch"))
+	appCodec, txConfig, err := childprovider.GetCodec("init")
+	require.NoError(t, err)
+	batchNode := node.NewTestNode(types.NodeConfig{}, batchDB, appCodec, txConfig, nil, nil)
+
+	batchSubmitter := BatchSubmitter{node: batchNode}
+
+	createAuthzMsg := func(t *testing.T, sender string, msgs []sdk.Msg) *authz.MsgExec {
+		msgsAny := make([]*cdctypes.Any, 0)
+		for _, msg := range msgs {
+			any, err := cdctypes.NewAnyWithValue(msg)
+			require.NoError(t, err)
+			msgsAny = append(msgsAny, any)
+		}
+		return &authz.MsgExec{
+			Grantee: sender,
+			Msgs:    msgsAny,
+		}
+	}
+
+	cases := []struct {
+		name        string
+		txs         [][]sdk.Msg
+		expectedTxs [][]sdk.Msg
+		err         bool
+	}{
+		{
+			name: "0th oracle tx",
+			txs: [][]sdk.Msg{
+				{
+					&opchildtypes.MsgUpdateOracle{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte("oracle_data"), Height: 3},
+				},
+			},
+			expectedTxs: [][]sdk.Msg{
+				{
+					&opchildtypes.MsgUpdateOracle{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte(""), Height: 3},
+				},
+			},
+			err: false,
+		},
+		{
+			name: "0th oracle tx with multiple msgs",
+			txs: [][]sdk.Msg{
+				{
+					
&opchildtypes.MsgUpdateOracle{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte("oracle_data"), Height: 3}, + &opchildtypes.MsgUpdateOracle{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte("oracle_data"), Height: 3}, + }, + }, + expectedTxs: [][]sdk.Msg{ + { + &opchildtypes.MsgUpdateOracle{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte("oracle_data"), Height: 3}, + &opchildtypes.MsgUpdateOracle{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte("oracle_data"), Height: 3}, + }, + }, + err: false, + }, + { + name: "1st oracle tx", + txs: [][]sdk.Msg{ + { + &opchildtypes.MsgFinalizeTokenDeposit{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte("token_deposit_data"), Amount: sdk.NewInt64Coin("init", 10), Height: 5}, + }, + { + &opchildtypes.MsgUpdateOracle{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte("oracle_data"), Height: 3}, + }, + }, + expectedTxs: [][]sdk.Msg{ + { + &opchildtypes.MsgFinalizeTokenDeposit{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte("token_deposit_data"), Amount: sdk.NewInt64Coin("init", 10), Height: 5}, + }, + { + &opchildtypes.MsgUpdateOracle{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte("oracle_data"), Height: 3}, + }, + }, + err: false, + }, + { + name: "no txs", + txs: [][]sdk.Msg{}, + expectedTxs: [][]sdk.Msg{}, + err: false, + }, + { + name: "oracle authz tx", + txs: [][]sdk.Msg{ + { + createAuthzMsg(t, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", []sdk.Msg{&opchildtypes.MsgUpdateOracle{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte("oracle_data"), Height: 3}}), + }, + }, + expectedTxs: [][]sdk.Msg{ + { + createAuthzMsg(t, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", []sdk.Msg{&opchildtypes.MsgUpdateOracle{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte(""), Height: 3}}), + }, + }, + err: false, + }, + { + name: "authz tx with another msg", + txs: [][]sdk.Msg{ + { + createAuthzMsg(t, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", []sdk.Msg{&opchildtypes.MsgFinalizeTokenDeposit{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte("token_deposit_data"), Amount: sdk.NewInt64Coin("init", 10), Height: 5}}), + }, + }, + expectedTxs: [][]sdk.Msg{ + { + createAuthzMsg(t, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", []sdk.Msg{&opchildtypes.MsgFinalizeTokenDeposit{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", Data: []byte("token_deposit_data"), Amount: sdk.NewInt64Coin("init", 10), Height: 5}}), + }, + }, + err: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + txf := tx.Factory{}.WithChainID("test_chain").WithTxConfig(txConfig) + pbb := cmtproto.Block{ + Data: cmtproto.Data{ + Txs: [][]byte{}, + }, + } + + for _, msgs := range tc.txs { + txb, err := txf.BuildUnsignedTx(msgs...) 
+ require.NoError(t, err) + txBytes, err := txutils.EncodeTx(txConfig, txb.GetTx()) + require.NoError(t, err) + pbb.Data.Txs = append(pbb.Data.Txs, txBytes) + } + + changedBlock, err := batchSubmitter.emptyOracleData(&pbb) + require.NoError(t, err) + + changedBlockTxs := changedBlock.Data.GetTxs() + require.Len(t, changedBlockTxs, len(tc.expectedTxs)) + + for i, txBytes := range changedBlockTxs { + tx, err := txutils.DecodeTx(txConfig, txBytes) + require.NoError(t, err) + for j, actual := range tx.GetMsgs() { + require.Equal(t, tc.expectedTxs[i][j].String(), actual.String()) + } + } + }) + } +} diff --git a/executor/batch/status.go b/executor/batchsubmitter/status.go similarity index 87% rename from executor/batch/status.go rename to executor/batchsubmitter/status.go index 3336bc2..d0430cc 100644 --- a/executor/batch/status.go +++ b/executor/batchsubmitter/status.go @@ -1,4 +1,4 @@ -package batch +package batchsubmitter import ( "errors" @@ -11,7 +11,7 @@ import ( type Status struct { Node nodetypes.Status `json:"node"` BatchInfo ophosttypes.BatchInfo `json:"batch_info"` - CurrentBatchFileSize int64 `json:"current_batch_file_size"` + CurrentBatchSize int64 `json:"current_batch_size"` BatchStartBlockNumber int64 `json:"batch_start_block_number"` BatchEndBlockNumber int64 `json:"batch_end_block_number"` LastBatchSubmissionTime time.Time `json:"last_batch_submission_time"` @@ -29,7 +29,7 @@ func (bs BatchSubmitter) GetStatus() (Status, error) { return Status{ Node: bs.node.GetStatus(), BatchInfo: bs.BatchInfo().BatchInfo, - CurrentBatchFileSize: bs.localBatchInfo.BatchFileSize, + CurrentBatchSize: bs.localBatchInfo.BatchSize, BatchStartBlockNumber: bs.localBatchInfo.Start, BatchEndBlockNumber: bs.localBatchInfo.End, LastBatchSubmissionTime: bs.localBatchInfo.LastSubmissionTime, diff --git a/executor/celestia/celestia.go b/executor/celestia/celestia.go index 710872d..4d5474b 100644 --- a/executor/celestia/celestia.go +++ b/executor/celestia/celestia.go @@ -1,11 +1,7 @@ package celestia import ( - "context" "crypto/sha256" - "errors" - - "go.uber.org/zap" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec" @@ -20,6 +16,8 @@ import ( nodetypes "github.com/initia-labs/opinit-bots/node/types" "github.com/initia-labs/opinit-bots/types" celestiatypes "github.com/initia-labs/opinit-bots/types/celestia" + + "github.com/pkg/errors" ) type batchNode interface { @@ -38,31 +36,26 @@ type Celestia struct { bridgeId uint64 namespace sh.Namespace - cfg nodetypes.NodeConfig - db types.DB - logger *zap.Logger + cfg nodetypes.NodeConfig + db types.DB } -func NewDACelestia( - version uint8, cfg nodetypes.NodeConfig, - db types.DB, logger *zap.Logger, -) *Celestia { +func NewDACelestia(version uint8, cfg nodetypes.NodeConfig, db types.DB) *Celestia { c := &Celestia{ version: version, - cfg: cfg, - db: db, - logger: logger, + cfg: cfg, + db: db, } appCodec, txConfig, err := createCodec(cfg.Bech32Prefix) if err != nil { - panic(err) + panic(errors.Wrap(err, "failed to create codec")) } - node, err := node.NewNode(cfg, db, logger, appCodec, txConfig) + node, err := node.NewNode(cfg, db, appCodec, txConfig) if err != nil { - panic(err) + panic(errors.Wrap(err, "failed to create node")) } c.node = node @@ -79,17 +72,17 @@ func createCodec(bech32Prefix string) (codec.Codec, client.TxConfig, error) { }) } -func (c *Celestia) Initialize(ctx context.Context, batch batchNode, bridgeId uint64, keyringConfig *btypes.KeyringConfig) error { +func (c *Celestia) Initialize(ctx types.Context, batch 
batchNode, bridgeId uint64, keyringConfig *btypes.KeyringConfig) error { err := c.node.Initialize(ctx, 0, c.keyringConfigs(keyringConfig)) if err != nil { - return err + return errors.Wrap(err, "failed to initialize node") } c.batch = batch c.bridgeId = bridgeId c.namespace, err = sh.NewV0Namespace(c.NamespaceID()) if err != nil { - return err + return errors.Wrap(err, "failed to create namespace") } return nil } @@ -98,28 +91,38 @@ func (c *Celestia) RegisterDAHandlers() { c.node.RegisterEventHandler("celestia.blob.v1.EventPayForBlobs", c.payForBlobsHandler) } -func (c *Celestia) Start(ctx context.Context) { - c.logger.Info("celestia start") +func (c *Celestia) Start(ctx types.Context) { + ctx.Logger().Info("celestia start") c.node.Start(ctx) } -func (c Celestia) BroadcastMsgs(msgs btypes.ProcessedMsgs) { - if len(msgs.Msgs) == 0 { +func (c Celestia) BroadcastProcessedMsgs(batch ...btypes.ProcessedMsgs) { + if len(batch) == 0 { return } + broadcaster := c.node.MustGetBroadcaster() + + for _, processedMsgs := range batch { + if len(processedMsgs.Msgs) == 0 { + continue + } + broadcaster.BroadcastProcessedMsgs(processedMsgs) + } +} - c.node.MustGetBroadcaster().BroadcastMsgs(msgs) +func (c Celestia) DB() types.DB { + return c.node.DB() } -func (c Celestia) ProcessedMsgsToRawKV(msgs []btypes.ProcessedMsgs, delete bool) ([]types.RawKV, error) { - return c.node.MustGetBroadcaster().ProcessedMsgsToRawKV(msgs, delete) +func (c Celestia) Codec() codec.Codec { + return c.node.Codec() } func (c *Celestia) SetBridgeId(bridgeId uint64) { c.bridgeId = bridgeId } -func (c Celestia) HasKey() bool { +func (c Celestia) HasBroadcaster() bool { return c.node.HasBroadcaster() } diff --git a/executor/celestia/handler.go b/executor/celestia/handler.go index 1e3bbf0..38fcf8f 100644 --- a/executor/celestia/handler.go +++ b/executor/celestia/handler.go @@ -1,14 +1,15 @@ package celestia import ( - "context" "encoding/base64" nodetypes "github.com/initia-labs/opinit-bots/node/types" + "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" "go.uber.org/zap" ) -func (c *Celestia) payForBlobsHandler(_ context.Context, args nodetypes.EventHandlerArgs) error { +func (c *Celestia) payForBlobsHandler(ctx types.Context, args nodetypes.EventHandlerArgs) error { var signer string var blobSizes string var namespaces string @@ -18,24 +19,24 @@ func (c *Celestia) payForBlobsHandler(_ context.Context, args nodetypes.EventHan case "c2lnbmVy": // signer value, err := base64.StdEncoding.DecodeString(attr.Value) if err != nil { - return err + return errors.Wrap(err, "failed to decode signer") } signer = string(value) case "YmxvYl9zaXplcw==": // blob_sizes value, err := base64.StdEncoding.DecodeString(attr.Value) if err != nil { - return err + return errors.Wrap(err, "failed to decode blob sizes") } blobSizes = string(value) case "bmFtZXNwYWNlcw==": // namespaces value, err := base64.StdEncoding.DecodeString(attr.Value) if err != nil { - return err + return errors.Wrap(err, "failed to decode namespaces") } namespaces = string(value) } } - c.logger.Info("record batch", + ctx.Logger().Info("record batch", zap.String("signer", signer), zap.String("blob_sizes", blobSizes), zap.String("namespaces", namespaces), diff --git a/executor/celestia/node.go b/executor/celestia/node.go index 89d596b..163cec3 100644 --- a/executor/celestia/node.go +++ b/executor/celestia/node.go @@ -4,9 +4,10 @@ import ( "context" "fmt" + "github.com/pkg/errors" + sdk "github.com/cosmos/cosmos-sdk/types" - btypes 
"github.com/initia-labs/opinit-bots/node/broadcaster/types" "github.com/initia-labs/opinit-bots/txutils" celestiatypes "github.com/initia-labs/opinit-bots/types/celestia" ) @@ -35,16 +36,16 @@ func (c *Celestia) BuildTxWithMessages( broadcasterAccount, err := c.node.MustGetBroadcaster().AccountByIndex(0) if err != nil { - return nil, "", err + return nil, "", errors.Wrap(err, "failed to calculate gas") } tx, err := broadcasterAccount.SimulateAndSignTx(ctx, pfbMsgs...) if err != nil { - return nil, "", err + return nil, "", errors.Wrap(err, "failed to simulate and sign tx") } txConfig := c.node.GetTxConfig() txBytes, err = txutils.EncodeTx(txConfig, tx) if err != nil { - return nil, "", err + return nil, "", errors.Wrap(err, "failed to encode tx") } blobTx := celestiatypes.BlobTx{ @@ -54,10 +55,10 @@ func (c *Celestia) BuildTxWithMessages( } blobTxBytes, err := blobTx.Marshal() if err != nil { - return nil, "", err + return nil, "", errors.Wrap(err, "failed to marshal blob tx") } - return blobTxBytes, btypes.TxHash(txBytes), nil + return blobTxBytes, txutils.TxHash(txBytes), nil } func (c *Celestia) PendingTxToProcessedMsgs( @@ -69,7 +70,7 @@ func (c *Celestia) PendingTxToProcessedMsgs( if err := blobTx.Unmarshal(txBytes); err == nil { pfbTx, err := txutils.DecodeTx(txConfig, blobTx.Tx) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to decode blob tx") } pfbMsg := pfbTx.GetMsgs()[0] @@ -83,7 +84,7 @@ func (c *Celestia) PendingTxToProcessedMsgs( tx, err := txutils.DecodeTx(txConfig, txBytes) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to decode tx") } return tx.GetMsgs(), nil } diff --git a/executor/child/child.go b/executor/child/child.go index a9ddb07..0706971 100644 --- a/executor/child/child.go +++ b/executor/child/child.go @@ -4,25 +4,29 @@ import ( "context" "time" - "go.uber.org/zap" - sdk "github.com/cosmos/cosmos-sdk/types" opchildtypes "github.com/initia-labs/OPinit/x/opchild/types" ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/cosmos/cosmos-sdk/codec" + btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" nodetypes "github.com/initia-labs/opinit-bots/node/types" "github.com/initia-labs/opinit-bots/types" childprovider "github.com/initia-labs/opinit-bots/provider/child" + + "github.com/pkg/errors" ) type hostNode interface { - HasKey() bool - BaseAccountAddressString() (string, error) - BroadcastMsgs(btypes.ProcessedMsgs) - ProcessedMsgsToRawKV([]btypes.ProcessedMsgs, bool) ([]types.RawKV, error) + DB() types.DB + Codec() codec.Codec + + HasBroadcaster() bool + BroadcastProcessedMsgs(...btypes.ProcessedMsgs) + QueryLastOutput(context.Context, uint64) (*ophosttypes.QueryOutputProposalResponse, error) QueryOutput(context.Context, uint64, uint64, int64) (*ophosttypes.QueryOutputProposalResponse, error) @@ -43,24 +47,22 @@ type Child struct { lastFinalizedDepositL1Sequence uint64 lastOutputTime time.Time - batchKVs []types.RawKV - addressIndexMap map[string]uint64 + stage types.CommitDB } func NewChildV1( cfg nodetypes.NodeConfig, - db types.DB, logger *zap.Logger, + db types.DB, ) *Child { return &Child{ - BaseChild: childprovider.NewBaseChildV1(cfg, db, logger), - batchKVs: make([]types.RawKV, 0), - addressIndexMap: make(map[string]uint64), + BaseChild: childprovider.NewBaseChildV1(cfg, db), + stage: db.NewStage(), } } func (ch *Child) Initialize( - ctx context.Context, - processedHeight int64, + ctx types.Context, + syncedHeight int64, startOutputIndex uint64, host hostNode, bridgeInfo 
ophosttypes.QueryBridgeResponse, @@ -70,7 +72,7 @@ func (ch *Child) Initialize( ) error { l2Sequence, err := ch.BaseChild.Initialize( ctx, - processedHeight, + syncedHeight, startOutputIndex, bridgeInfo, keyringConfig, @@ -78,12 +80,12 @@ func (ch *Child) Initialize( disableDeleteFutureWithdrawals, ) if err != nil { - return err + return errors.Wrap(err, "failed to initialize base child") } if l2Sequence != 0 { - err = ch.DeleteFutureWithdrawals(l2Sequence) + err = DeleteFutureWithdrawals(ch.DB(), l2Sequence) if err != nil { - return err + return errors.Wrap(err, "failed to delete future withdrawals") } } diff --git a/executor/child/common_test.go b/executor/child/common_test.go new file mode 100644 index 0000000..db2c277 --- /dev/null +++ b/executor/child/common_test.go @@ -0,0 +1,117 @@ +package child + +import ( + "context" + + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" + "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type mockHost struct { + db types.DB + cdc codec.Codec + bridgeId uint64 + baseAccount string + outputs map[uint64]ophosttypes.Output + processedMsgs []btypes.ProcessedMsgs +} + +func NewMockHost(db types.DB, cdc codec.Codec, bridgeId uint64, baseAccount string, outputs map[uint64]ophosttypes.Output) *mockHost { + return &mockHost{ + db: db, + cdc: cdc, + bridgeId: bridgeId, + baseAccount: baseAccount, + outputs: outputs, + processedMsgs: make([]btypes.ProcessedMsgs, 0), + } +} + +func (m *mockHost) DB() types.DB { + return m.db +} + +func (m *mockHost) Codec() codec.Codec { + return m.cdc +} + +func (m *mockHost) HasBroadcaster() bool { + return m.baseAccount != "" +} + +func (m *mockHost) BroadcastProcessedMsgs(msgs ...btypes.ProcessedMsgs) { + m.processedMsgs = append(m.processedMsgs, msgs...) 
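+	// the mock does not broadcast anything; it only records msgs so tests can assert on them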
+} + +func (m *mockHost) GetMsgProposeOutput( + bridgeId uint64, + outputIndex uint64, + l2BlockNumber int64, + outputRoot []byte, +) (sdk.Msg, string, error) { + if m.baseAccount == "" { + return nil, "", nil + } + + msg := ophosttypes.NewMsgProposeOutput( + m.baseAccount, + bridgeId, + outputIndex, + types.MustInt64ToUint64(l2BlockNumber), + outputRoot, + ) + return msg, m.baseAccount, nil +} + +func (m *mockHost) QueryLastOutput(ctx context.Context, bridgeId uint64) (*ophosttypes.QueryOutputProposalResponse, error) { + if m.bridgeId != bridgeId { + return nil, nil + } + + lastIndex := uint64(0) + for outputIndex := range m.outputs { + if lastIndex < outputIndex { + lastIndex = outputIndex + } + } + + if _, ok := m.outputs[lastIndex]; !ok { + return nil, errors.New("collections: not found") + } + + return &ophosttypes.QueryOutputProposalResponse{ + BridgeId: bridgeId, + OutputIndex: lastIndex, + OutputProposal: m.outputs[lastIndex], + }, nil +} + +func (m *mockHost) QueryOutput(ctx context.Context, bridgeId uint64, outputIndex uint64, height int64) (*ophosttypes.QueryOutputProposalResponse, error) { + if m.bridgeId != bridgeId { + return nil, nil + } + + if _, ok := m.outputs[outputIndex]; !ok { + return nil, errors.New("collections: not found") + } + + return &ophosttypes.QueryOutputProposalResponse{ + BridgeId: bridgeId, + OutputIndex: outputIndex, + OutputProposal: m.outputs[outputIndex], + }, nil +} + +var _ hostNode = (*mockHost)(nil) + +func logCapturer() (*zap.Logger, *observer.ObservedLogs) { + core, logs := observer.New(zap.DebugLevel) + return zap.New(core), logs +} diff --git a/executor/child/db.go b/executor/child/db.go new file mode 100644 index 0000000..cdf720b --- /dev/null +++ b/executor/child/db.go @@ -0,0 +1,100 @@ +package child + +import ( + dbtypes "github.com/initia-labs/opinit-bots/db/types" + executortypes "github.com/initia-labs/opinit-bots/executor/types" + "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" +) + +// GetWithdrawal returns the withdrawal data for the given sequence from the database +func GetWithdrawal(db types.BasicDB, sequence uint64) (executortypes.WithdrawalData, error) { + dataBytes, err := db.Get(executortypes.PrefixedWithdrawalSequence(sequence)) + if err != nil { + return executortypes.WithdrawalData{}, errors.Wrap(err, "failed to get withdrawal data from db") + } + data := executortypes.WithdrawalData{} + err = data.Unmarshal(dataBytes) + return data, err +} + +func GetWithdrawalByAddress(db types.BasicDB, address string, sequence uint64) (uint64, error) { + dataBytes, err := db.Get(executortypes.PrefixedWithdrawalAddressSequence(address, sequence)) + if err != nil { + return 0, errors.Wrap(err, "failed to get withdrawal data sequence from db") + } + return dbtypes.ToUint64(dataBytes) +} + +// GetSequencesByAddress returns the withdrawal sequences for the given address from the database +func GetSequencesByAddress(db types.DB, address string, offset uint64, limit uint64, descOrder bool) (sequences []uint64, next uint64, err error) { + count := uint64(0) + fetchFn := func(key, value []byte) (bool, error) { + sequence, err := dbtypes.ToUint64(value) + if err != nil { + return true, errors.Wrap(err, "failed to convert value to uint64") + } + if count >= limit { + next = sequence + return true, nil + } + sequences = append(sequences, sequence) + count++ + return false, nil + } + + if descOrder { + var startKey []byte + if offset != 0 { + startKey = executortypes.PrefixedWithdrawalAddressSequence(address, offset) + } + err = 
db.ReverseIterate(dbtypes.AppendSplitter(executortypes.PrefixedWithdrawalAddress(address)), startKey, fetchFn) + if err != nil { + return nil, 0, errors.Wrap(err, "failed to iterate withdrawal address indices") + } + } else { + startKey := executortypes.PrefixedWithdrawalAddressSequence(address, offset) + err := db.Iterate(dbtypes.AppendSplitter(executortypes.PrefixedWithdrawalAddress(address)), startKey, fetchFn) + if err != nil { + return nil, 0, err + } + } + return sequences, next, nil +} + +func SaveWithdrawal(db types.BasicDB, data executortypes.WithdrawalData) error { + dataBytes, err := data.Marshal() + if err != nil { + return err + } + + err = db.Set(executortypes.PrefixedWithdrawalSequence(data.Sequence), dataBytes) + if err != nil { + return errors.Wrap(err, "failed to save withdrawal data") + } + err = db.Set(executortypes.PrefixedWithdrawalAddressSequence(data.To, data.Sequence), dbtypes.FromUint64(data.Sequence)) + if err != nil { + return errors.Wrap(err, "failed to save withdrawal address index") + } + return nil +} + +// DeleteFutureWithdrawals deletes all future withdrawals from the database starting from the given sequence +func DeleteFutureWithdrawals(db types.DB, fromSequence uint64) error { + return db.Iterate(dbtypes.AppendSplitter(executortypes.WithdrawalSequencePrefix), executortypes.PrefixedWithdrawalSequence(fromSequence), func(key, value []byte) (bool, error) { + data := executortypes.WithdrawalData{} + err := data.Unmarshal(value) + if err != nil { + return true, err + } + err = db.Delete(executortypes.PrefixedWithdrawalAddressSequence(data.To, data.Sequence)) + if err != nil { + return true, errors.Wrap(err, "failed to delete withdrawal address index") + } + err = db.Delete(key) + if err != nil { + return true, errors.Wrap(err, "failed to delete withdrawal data") + } + return false, nil + }) +} diff --git a/executor/child/db_test.go b/executor/child/db_test.go new file mode 100644 index 0000000..bc51022 --- /dev/null +++ b/executor/child/db_test.go @@ -0,0 +1,201 @@ +package child + +import ( + "testing" + + "github.com/initia-labs/opinit-bots/db" + executortypes "github.com/initia-labs/opinit-bots/executor/types" + "github.com/stretchr/testify/require" +) + +func TestSaveGetWithdrawal(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + + _, err = GetWithdrawal(db, 10) + require.Error(t, err) + + _, err = GetWithdrawalByAddress(db, "to", 10) + require.Error(t, err) + + withdrawal := executortypes.WithdrawalData{ + Sequence: 10, + To: "to", + } + err = SaveWithdrawal(db, withdrawal) + require.NoError(t, err) + + w, err := GetWithdrawal(db, 10) + require.NoError(t, err) + require.Equal(t, withdrawal, w) + + sequence, err := GetWithdrawalByAddress(db, "to", 10) + require.NoError(t, err) + require.Equal(t, uint64(10), sequence) +} + +func TestGetSequencesByAddress(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + + for i := 1; i <= 50; i += 3 { + withdrawal := executortypes.WithdrawalData{ + Sequence: uint64(i), + To: "addr0", + } + err = SaveWithdrawal(db, withdrawal) + require.NoError(t, err) + } + + cases := []struct { + name string + address string + offset uint64 + limit uint64 + descOrder bool + + expectedSequences []uint64 + expectedNext uint64 + expected bool + }{ + { + name: "offset 0, limit 10, desc", + address: "addr0", + offset: 0, + limit: 10, + descOrder: true, + + expectedSequences: []uint64{49, 46, 43, 40, 37, 34, 31, 28, 25, 22}, + expectedNext: 19, + expected: false, + }, + { + name: "offset 0, limit 10, 
asc", + address: "addr0", + offset: 0, + limit: 10, + descOrder: false, + + expectedSequences: []uint64{1, 4, 7, 10, 13, 16, 19, 22, 25, 28}, + expectedNext: 31, + expected: false, + }, + { + name: "offset 0, limit 100, asc", + address: "addr0", + offset: 0, + limit: 100, + descOrder: false, + + expectedSequences: []uint64{1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40, 43, 46, 49}, + expectedNext: 0, + expected: false, + }, + { + name: "offset 26, limit 3, asc", + address: "addr0", + offset: 26, + limit: 3, + descOrder: false, + + expectedSequences: []uint64{28, 31, 34}, + expectedNext: 37, + expected: false, + }, + { + name: "offset 26, limit 3, desc", + address: "addr0", + offset: 26, + limit: 3, + descOrder: true, + + expectedSequences: []uint64{25, 22, 19}, + expectedNext: 16, + expected: false, + }, + { + name: "offset 100, limit 100, asc", + address: "addr0", + offset: 100, + limit: 100, + descOrder: false, + + expectedSequences: nil, + expectedNext: 0, + expected: false, + }, + { + name: "addr1", + address: "addr1", + offset: 0, + limit: 10, + descOrder: true, + + expectedSequences: nil, + expectedNext: 0, + expected: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + sequences, next, err := GetSequencesByAddress(db, tc.address, tc.offset, tc.limit, tc.descOrder) + + if !tc.expected { + require.NoError(t, err) + require.Equal(t, tc.expectedSequences, sequences) + require.Equal(t, tc.expectedNext, next) + } else { + require.Error(t, err) + } + }) + } +} + +func TestDeleteFutureWithdrawals(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + + for i := 1; i <= 10; i++ { + withdrawal := executortypes.WithdrawalData{ + Sequence: uint64(i), + To: "to", + } + err = SaveWithdrawal(db, withdrawal) + require.NoError(t, err) + } + + err = DeleteFutureWithdrawals(db, 11) + require.NoError(t, err) + for i := 1; i <= 10; i++ { + _, err := GetWithdrawal(db, uint64(i)) + require.NoError(t, err) + _, err = GetWithdrawalByAddress(db, "to", uint64(i)) + require.NoError(t, err) + } + + err = DeleteFutureWithdrawals(db, 5) + require.NoError(t, err) + for i := 1; i <= 4; i++ { + w, err := GetWithdrawal(db, uint64(i)) + require.NoError(t, err) + require.Equal(t, uint64(i), w.Sequence) + _, err = GetWithdrawalByAddress(db, "to", uint64(i)) + require.NoError(t, err) + } + for i := 5; i <= 10; i++ { + _, err := GetWithdrawal(db, uint64(i)) + require.Error(t, err) + _, err = GetWithdrawalByAddress(db, "to", uint64(i)) + require.Error(t, err) + } + + err = DeleteFutureWithdrawals(db, 0) + require.NoError(t, err) + for i := 1; i <= 10; i++ { + _, err := GetWithdrawal(db, uint64(i)) + require.Error(t, err) + _, err = GetWithdrawalByAddress(db, "to", uint64(i)) + require.Error(t, err) + } +} diff --git a/executor/child/deposit.go b/executor/child/deposit.go index 87b0522..2617fe9 100644 --- a/executor/child/deposit.go +++ b/executor/child/deposit.go @@ -1,28 +1,22 @@ package child import ( - "context" - nodetypes "github.com/initia-labs/opinit-bots/node/types" childprovider "github.com/initia-labs/opinit-bots/provider/child" + "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" "go.uber.org/zap" - - sdk "github.com/cosmos/cosmos-sdk/types" ) -func (ch *Child) finalizeDepositHandler(_ context.Context, args nodetypes.EventHandlerArgs) error { +func (ch *Child) finalizeDepositHandler(ctx types.Context, args nodetypes.EventHandlerArgs) error { l1BlockHeight, l1Sequence, from, to, baseDenom, amount, err := 
childprovider.ParseFinalizeDeposit(args.EventAttributes) if err != nil { - return err + return errors.Wrap(err, "failed to parse finalize deposit event") } - ch.handleFinalizeDeposit(l1BlockHeight, l1Sequence, from, to, amount, baseDenom) ch.lastFinalizedDepositL1BlockHeight = l1BlockHeight ch.lastFinalizedDepositL1Sequence = l1Sequence - return nil -} -func (ch *Child) handleFinalizeDeposit(l1BlockHeight int64, l1Sequence uint64, from string, to string, amount sdk.Coin, baseDenom string) { - ch.Logger().Info("finalize token deposit", + ctx.Logger().Info("finalize token deposit", zap.Int64("l1_blockHeight", l1BlockHeight), zap.Uint64("l1_sequence", l1Sequence), zap.String("from", from), @@ -30,4 +24,5 @@ func (ch *Child) handleFinalizeDeposit(l1BlockHeight int64, l1Sequence uint64, f zap.String("amount", amount.String()), zap.String("base_denom", baseDenom), ) + return nil } diff --git a/executor/child/deposit_test.go b/executor/child/deposit_test.go new file mode 100644 index 0000000..e804abe --- /dev/null +++ b/executor/child/deposit_test.go @@ -0,0 +1,192 @@ +package child + +import ( + "context" + "strconv" + "testing" + + abcitypes "github.com/cometbft/cometbft/abci/types" + opchildtypes "github.com/initia-labs/OPinit/x/opchild/types" + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/initia-labs/opinit-bots/db" + "github.com/initia-labs/opinit-bots/node" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + childprovider "github.com/initia-labs/opinit-bots/provider/child" + "github.com/initia-labs/opinit-bots/types" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func FinalizeDepositEvents( + l1Sequence uint64, + sender string, + recipient string, + denom string, + baseDenom string, + amount sdk.Coin, + finalizeHeight uint64, +) []abcitypes.EventAttribute { + return []abcitypes.EventAttribute{ + { + Key: opchildtypes.AttributeKeyL1Sequence, + Value: strconv.FormatUint(l1Sequence, 10), + }, + { + Key: opchildtypes.AttributeKeySender, + Value: sender, + }, + { + Key: opchildtypes.AttributeKeyRecipient, + Value: recipient, + }, + { + Key: opchildtypes.AttributeKeyDenom, + Value: denom, + }, + { + Key: opchildtypes.AttributeKeyBaseDenom, + Value: baseDenom, + }, + { + Key: opchildtypes.AttributeKeyAmount, + Value: amount.Amount.String(), + }, + { + Key: opchildtypes.AttributeKeyFinalizeHeight, + Value: strconv.FormatUint(finalizeHeight, 10), + }, + } +} + +func TestFinalizeDepositHandler(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + + childNode := node.NewTestNode(nodetypes.NodeConfig{}, db.WithPrefix([]byte("test_child")), nil, nil, nil, nil) + bridgeInfo := ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + } + + ch := Child{ + BaseChild: childprovider.NewTestBaseChild(0, childNode, nil, bridgeInfo, nil, nodetypes.NodeConfig{}), + } + + fullAttributes := FinalizeDepositEvents(1, "sender", "recipient", "denom", "baseDenom", sdk.NewInt64Coin("uinit", 10000), 2) + + cases := []struct { + name string + eventHandlerArgs nodetypes.EventHandlerArgs + expected func() (msg string, fields []zapcore.Field) + err bool + }{ + { + name: "success", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: FinalizeDepositEvents(1, "sender", "recipient", "denom", "baseDenom", sdk.NewInt64Coin("uinit", 10000), 2), + }, + expected: func() (msg string, fields []zapcore.Field) { + msg = "finalize token deposit" + fields = []zapcore.Field{ + 
zap.Int64("l1_blockHeight", 2), + zap.Uint64("l1_sequence", 1), + zap.String("from", "sender"), + zap.String("to", "recipient"), + zap.String("amount", "10000denom"), + zap.String("base_denom", "baseDenom"), + } + return msg, fields + }, + err: false, + }, + { + name: "missing event attribute l1 sequence", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: fullAttributes[1:], + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute sender", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:1], fullAttributes[2:]...), + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute recipient", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:2], fullAttributes[3:]...), + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute l1 denom", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:3], fullAttributes[4:]...), + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute l2 denom", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:4], fullAttributes[5:]...), + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute amount", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:5], fullAttributes[6:]...), + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute finalize height", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: fullAttributes[:6], + }, + expected: nil, + err: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + logger, observedLogs := logCapturer() + ctx := types.NewContext(context.Background(), logger, "") + + err := ch.finalizeDepositHandler(ctx, tc.eventHandlerArgs) + if !tc.err { + require.NoError(t, err) + if tc.expected != nil { + logs := observedLogs.TakeAll() + require.Len(t, logs, 1) + + expectedMsg, expectedFields := tc.expected() + require.Equal(t, expectedMsg, logs[0].Message) + require.Equal(t, expectedFields, logs[0].Context) + + expectedL1Height, err := strconv.ParseInt(tc.eventHandlerArgs.EventAttributes[6].Value, 10, 64) + require.NoError(t, err) + expectedL1Sequence, err := strconv.ParseUint(tc.eventHandlerArgs.EventAttributes[0].Value, 10, 64) + require.NoError(t, err) + + require.Equal(t, expectedL1Height, ch.lastFinalizedDepositL1BlockHeight) + require.Equal(t, expectedL1Sequence, ch.lastFinalizedDepositL1Sequence) + } + } else { + require.Error(t, err) + } + }) + } + require.NoError(t, err) +} diff --git a/executor/child/handler.go b/executor/child/handler.go index 749730b..f48000e 100644 --- a/executor/child/handler.go +++ b/executor/child/handler.go @@ -1,96 +1,72 @@ package child import ( - "context" - "errors" - "slices" - "time" - - btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" + "github.com/initia-labs/opinit-bots/node" + "github.com/initia-labs/opinit-bots/node/broadcaster" nodetypes "github.com/initia-labs/opinit-bots/node/types" - "golang.org/x/exp/maps" + "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" ) -func (ch *Child) beginBlockHandler(ctx context.Context, args nodetypes.BeginBlockArgs) (err error) { - blockHeight := args.Block.Header.Height +func (ch *Child) beginBlockHandler(ctx types.Context, args nodetypes.BeginBlockArgs) error { ch.EmptyMsgQueue() ch.EmptyProcessedMsgs() - ch.batchKVs = ch.batchKVs[:0] - 
maps.Clear(ch.addressIndexMap)
+	ch.stage.Reset()
 
-	if ch.Merkle() == nil {
-		return errors.New("merkle is not initialized")
-	}
-
-	err = ch.prepareTree(blockHeight)
+	err := ch.prepareTree(args.Block.Header.Height)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to prepare tree")
 	}
 
 	err = ch.prepareOutput(ctx)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to prepare output")
 	}
 	return nil
 }
 
-func (ch *Child) endBlockHandler(_ context.Context, args nodetypes.EndBlockArgs) error {
+func (ch *Child) endBlockHandler(ctx types.Context, args nodetypes.EndBlockArgs) error {
 	blockHeight := args.Block.Header.Height
-	treeKVs, storageRoot, err := ch.handleTree(blockHeight, args.LatestHeight, args.BlockID, args.Block.Header)
+	storageRoot, err := ch.handleTree(ctx, blockHeight, args.LatestHeight, args.BlockID, args.Block.Header)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to handle tree")
 	}
-	ch.batchKVs = append(ch.batchKVs, treeKVs...)
 
 	if storageRoot != nil {
-		workingTreeIndex, err := ch.GetWorkingTreeIndex()
+		workingTree, err := ch.WorkingTree()
 		if err != nil {
-			return err
+			return errors.Wrap(err, "failed to get working tree")
 		}
-		err = ch.handleOutput(blockHeight, ch.Version(), args.BlockID, workingTreeIndex, storageRoot)
+		err = ch.handleOutput(blockHeight, ch.Version(), args.BlockID, workingTree.Index, storageRoot)
 		if err != nil {
-			return err
+			return errors.Wrap(err, "failed to handle output")
 		}
 	}
 
 	// update the sync info
-	ch.batchKVs = append(ch.batchKVs, ch.Node().SyncInfoToRawKV(blockHeight))
+	err = node.SetSyncedHeight(ch.stage, args.Block.Header.Height)
+	if err != nil {
+		return errors.Wrap(err, "failed to set synced height")
+	}
 
-	// if has key, then process the messages
-	if ch.host.HasKey() {
-		msgQueues := ch.GetMsgQueue()
-
-		for sender := range msgQueues {
-			msgQueue := msgQueues[sender]
-			for i := 0; i < len(msgQueue); i += 5 {
-				end := i + 5
-				if end > len(msgQueue) {
-					end = len(msgQueue)
-				}
+	// if the host has a broadcaster, then process the messages
+	if ch.host.HasBroadcaster() {
+		ch.AppendProcessedMsgs(broadcaster.MsgsToProcessedMsgs(ch.GetMsgQueue())...)
 
-				ch.AppendProcessedMsgs(btypes.ProcessedMsgs{
-					Sender:    sender,
-					Msgs:      slices.Clone(msgQueue[i:end]),
-					Timestamp: time.Now().UnixNano(),
-					Save:      true,
-				})
-			}
-		}
-
-		msgKVs, err := ch.host.ProcessedMsgsToRawKV(ch.GetProcessedMsgs(), false)
+		// save processed msgs to stage using host db
+		err := broadcaster.SaveProcessedMsgsBatch(ch.stage.WithPrefixedKey(ch.host.DB().PrefixedKey), ch.host.Codec(), ch.GetProcessedMsgs())
 		if err != nil {
-			return err
+			return errors.Wrap(err, "failed to save processed msgs")
 		}
-		ch.batchKVs = append(ch.batchKVs, msgKVs...)
+	} else {
+		ch.EmptyProcessedMsgs()
 	}
 
-	err = ch.DB().RawBatchSet(ch.batchKVs...)
+	err = ch.stage.Commit()
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to commit stage")
 	}
 
-	for _, processedMsg := range ch.GetProcessedMsgs() {
-		ch.host.BroadcastMsgs(processedMsg)
-	}
+	ch.host.BroadcastProcessedMsgs(ch.GetProcessedMsgs()...)
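+	// msgs are broadcast only after the stage commit above succeeds, so nothing is sent for state that failed to persist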
return nil } diff --git a/executor/child/handler_test.go b/executor/child/handler_test.go new file mode 100644 index 0000000..f35c2dc --- /dev/null +++ b/executor/child/handler_test.go @@ -0,0 +1,278 @@ +package child + +import ( + "testing" + + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/initia-labs/opinit-bots/db" + "github.com/initia-labs/opinit-bots/merkle" + merkletypes "github.com/initia-labs/opinit-bots/merkle/types" + "github.com/initia-labs/opinit-bots/node" + btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + childprovider "github.com/initia-labs/opinit-bots/provider/child" + hostprovider "github.com/initia-labs/opinit-bots/provider/host" + "github.com/initia-labs/opinit-bots/types" + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func TestBeginBlockHandler(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + childDB := db.WithPrefix([]byte("test_child")) + childNode := node.NewTestNode(nodetypes.NodeConfig{}, childDB, nil, nil, nil, nil) + + err = childDB.Set( + append([]byte("working_tree/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}...), + []byte(`{"version":5,"index":1,"leaf_count":2,"start_leaf_index":1,"last_siblings":{},"done":false}`), + ) + require.NoError(t, err) + + mk, err := merkle.NewMerkle(ophosttypes.GenerateNodeHash) + require.NoError(t, err) + + ch := Child{ + BaseChild: childprovider.NewTestBaseChild(0, childNode, mk, ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + }, nil, nodetypes.NodeConfig{}), + host: NewMockHost(nil, nil, 1, "", nil), + stage: db.NewStage(), + } + + msgQueue := ch.GetMsgQueue() + require.Empty(t, msgQueue["init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5"]) + ch.AppendMsgQueue(ophosttypes.NewMsgProposeOutput("init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", 1, 1, 1, []byte("oracle_tx")), "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5") + msgQueue = ch.GetMsgQueue() + require.Len(t, msgQueue["init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5"], 1) + + msgs := ch.GetProcessedMsgs() + require.Empty(t, msgs) + ch.AppendProcessedMsgs(btypes.ProcessedMsgs{}) + msgs = ch.GetProcessedMsgs() + require.Len(t, msgs, 1) + + require.Equal(t, 0, ch.stage.Len()) + err = ch.stage.Set([]byte("key"), []byte("value")) + require.NoError(t, err) + require.Equal(t, 1, ch.stage.Len()) + + err = ch.beginBlockHandler(types.Context{}, nodetypes.BeginBlockArgs{ + Block: cmtproto.Block{ + Header: cmtproto.Header{ + Height: 6, + }, + }, + }) + require.NoError(t, err) + + msgQueue = ch.GetMsgQueue() + require.Empty(t, msgQueue["init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5"]) + + msgs = ch.GetProcessedMsgs() + require.Empty(t, msgs) + + require.Equal(t, 0, ch.stage.Len()) +} + +func TestEndBlockHandler(t *testing.T) { + hostCodec, _, err := hostprovider.GetCodec("init") + + mockCount := int64(0) + mockTimestampFetcher := func() int64 { + mockCount++ + return mockCount + } + types.CurrentNanoTimestamp = mockTimestampFetcher + + cases := []struct { + name string + host *mockHost + msgQueue map[string][]sdk.Msg + processedMsgs []btypes.ProcessedMsgs + dbChanges []types.KV + endBlockArgs nodetypes.EndBlockArgs + expectedProcessedMsgs []btypes.ProcessedMsgs + expectedDB []types.KV + err bool + }{ + { + name: "success", + host: NewMockHost(nil, hostCodec, 1, "sender0", nil), + msgQueue: map[string][]sdk.Msg{ + "sender0": 
{&ophosttypes.MsgProposeOutput{}}, + }, + processedMsgs: []btypes.ProcessedMsgs{ + { + Sender: "sender0", + Msgs: []sdk.Msg{&ophosttypes.MsgProposeOutput{}}, + Timestamp: 10000, + Save: true, + }, + }, + dbChanges: []types.KV{ + { + Key: []byte("key1"), + Value: []byte("value1"), + }, + }, + endBlockArgs: nodetypes.EndBlockArgs{ + Block: cmtproto.Block{ + Header: cmtproto.Header{ + Height: 10, + }, + }, + }, + expectedProcessedMsgs: []btypes.ProcessedMsgs{ + { + Sender: "sender0", + Msgs: []sdk.Msg{&ophosttypes.MsgProposeOutput{}}, + Timestamp: 10000, + Save: true, + }, + { + Sender: "sender0", + Msgs: []sdk.Msg{&ophosttypes.MsgProposeOutput{}}, + Timestamp: 1, + Save: true, + }, + }, + expectedDB: []types.KV{ + { + Key: []byte("test_child/key1"), + Value: []byte("value1"), + }, + { + Key: []byte("test_child/synced_height"), + Value: []byte("10"), + }, + { + Key: append([]byte("test_host/processed_msgs/"), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x27, 0x10}...), + Value: []byte(`{"sender":"sender0","msgs":["{\"@type\":\"/opinit.ophost.v1.MsgProposeOutput\",\"proposer\":\"\",\"bridge_id\":\"0\",\"output_index\":\"0\",\"l2_block_number\":\"0\",\"output_root\":null}"],"timestamp":10000,"save":true}`), + }, + { + Key: append([]byte("test_host/processed_msgs/"), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}...), + Value: []byte(`{"sender":"sender0","msgs":["{\"@type\":\"/opinit.ophost.v1.MsgProposeOutput\",\"proposer\":\"\",\"bridge_id\":\"0\",\"output_index\":\"0\",\"l2_block_number\":\"0\",\"output_root\":null}"],"timestamp":1,"save":true}`), + }, + }, + err: false, + }, + { + name: "empty changes", + host: NewMockHost(nil, hostCodec, 1, "sender0", nil), + msgQueue: nil, + processedMsgs: nil, + dbChanges: nil, + endBlockArgs: nodetypes.EndBlockArgs{ + Block: cmtproto.Block{ + Header: cmtproto.Header{ + Height: 15, + }, + }, + }, + expectedProcessedMsgs: nil, + expectedDB: []types.KV{ + { + Key: []byte("test_child/synced_height"), + Value: []byte("15"), + }, + }, + err: false, + }, + { + name: "host no broadcaster", + host: NewMockHost(nil, hostCodec, 1, "", nil), + msgQueue: map[string][]sdk.Msg{ + "sender0": {&ophosttypes.MsgProposeOutput{}}, + }, + processedMsgs: []btypes.ProcessedMsgs{ + { + Sender: "sender0", + Msgs: []sdk.Msg{&ophosttypes.MsgProposeOutput{}}, + Timestamp: 10000, + Save: true, + }, + }, + dbChanges: nil, + endBlockArgs: nodetypes.EndBlockArgs{ + Block: cmtproto.Block{ + Header: cmtproto.Header{ + Height: 10, + }, + }, + }, + expectedProcessedMsgs: nil, + expectedDB: []types.KV{ + { + Key: []byte("test_child/synced_height"), + Value: []byte("10"), + }, + }, + err: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + basedb, err := db.NewMemDB() + require.NoError(t, err) + + childdb := basedb.WithPrefix([]byte("test_child")) + childNode := node.NewTestNode(nodetypes.NodeConfig{}, childdb, nil, nil, nil, nil) + stage := childdb.NewStage().(db.Stage) + mk, err := merkle.NewMerkle(ophosttypes.GenerateNodeHash) + require.NoError(t, err) + err = mk.PrepareWorkingTree(merkletypes.TreeInfo{ + Version: 5, + Index: 1, + LeafCount: 2, + StartLeafIndex: 1, + LastSiblings: map[uint8][]byte{}, + Done: false, + }) + require.NoError(t, err) + tc.host.db = basedb.WithPrefix([]byte("test_host")) + ch := Child{ + BaseChild: childprovider.NewTestBaseChild(0, childNode, mk, ophosttypes.QueryBridgeResponse{}, nil, nodetypes.NodeConfig{}), + host: tc.host, + stage: stage, + } + + for sender, msgs := range tc.msgQueue { + for _, msg := range msgs { + 
ch.AppendMsgQueue(msg, sender) + } + } + for _, processedMsgs := range tc.processedMsgs { + ch.AppendProcessedMsgs(processedMsgs) + } + + for _, kv := range tc.dbChanges { + err := ch.stage.Set(kv.Key, kv.Value) + require.NoError(t, err) + } + + err = ch.endBlockHandler(types.Context{}, tc.endBlockArgs) + if !tc.err { + require.NoError(t, err) + for i := range tc.expectedProcessedMsgs { + expectedMsg, err := tc.expectedProcessedMsgs[i].MarshalInterfaceJSON(hostCodec) + require.NoError(t, err) + actualMsg, err := tc.host.processedMsgs[i].MarshalInterfaceJSON(hostCodec) + require.NoError(t, err) + require.Equal(t, expectedMsg, actualMsg) + } + for _, kv := range tc.expectedDB { + value, err := basedb.Get(kv.Key) + require.NoError(t, err) + require.Equal(t, kv.Value, value) + } + } else { + require.Error(t, err) + } + }) + } + require.NoError(t, err) +} diff --git a/executor/child/oracle.go b/executor/child/oracle.go index de46353..c056660 100644 --- a/executor/child/oracle.go +++ b/executor/child/oracle.go @@ -1,27 +1,23 @@ package child import ( - "context" - nodetypes "github.com/initia-labs/opinit-bots/node/types" childprovider "github.com/initia-labs/opinit-bots/provider/child" + "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" "go.uber.org/zap" ) -func (ch *Child) updateOracleHandler(_ context.Context, args nodetypes.EventHandlerArgs) error { +func (ch *Child) updateOracleHandler(ctx types.Context, args nodetypes.EventHandlerArgs) error { l1BlockHeight, from, err := childprovider.ParseUpdateOracle(args.EventAttributes) if err != nil { - return err + return errors.Wrap(err, "failed to parse update oracle event") } - ch.handleUpdateOracle(l1BlockHeight, from) ch.lastUpdatedOracleL1Height = l1BlockHeight - return nil -} - -func (ch *Child) handleUpdateOracle(l1BlockHeight int64, from string) { - ch.Logger().Info("update oracle", + ctx.Logger().Info("update oracle", zap.Int64("l1_blockHeight", l1BlockHeight), zap.String("from", from), ) + return nil } diff --git a/executor/child/oracle_test.go b/executor/child/oracle_test.go new file mode 100644 index 0000000..51b313c --- /dev/null +++ b/executor/child/oracle_test.go @@ -0,0 +1,118 @@ +package child + +import ( + "context" + "strconv" + "testing" + + abcitypes "github.com/cometbft/cometbft/abci/types" + opchildtypes "github.com/initia-labs/OPinit/x/opchild/types" + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/initia-labs/opinit-bots/db" + "github.com/initia-labs/opinit-bots/node" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + childprovider "github.com/initia-labs/opinit-bots/provider/child" + "github.com/initia-labs/opinit-bots/types" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func UpdateOracleEvents( + l1BlockHeight uint64, + from string, +) []abcitypes.EventAttribute { + return []abcitypes.EventAttribute{ + { + Key: opchildtypes.AttributeKeyHeight, + Value: strconv.FormatUint(l1BlockHeight, 10), + }, + { + Key: opchildtypes.AttributeKeyFrom, + Value: from, + }, + } +} + +func TestUpdateOracleHandler(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + + childNode := node.NewTestNode(nodetypes.NodeConfig{}, db.WithPrefix([]byte("test_child")), nil, nil, nil, nil) + bridgeInfo := ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + } + + ch := Child{ + BaseChild: childprovider.NewTestBaseChild(0, childNode, nil, bridgeInfo, nil, nodetypes.NodeConfig{}), + } + + fullAttributes := UpdateOracleEvents(1, "sender") + + 
cases := []struct { + name string + eventHandlerArgs nodetypes.EventHandlerArgs + expected func() (msg string, fields []zapcore.Field) + err bool + }{ + { + name: "success", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: UpdateOracleEvents(1, "sender"), + }, + expected: func() (msg string, fields []zapcore.Field) { + msg = "update oracle" + fields = []zapcore.Field{ + zap.Int64("l1_blockHeight", 1), + zap.String("from", "sender"), + } + return msg, fields + }, + err: false, + }, + { + name: "missing event attribute l1 block height", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: fullAttributes[1:], + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute from", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: fullAttributes[:1], + }, + expected: nil, + err: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + logger, observedLogs := logCapturer() + ctx := types.NewContext(context.Background(), logger, "") + + err := ch.updateOracleHandler(ctx, tc.eventHandlerArgs) + if !tc.err { + require.NoError(t, err) + if tc.expected != nil { + logs := observedLogs.TakeAll() + require.Len(t, logs, 1) + + expectedMsg, expectedFields := tc.expected() + require.Equal(t, expectedMsg, logs[0].Message) + require.Equal(t, expectedFields, logs[0].Context) + + expectedL1Height, err := strconv.ParseInt(tc.eventHandlerArgs.EventAttributes[0].Value, 10, 64) + require.NoError(t, err) + + require.Equal(t, expectedL1Height, ch.lastUpdatedOracleL1Height) + } + } else { + require.Error(t, err) + } + }) + } + require.NoError(t, err) +} diff --git a/executor/child/query.go b/executor/child/query.go index db3f4ec..cadfd20 100644 --- a/executor/child/query.go +++ b/executor/child/query.go @@ -1,21 +1,21 @@ package child import ( - "encoding/json" - "errors" - "cosmossdk.io/math" + "github.com/pkg/errors" + sdk "github.com/cosmos/cosmos-sdk/types" executortypes "github.com/initia-labs/opinit-bots/executor/types" + "github.com/initia-labs/opinit-bots/merkle" merkletypes "github.com/initia-labs/opinit-bots/merkle/types" ) func (ch Child) QueryWithdrawal(sequence uint64) (executortypes.QueryWithdrawalResponse, error) { - withdrawal, err := ch.GetWithdrawal(sequence) + withdrawal, err := GetWithdrawal(ch.DB(), sequence) if err != nil { - return executortypes.QueryWithdrawalResponse{}, err + return executortypes.QueryWithdrawalResponse{}, errors.Wrap(err, "failed to get withdrawal") } amount := sdk.NewCoin(withdrawal.BaseDenom, math.NewIntFromUint64(withdrawal.Amount)) @@ -27,20 +27,23 @@ func (ch Child) QueryWithdrawal(sequence uint64) (executortypes.QueryWithdrawalR Sequence: sequence, Amount: amount, Version: []byte{ch.Version()}, + TxHeight: withdrawal.TxHeight, + TxTime: withdrawal.TxTime, + TxHash: withdrawal.TxHash, } - proofs, outputIndex, outputRoot, extraDataBytes, err := ch.Merkle().GetProofs(sequence) + proofs, outputIndex, outputRoot, extraDataBytes, err := merkle.GetProofs(ch.DB(), sequence) if errors.Is(err, merkletypes.ErrUnfinalizedTree) { // if the tree is not finalized, we just return only withdrawal info return res, nil } else if err != nil { - return executortypes.QueryWithdrawalResponse{}, err + return executortypes.QueryWithdrawalResponse{}, errors.Wrap(err, "failed to get proofs") } treeExtraData := executortypes.TreeExtraData{} - err = json.Unmarshal(extraDataBytes, &treeExtraData) + err = treeExtraData.Unmarshal(extraDataBytes) if err != nil { - return executortypes.QueryWithdrawalResponse{}, 
err + return executortypes.QueryWithdrawalResponse{}, errors.Wrap(err, "failed to unmarshal tree extra data") } res.WithdrawalProofs = proofs res.OutputIndex = outputIndex @@ -50,15 +53,15 @@ func (ch Child) QueryWithdrawal(sequence uint64) (executortypes.QueryWithdrawalR } func (ch Child) QueryWithdrawals(address string, offset uint64, limit uint64, descOrder bool) (executortypes.QueryWithdrawalsResponse, error) { - sequences, next, err := ch.GetSequencesByAddress(address, offset, limit, descOrder) + sequences, next, err := GetSequencesByAddress(ch.DB(), address, offset, limit, descOrder) if err != nil { - return executortypes.QueryWithdrawalsResponse{}, err + return executortypes.QueryWithdrawalsResponse{}, errors.Wrap(err, "failed to get sequences by address") } withdrawals := make([]executortypes.QueryWithdrawalResponse, 0) for _, sequence := range sequences { withdrawal, err := ch.QueryWithdrawal(sequence) if err != nil { - return executortypes.QueryWithdrawalsResponse{}, err + return executortypes.QueryWithdrawalsResponse{}, errors.Wrap(err, "failed to query withdrawal") } withdrawals = append(withdrawals, withdrawal) } diff --git a/executor/child/query_test.go b/executor/child/query_test.go new file mode 100644 index 0000000..60ad4b4 --- /dev/null +++ b/executor/child/query_test.go @@ -0,0 +1,524 @@ +package child + +import ( + "encoding/json" + "fmt" + "testing" + + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/initia-labs/opinit-bots/db" + executortypes "github.com/initia-labs/opinit-bots/executor/types" + "github.com/initia-labs/opinit-bots/merkle" + merkletypes "github.com/initia-labs/opinit-bots/merkle/types" + "github.com/initia-labs/opinit-bots/node" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + childprovider "github.com/initia-labs/opinit-bots/provider/child" + "github.com/initia-labs/opinit-bots/types" + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func uint64Ptr(i uint64) *uint64 { + return &i +} + +func saveQueryData(t *testing.T, db types.DB) { + for i := 1; i <= 5; i++ { + withdrawal := executortypes.WithdrawalData{ + Sequence: uint64(i), + From: "from", + To: "to", + Amount: 100, + BaseDenom: "baseDenom", + WithdrawalHash: []byte("withdrawalHash"), + TxTime: int64(i), + TxHeight: int64(i), + TxHash: fmt.Sprintf("txHash%d", i), + } + err := SaveWithdrawal(db, withdrawal) + require.NoError(t, err) + } + + for i := 7; i <= 11; i++ { + withdrawal := executortypes.WithdrawalData{ + Sequence: uint64(i), + From: "from", + To: "to2", + Amount: 1000, + BaseDenom: "baseDenom", + WithdrawalHash: []byte("withdrawalHash"), + TxTime: int64(i), + TxHeight: int64(i), + TxHash: fmt.Sprintf("txHash%d", i), + } + err := SaveWithdrawal(db, withdrawal) + require.NoError(t, err) + } + + extraData := executortypes.NewTreeExtraData(10, 10, []byte("00000000000000000000000blockid10")) + extraDataBz, err := json.Marshal(extraData) + require.NoError(t, err) + + err = merkle.SaveFinalizedTree(db, merkletypes.FinalizedTreeInfo{ + TreeIndex: 1, + TreeHeight: 2, + Root: []byte("000000000000000000000000hash1234"), + StartLeafIndex: 1, + LeafCount: 4, + ExtraData: extraDataBz, + }) + require.NoError(t, err) + err = merkle.SaveNodes(db, []merkletypes.Node{ + {TreeIndex: 1, Height: 0, LocalNodeIndex: 0, Data: []byte("000000000000000000000000000hash1")}, + {TreeIndex: 1, Height: 0, LocalNodeIndex: 1, Data: []byte("000000000000000000000000000hash2")}, + {TreeIndex: 1, Height: 0, LocalNodeIndex: 2, Data: 
[]byte("000000000000000000000000000hash3")}, + {TreeIndex: 1, Height: 0, LocalNodeIndex: 3, Data: []byte("000000000000000000000000000hash4")}, + {TreeIndex: 1, Height: 1, LocalNodeIndex: 0, Data: []byte("00000000000000000000000000hash12")}, + {TreeIndex: 1, Height: 1, LocalNodeIndex: 1, Data: []byte("00000000000000000000000000hash34")}, + {TreeIndex: 1, Height: 2, LocalNodeIndex: 0, Data: []byte("000000000000000000000000hash1234")}, + }...) + require.NoError(t, err) + + extraData = executortypes.NewTreeExtraData(100, 100, []byte("0000000000000000000000blockid100")) + extraDataBz, err = json.Marshal(extraData) + require.NoError(t, err) + err = merkle.SaveFinalizedTree(db, merkletypes.FinalizedTreeInfo{ + TreeIndex: 2, + TreeHeight: 3, + Root: []byte("00000000000000000000hash56789999"), + StartLeafIndex: 5, + LeafCount: 5, + ExtraData: extraDataBz, + }) + require.NoError(t, err) + err = merkle.SaveNodes(db, []merkletypes.Node{ + {TreeIndex: 2, Height: 0, LocalNodeIndex: 0, Data: []byte("000000000000000000000000000hash5")}, + {TreeIndex: 2, Height: 0, LocalNodeIndex: 1, Data: []byte("000000000000000000000000000hash6")}, + {TreeIndex: 2, Height: 0, LocalNodeIndex: 2, Data: []byte("000000000000000000000000000hash7")}, + {TreeIndex: 2, Height: 0, LocalNodeIndex: 3, Data: []byte("000000000000000000000000000hash8")}, + {TreeIndex: 2, Height: 0, LocalNodeIndex: 4, Data: []byte("000000000000000000000000000hash9")}, + {TreeIndex: 2, Height: 0, LocalNodeIndex: 5, Data: []byte("000000000000000000000000000hash9")}, + {TreeIndex: 2, Height: 0, LocalNodeIndex: 6, Data: []byte("000000000000000000000000000hash9")}, + {TreeIndex: 2, Height: 0, LocalNodeIndex: 7, Data: []byte("000000000000000000000000000hash9")}, + {TreeIndex: 2, Height: 1, LocalNodeIndex: 0, Data: []byte("00000000000000000000000000hash56")}, + {TreeIndex: 2, Height: 1, LocalNodeIndex: 1, Data: []byte("00000000000000000000000000hash78")}, + {TreeIndex: 2, Height: 1, LocalNodeIndex: 2, Data: []byte("00000000000000000000000000hash99")}, + {TreeIndex: 2, Height: 1, LocalNodeIndex: 3, Data: []byte("00000000000000000000000000hash99")}, + {TreeIndex: 2, Height: 2, LocalNodeIndex: 0, Data: []byte("000000000000000000000000hash5678")}, + {TreeIndex: 2, Height: 2, LocalNodeIndex: 1, Data: []byte("000000000000000000000000hash9999")}, + {TreeIndex: 2, Height: 3, LocalNodeIndex: 0, Data: []byte("00000000000000000000hash56789999")}, + }...) + require.NoError(t, err) + + extraData = executortypes.NewTreeExtraData(1000, 1000, []byte("000000000000000000000blockid1000")) + extraDataBz, err = json.Marshal(extraData) + require.NoError(t, err) + err = merkle.SaveFinalizedTree(db, merkletypes.FinalizedTreeInfo{ + TreeIndex: 3, + TreeHeight: 1, + Root: []byte("000000000000000000000000hash1010"), + StartLeafIndex: 10, + LeafCount: 1, + ExtraData: extraDataBz, + }) + require.NoError(t, err) + err = merkle.SaveNodes(db, []merkletypes.Node{ + {TreeIndex: 3, Height: 0, LocalNodeIndex: 0, Data: []byte("00000000000000000000000000hash10")}, + {TreeIndex: 3, Height: 0, LocalNodeIndex: 1, Data: []byte("00000000000000000000000000hash10")}, + {TreeIndex: 3, Height: 1, LocalNodeIndex: 0, Data: []byte("000000000000000000000000hash1010")}, + }...) 
+ require.NoError(t, err) +} + +func TestQueryWithdrawal(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + childDB := db.WithPrefix([]byte("test_child")) + childNode := node.NewTestNode(nodetypes.NodeConfig{}, childDB, nil, nil, nil, nil) + + ch := Child{ + BaseChild: childprovider.NewTestBaseChild(0, childNode, nil, ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + }, nil, nodetypes.NodeConfig{}), + } + + saveQueryData(t, childDB) + + cases := []struct { + name string + sequence uint64 + result executortypes.QueryWithdrawalResponse + expected bool + }{ + { + name: "1", + sequence: 1, + result: executortypes.QueryWithdrawalResponse{ + Sequence: 1, + From: "from", + To: "to", + Amount: sdk.NewInt64Coin("baseDenom", 100), + OutputIndex: 1, + BridgeId: 1, + WithdrawalProofs: [][]byte{ + []byte("000000000000000000000000000hash2"), + []byte("00000000000000000000000000hash34"), + }, + Version: []byte{0}, + StorageRoot: []byte("000000000000000000000000hash1234"), + LastBlockHash: []byte("00000000000000000000000blockid10"), + TxTime: 1, + TxHeight: 1, + TxHash: "txHash1", + }, + expected: true, + }, + { + name: "5", + sequence: 5, + result: executortypes.QueryWithdrawalResponse{ + Sequence: 5, + From: "from", + To: "to", + Amount: sdk.NewInt64Coin("baseDenom", 100), + OutputIndex: 2, + BridgeId: 1, + WithdrawalProofs: [][]byte{ + []byte("000000000000000000000000000hash6"), + []byte("00000000000000000000000000hash78"), + []byte("000000000000000000000000hash9999"), + }, + Version: []byte{0}, + StorageRoot: []byte("00000000000000000000hash56789999"), + LastBlockHash: []byte("0000000000000000000000blockid100"), + TxTime: 5, + TxHeight: 5, + TxHash: "txHash5", + }, + expected: true, + }, + { + name: "not existing sequence 6", + sequence: 6, + result: executortypes.QueryWithdrawalResponse{}, + expected: false, + }, + { + name: "7", + sequence: 7, + result: executortypes.QueryWithdrawalResponse{ + Sequence: 7, + From: "from", + To: "to2", + Amount: sdk.NewInt64Coin("baseDenom", 1000), + OutputIndex: 2, + BridgeId: 1, + WithdrawalProofs: [][]byte{ + []byte("000000000000000000000000000hash8"), + []byte("00000000000000000000000000hash56"), + []byte("000000000000000000000000hash9999"), + }, + Version: []byte{0}, + StorageRoot: []byte("00000000000000000000hash56789999"), + LastBlockHash: []byte("0000000000000000000000blockid100"), + TxTime: 7, + TxHeight: 7, + TxHash: "txHash7", + }, + expected: true, + }, + { + name: "not finalized sequence 11", + sequence: 11, + result: executortypes.QueryWithdrawalResponse{ + Sequence: 11, + From: "from", + To: "to2", + Amount: sdk.NewInt64Coin("baseDenom", 1000), + OutputIndex: 0, + BridgeId: 1, + WithdrawalProofs: nil, + Version: []byte{0}, + StorageRoot: nil, + LastBlockHash: nil, + TxTime: 11, + TxHeight: 11, + TxHash: "txHash11", + }, + expected: true, + }, + { + name: "not existing sequence 0", + sequence: 0, + result: executortypes.QueryWithdrawalResponse{}, + expected: false, + }, + { + name: "not existing sequence 12", + sequence: 12, + result: executortypes.QueryWithdrawalResponse{}, + expected: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + result, err := ch.QueryWithdrawal(tc.sequence) + if tc.expected { + require.NoError(t, err) + require.Equal(t, tc.result, result) + } else { + require.Error(t, err) + } + }) + } +} + +func TestQueryWithdrawals(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + childDB := db.WithPrefix([]byte("test_child")) + childNode := 
node.NewTestNode(nodetypes.NodeConfig{}, childDB, nil, nil, nil, nil) + + ch := Child{ + BaseChild: childprovider.NewTestBaseChild(0, childNode, nil, ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + }, nil, nodetypes.NodeConfig{}), + } + + saveQueryData(t, childDB) + + cases := []struct { + name string + address string + offset uint64 + limit uint64 + descOrder bool + result executortypes.QueryWithdrawalsResponse + expected bool + }{ + { + name: "to, offset 0, limit 3, asc", + address: "to", + offset: 0, + limit: 3, + descOrder: false, + result: executortypes.QueryWithdrawalsResponse{ + Withdrawals: []executortypes.QueryWithdrawalResponse{ + { + Sequence: 1, + From: "from", + To: "to", + Amount: sdk.NewInt64Coin("baseDenom", 100), + OutputIndex: 1, + BridgeId: 1, + WithdrawalProofs: [][]byte{ + []byte("000000000000000000000000000hash2"), + []byte("00000000000000000000000000hash34"), + }, + Version: []byte{0}, + StorageRoot: []byte("000000000000000000000000hash1234"), + LastBlockHash: []byte("00000000000000000000000blockid10"), + TxTime: 1, + TxHeight: 1, + TxHash: "txHash1", + }, + { + Sequence: 2, + From: "from", + To: "to", + Amount: sdk.NewInt64Coin("baseDenom", 100), + OutputIndex: 1, + BridgeId: 1, + WithdrawalProofs: [][]byte{ + []byte("000000000000000000000000000hash1"), + []byte("00000000000000000000000000hash34"), + }, + Version: []byte{0}, + StorageRoot: []byte("000000000000000000000000hash1234"), + LastBlockHash: []byte("00000000000000000000000blockid10"), + TxTime: 2, + TxHeight: 2, + TxHash: "txHash2", + }, + { + Sequence: 3, + From: "from", + To: "to", + Amount: sdk.NewInt64Coin("baseDenom", 100), + OutputIndex: 1, + BridgeId: 1, + WithdrawalProofs: [][]byte{ + []byte("000000000000000000000000000hash4"), + []byte("00000000000000000000000000hash12"), + }, + Version: []byte{0}, + StorageRoot: []byte("000000000000000000000000hash1234"), + LastBlockHash: []byte("00000000000000000000000blockid10"), + TxTime: 3, + TxHeight: 3, + TxHash: "txHash3", + }, + }, + Next: uint64Ptr(4), + }, + expected: true, + }, + { + name: "to, offset 0, limit 0, desc", + address: "to", + offset: 0, + limit: 0, + descOrder: true, + result: executortypes.QueryWithdrawalsResponse{ + Withdrawals: []executortypes.QueryWithdrawalResponse{}, + Next: uint64Ptr(5), + }, + expected: true, + }, + { + name: "to, offset 1, limit 5, desc", + address: "to", + offset: 1, + limit: 5, + descOrder: true, + result: executortypes.QueryWithdrawalsResponse{ + Withdrawals: []executortypes.QueryWithdrawalResponse{ + { + Sequence: 1, + From: "from", + To: "to", + Amount: sdk.NewInt64Coin("baseDenom", 100), + OutputIndex: 1, + BridgeId: 1, + WithdrawalProofs: [][]byte{ + []byte("000000000000000000000000000hash2"), + []byte("00000000000000000000000000hash34"), + }, + Version: []byte{0}, + StorageRoot: []byte("000000000000000000000000hash1234"), + LastBlockHash: []byte("00000000000000000000000blockid10"), + TxTime: 1, + TxHeight: 1, + TxHash: "txHash1", + }, + }, + }, + expected: true, + }, + { + name: "to2, offset 11, limit 10, desc", + address: "to2", + offset: 11, + limit: 10, + descOrder: true, + result: executortypes.QueryWithdrawalsResponse{ + Withdrawals: []executortypes.QueryWithdrawalResponse{ + { + Sequence: 11, + From: "from", + To: "to2", + Amount: sdk.NewInt64Coin("baseDenom", 1000), + OutputIndex: 0, + BridgeId: 1, + WithdrawalProofs: nil, + Version: []byte{0}, + StorageRoot: nil, + LastBlockHash: nil, + TxTime: 11, + TxHeight: 11, + TxHash: "txHash11", + }, + { + Sequence: 10, + From: "from", + To: "to2", + Amount: 
sdk.NewInt64Coin("baseDenom", 1000), + OutputIndex: 3, + BridgeId: 1, + WithdrawalProofs: [][]byte{ + []byte("00000000000000000000000000hash10"), + }, + Version: []byte{0}, + StorageRoot: []byte("000000000000000000000000hash1010"), + LastBlockHash: []byte("000000000000000000000blockid1000"), + TxTime: 10, + TxHeight: 10, + TxHash: "txHash10", + }, + { + Sequence: 9, + From: "from", + To: "to2", + Amount: sdk.NewInt64Coin("baseDenom", 1000), + OutputIndex: 2, + BridgeId: 1, + WithdrawalProofs: [][]byte{ + []byte("000000000000000000000000000hash9"), + []byte("00000000000000000000000000hash99"), + []byte("000000000000000000000000hash5678"), + }, + Version: []byte{0}, + StorageRoot: []byte("00000000000000000000hash56789999"), + LastBlockHash: []byte("0000000000000000000000blockid100"), + TxTime: 9, + TxHeight: 9, + TxHash: "txHash9", + }, + { + Sequence: 8, + From: "from", + To: "to2", + Amount: sdk.NewInt64Coin("baseDenom", 1000), + OutputIndex: 2, + BridgeId: 1, + WithdrawalProofs: [][]byte{ + []byte("000000000000000000000000000hash7"), + []byte("00000000000000000000000000hash56"), + []byte("000000000000000000000000hash9999"), + }, + Version: []byte{0}, + StorageRoot: []byte("00000000000000000000hash56789999"), + LastBlockHash: []byte("0000000000000000000000blockid100"), + TxTime: 8, + TxHeight: 8, + TxHash: "txHash8", + }, + { + Sequence: 7, + From: "from", + To: "to2", + Amount: sdk.NewInt64Coin("baseDenom", 1000), + OutputIndex: 2, + BridgeId: 1, + WithdrawalProofs: [][]byte{ + []byte("000000000000000000000000000hash8"), + []byte("00000000000000000000000000hash56"), + []byte("000000000000000000000000hash9999"), + }, + Version: []byte{0}, + StorageRoot: []byte("00000000000000000000hash56789999"), + LastBlockHash: []byte("0000000000000000000000blockid100"), + TxTime: 7, + TxHeight: 7, + TxHash: "txHash7", + }, + }, + }, + expected: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + result, err := ch.QueryWithdrawals(tc.address, tc.offset, tc.limit, tc.descOrder) + if tc.expected { + require.NoError(t, err) + require.Equal(t, tc.result, result) + } else { + require.Error(t, err) + } + }) + } +} diff --git a/executor/child/status.go b/executor/child/status.go index dc760eb..08ece00 100644 --- a/executor/child/status.go +++ b/executor/child/status.go @@ -1,10 +1,10 @@ package child import ( - "errors" "time" nodetypes "github.com/initia-labs/opinit-bots/node/types" + "github.com/pkg/errors" ) type Status struct { @@ -26,17 +26,9 @@ func (ch Child) GetStatus() (Status, error) { return Status{}, errors.New("node is not initialized") } - workingTreeLeafCount, err := ch.GetWorkingTreeLeafCount() + workingTree, err := ch.WorkingTree() if err != nil { - return Status{}, err - } - startLeafIndex, err := ch.GetStartLeafIndex() - if err != nil { - return Status{}, err - } - workingTreeIndex, err := ch.GetWorkingTreeIndex() - if err != nil { - return Status{}, err + return Status{}, errors.Wrap(err, "failed to get working tree") } return Status{ @@ -44,8 +36,8 @@ func (ch Child) GetStatus() (Status, error) { LastUpdatedOracleL1Height: ch.lastUpdatedOracleL1Height, LastFinalizedDepositL1BlockHeight: ch.lastFinalizedDepositL1BlockHeight, LastFinalizedDepositL1Sequence: ch.lastFinalizedDepositL1Sequence, - LastWithdrawalL2Sequence: workingTreeLeafCount + startLeafIndex - 1, - WorkingTreeIndex: workingTreeIndex, + LastWithdrawalL2Sequence: workingTree.LeafCount + workingTree.StartLeafIndex - 1, + WorkingTreeIndex: workingTree.Index, FinalizingBlockHeight: 
ch.finalizingBlockHeight,
 		LastOutputSubmissionTime:          ch.lastOutputTime,
 		NextOutputSubmissionTime:          ch.nextOutputTime,
diff --git a/executor/child/withdraw.go b/executor/child/withdraw.go
index ee15a1a..d750ddf 100644
--- a/executor/child/withdraw.go
+++ b/executor/child/withdraw.go
@@ -3,13 +3,16 @@ package child
 import (
 	"context"
 	"encoding/base64"
-	"encoding/json"
 	"fmt"
 	"strings"
+	"time"
 
 	ophosttypes "github.com/initia-labs/OPinit/x/ophost/types"
 	executortypes "github.com/initia-labs/opinit-bots/executor/types"
+	"github.com/initia-labs/opinit-bots/merkle"
+	"github.com/initia-labs/opinit-bots/txutils"
 	"github.com/initia-labs/opinit-bots/types"
+	"github.com/pkg/errors"
 	"go.uber.org/zap"
 
 	cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
@@ -18,88 +21,109 @@ import (
 	childprovider "github.com/initia-labs/opinit-bots/provider/child"
 )
 
-func (ch *Child) initiateWithdrawalHandler(_ context.Context, args nodetypes.EventHandlerArgs) error {
+func (ch *Child) initiateWithdrawalHandler(ctx types.Context, args nodetypes.EventHandlerArgs) error {
 	l2Sequence, amount, from, to, baseDenom, err := childprovider.ParseInitiateWithdrawal(args.EventAttributes)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to parse initiate withdrawal event")
 	}
-	return ch.handleInitiateWithdrawal(l2Sequence, from, to, baseDenom, amount)
+	err = ch.handleInitiateWithdrawal(ctx, l2Sequence, from, to, baseDenom, amount, args.BlockTime, args.BlockHeight, txutils.TxHash(args.Tx))
+	if err != nil {
+		return errors.Wrap(err, "failed to handle initiate withdrawal")
+	}
+	return nil
 }
 
-func (ch *Child) handleInitiateWithdrawal(l2Sequence uint64, from string, to string, baseDenom string, amount uint64) error {
+// handleInitiateWithdrawal handles an initiate withdrawal event:
+// 1. save the withdrawal data to the database
+// 2. insert a leaf into the merkle tree
+// 3. save the new tree nodes
+func (ch *Child) handleInitiateWithdrawal(ctx types.Context, l2Sequence uint64, from string, to string, baseDenom string, amount uint64, blockTime time.Time, blockHeight int64, txHash string) error {
 	withdrawalHash := ophosttypes.GenerateWithdrawalHash(ch.BridgeId(), l2Sequence, from, to, baseDenom, amount)
-	data := executortypes.WithdrawalData{
-		Sequence:       l2Sequence,
-		From:           from,
-		To:             to,
-		Amount:         amount,
-		BaseDenom:      baseDenom,
-		WithdrawalHash: withdrawalHash[:],
-	}
+	data := executortypes.NewWithdrawalData(l2Sequence, from, to, amount, baseDenom, withdrawalHash[:], blockHeight, blockTime.UnixNano(), txHash)
 
 	// store to database
-	kvs, err := ch.WithdrawalToRawKVs(l2Sequence, data)
+	err := SaveWithdrawal(ch.stage, data)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to save withdrawal data")
+	}
+
+	workingTree := ch.MustGetWorkingTree()
+	if workingTree.StartLeafIndex+workingTree.LeafCount != l2Sequence {
+		panic(fmt.Errorf("INVARIANT failed; handleInitiateWithdrawal expected the working tree to be at leaf `%d` (start `%d` + count `%d`) but got leaf `%d`", workingTree.StartLeafIndex+workingTree.LeafCount, workingTree.StartLeafIndex, workingTree.LeafCount, l2Sequence))
 	}
-	ch.batchKVs = append(ch.batchKVs, kvs...)
 
 	// generate merkle tree
-	err = ch.Merkle().InsertLeaf(withdrawalHash[:])
+	newNodes, err := ch.Merkle().InsertLeaf(withdrawalHash[:])
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to insert leaf to merkle tree")
+	}
+	err = merkle.SaveNodes(ch.stage, newNodes...)
+	if err != nil {
+		return errors.Wrap(err, "failed to save new tree nodes")
 	}
 
-	ch.Logger().Info("initiate token withdrawal",
+	ctx.Logger().Info("initiate token withdrawal",
 		zap.Uint64("l2_sequence", l2Sequence),
 		zap.String("from", from),
 		zap.String("to", to),
 		zap.Uint64("amount", amount),
 		zap.String("base_denom", baseDenom),
 		zap.String("withdrawal", base64.StdEncoding.EncodeToString(withdrawalHash[:])),
+		zap.Int64("height", blockHeight),
+		zap.String("tx_hash", txHash),
 	)
 
 	return nil
 }
 
+// prepareTree loads the working tree for the given block height into the
+// merkle instance. if no tree exists yet, it initializes a new one;
+// otherwise it resumes from the tree saved at the previous height.
 func (ch *Child) prepareTree(blockHeight int64) error {
-	err := ch.Merkle().LoadWorkingTree(types.MustInt64ToUint64(blockHeight) - 1)
-	if err == dbtypes.ErrNotFound {
+	workingTree, err := merkle.GetWorkingTree(ch.DB(), types.MustInt64ToUint64(blockHeight)-1)
+	if errors.Is(err, dbtypes.ErrNotFound) {
 		if ch.InitializeTree(blockHeight) {
+			// working tree should be initialized after the tree is initialized
+			_, err = ch.WorkingTree()
+			if err != nil {
+				panic("working tree not found after initializing tree")
+			}
 			return nil
 		}
 		// must not happen
 		panic(fmt.Errorf("working tree not found at height: %d, current: %d", blockHeight-1, blockHeight))
 	} else if err != nil {
-		return err
+		return errors.Wrap(err, "failed to get working tree")
 	}
+	err = ch.Merkle().PrepareWorkingTree(workingTree)
+	if err != nil {
+		return errors.Wrap(err, "failed to prepare working tree")
+	}
 	return nil
 }
 
+// prepareOutput initializes the output submission timers and checks whether the output for the current working tree index already exists on L1 (if so, we are syncing).
 func (ch *Child) prepareOutput(ctx context.Context) error {
-	workingTreeIndex, err := ch.GetWorkingTreeIndex()
-	if err != nil {
-		return err
-	}
+	workingTree := ch.MustGetWorkingTree()
 
 	// initialize next output time
-	if ch.nextOutputTime.IsZero() && workingTreeIndex > 1 {
-		output, err := ch.host.QueryOutput(ctx, ch.BridgeId(), workingTreeIndex-1, 0)
+	if ch.nextOutputTime.IsZero() && workingTree.Index > 1 {
+		output, err := ch.host.QueryOutput(ctx, ch.BridgeId(), workingTree.Index-1, 0)
 		if err != nil {
 			// TODO: maybe not return error here and roll back
-			return fmt.Errorf("output does not exist at index: %d", workingTreeIndex-1)
+			return fmt.Errorf("output does not exist at index: %d", workingTree.Index-1)
 		}
 		ch.lastOutputTime = output.OutputProposal.L1BlockTime
 		ch.nextOutputTime = output.OutputProposal.L1BlockTime.Add(ch.BridgeInfo().BridgeConfig.SubmissionInterval * 2 / 3)
 	}
 
-	output, err := ch.host.QueryOutput(ctx, ch.BridgeId(), workingTreeIndex, 0)
+	output, err := ch.host.QueryOutput(ctx, ch.BridgeId(), workingTree.Index, 0)
 	if err != nil {
 		if strings.Contains(err.Error(), "collections: not found") {
 			return nil
 		}
-		return err
+		return errors.Wrap(err, "failed to query output")
 	} else {
 		// we are syncing
 		ch.finalizingBlockHeight = types.MustUint64ToInt64(output.OutputProposal.L2BlockNumber)
@@ -107,7 +131,8 @@ func (ch *Child) prepareOutput(ctx context.Context) error {
 	return nil
 }
 
-func (ch *Child) handleTree(blockHeight int64, latestHeight int64, blockId []byte, blockHeader cmtproto.Header) (kvs []types.RawKV, storageRoot []byte, err error) {
+// handleTree handles the working tree for the given block height, finalizing it once the next output time has passed at the latest height (or once syncing reaches the finalizing height).
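+// when the tree is finalized, the finalized tree info and its new nodes are
+// saved to the stage and the output submission timers are advanced; in all
+// cases the working tree is saved with the block height as its version.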
+func (ch *Child) handleTree(ctx types.Context, blockHeight int64, latestHeight int64, blockId []byte, blockHeader cmtproto.Header) (storageRoot []byte, err error) {
 	// panic if we are syncing and passed the finalizing block height
 	// this must not happen
 	if ch.finalizingBlockHeight != 0 && ch.finalizingBlockHeight < blockHeight {
@@ -120,39 +145,37 @@ func (ch *Child) handleTree(blockHeight int64, latestHeight int64, blockId []byt
 		blockHeight == latestHeight &&
 		blockHeader.Time.After(ch.nextOutputTime)) {
 
-		data, err := json.Marshal(executortypes.TreeExtraData{
-			BlockNumber: blockHeight,
-			BlockHash:   blockId,
-		})
+		treeExtraData := executortypes.NewTreeExtraData(blockHeight, blockHeader.Time.UnixNano(), blockId)
+		data, err := treeExtraData.Marshal()
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 
-		kvs, storageRoot, err = ch.Merkle().FinalizeWorkingTree(data)
+		finalizedTree, newNodes, treeRootHash, err := ch.Merkle().FinalizeWorkingTree(data)
 		if err != nil {
-			return nil, nil, err
+			return nil, errors.Wrap(err, "failed to finalize working tree")
 		}
+		storageRoot = treeRootHash
 
-		workingTreeIndex, err := ch.GetWorkingTreeIndex()
-		if err != nil {
-			return nil, nil, err
+		if finalizedTree != nil {
+			err = merkle.SaveFinalizedTree(ch.stage, *finalizedTree)
+			if err != nil {
+				return nil, errors.Wrap(err, "failed to save finalized tree")
+			}
 		}
 
-		workingTreeLeafCount, err := ch.GetWorkingTreeLeafCount()
+		err = merkle.SaveNodes(ch.stage, newNodes...)
 		if err != nil {
-			return nil, nil, err
+			return nil, errors.Wrap(err, "failed to save new nodes of finalized tree")
 		}
 
-		startLeafIndex, err := ch.GetStartLeafIndex()
-		if err != nil {
-			return nil, nil, err
-		}
+		workingTree := ch.MustGetWorkingTree()
 
-		ch.Logger().Info("finalize working tree",
-			zap.Uint64("tree_index", workingTreeIndex),
+		ctx.Logger().Info("finalize working tree",
+			zap.Uint64("tree_index", workingTree.Index),
 			zap.Int64("height", blockHeight),
-			zap.Uint64("start_leaf_index", startLeafIndex),
-			zap.Uint64("num_leaves", workingTreeLeafCount),
+			zap.Uint64("start_leaf_index", workingTree.StartLeafIndex),
+			zap.Uint64("num_leaves", workingTree.LeafCount),
 			zap.String("storage_root", base64.StdEncoding.EncodeToString(storageRoot)),
 		)
 
@@ -166,15 +189,16 @@ func (ch *Child) handleTree(blockHeight int64, latestHeight int64, blockId []byt
 		ch.nextOutputTime = blockHeader.Time.Add(ch.BridgeInfo().BridgeConfig.SubmissionInterval * 2 / 3)
 	}
 
-	version := types.MustInt64ToUint64(blockHeight)
-	err = ch.Merkle().SaveWorkingTree(version)
+	workingTree := ch.MustGetWorkingTree()
+	err = merkle.SaveWorkingTree(ch.stage, workingTree)
 	if err != nil {
-		return nil, nil, err
+		return nil, errors.Wrap(err, "failed to save working tree")
 	}
 
-	return kvs, storageRoot, nil
+	return storageRoot, nil
 }
 
+// handleOutput generates the output root for the given block height and appends a MsgProposeOutput for it to the message queue.
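+// the output root is computed by ophosttypes.GenerateOutputRoot from the
+// version, the storage root, and the latest L2 block hash; if the host has
+// no broadcaster (see the mock in withdraw_test.go), the returned msg is nil
+// and nothing is queued.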
func (ch *Child) handleOutput(blockHeight int64, version uint8, blockId []byte, outputIndex uint64, storageRoot []byte) error { outputRoot := ophosttypes.GenerateOutputRoot(version, storageRoot, blockId) msg, sender, err := ch.host.GetMsgProposeOutput( @@ -184,95 +208,9 @@ func (ch *Child) handleOutput(blockHeight int64, version uint8, blockId []byte, outputRoot[:], ) if err != nil { - return err + return errors.Wrap(err, "failed to get msg propose output") } else if msg != nil { ch.AppendMsgQueue(msg, sender) } return nil } - -// GetWithdrawal returns the withdrawal data for the given sequence from the database -func (ch *Child) GetWithdrawal(sequence uint64) (executortypes.WithdrawalData, error) { - dataBytes, err := ch.DB().Get(executortypes.PrefixedWithdrawalKey(sequence)) - if err != nil { - return executortypes.WithdrawalData{}, err - } - var data executortypes.WithdrawalData - err = json.Unmarshal(dataBytes, &data) - return data, err -} - -func (ch *Child) GetSequencesByAddress(address string, offset uint64, limit uint64, descOrder bool) (sequences []uint64, next uint64, err error) { - if limit == 0 { - return nil, 0, nil - } - - count := uint64(0) - fetchFn := func(key, value []byte) (bool, error) { - sequence, err := dbtypes.ToUint64(value) - if err != nil { - return true, err - } - if count >= limit { - next = sequence - return true, nil - } - sequences = append(sequences, sequence) - count++ - return false, nil - } - - if descOrder { - var startKey []byte - if offset != 0 { - startKey = executortypes.PrefixedWithdrawalKeyAddressIndex(address, offset) - } - err = ch.DB().PrefixedReverseIterate(executortypes.PrefixedWithdrawalKeyAddress(address), startKey, fetchFn) - if err != nil { - return nil, 0, err - } - } else { - startKey := executortypes.PrefixedWithdrawalKeyAddressIndex(address, offset) - err := ch.DB().PrefixedIterate(executortypes.PrefixedWithdrawalKeyAddress(address), startKey, fetchFn) - if err != nil { - return nil, 0, err - } - } - return sequences, next, nil -} - -// SetWithdrawal store the withdrawal data for the given sequence to the database -func (ch *Child) WithdrawalToRawKVs(sequence uint64, data executortypes.WithdrawalData) ([]types.RawKV, error) { - dataBytes, err := json.Marshal(&data) - if err != nil { - return nil, err - } - - kvs := make([]types.RawKV, 0) - kvs = append(kvs, types.RawKV{ - Key: ch.DB().PrefixedKey(executortypes.PrefixedWithdrawalKey(sequence)), - Value: dataBytes, - }) - - kvs = append(kvs, types.RawKV{ - Key: ch.DB().PrefixedKey(executortypes.PrefixedWithdrawalKeyAddressIndex(data.To, sequence)), - Value: dbtypes.FromUint64(sequence), - }) - return kvs, nil -} - -func (ch *Child) DeleteFutureWithdrawals(fromSequence uint64) error { - return ch.DB().PrefixedIterate(executortypes.WithdrawalKey, nil, func(key, _ []byte) (bool, error) { - if len(key) != len(executortypes.WithdrawalKey)+1+8 { - return false, nil - } - sequence := dbtypes.ToUint64Key(key[len(key)-8:]) - if sequence >= fromSequence { - err := ch.DB().Delete(key) - if err != nil { - return true, err - } - } - return false, nil - }) -} diff --git a/executor/child/withdraw_test.go b/executor/child/withdraw_test.go new file mode 100644 index 0000000..dffb6c8 --- /dev/null +++ b/executor/child/withdraw_test.go @@ -0,0 +1,950 @@ +package child + +import ( + "context" + "strconv" + "testing" + "time" + + abcitypes "github.com/cometbft/cometbft/abci/types" + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + opchildtypes "github.com/initia-labs/OPinit/x/opchild/types" 
+ ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/initia-labs/opinit-bots/db" + "github.com/initia-labs/opinit-bots/merkle" + merkletypes "github.com/initia-labs/opinit-bots/merkle/types" + "github.com/initia-labs/opinit-bots/node" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + childprovider "github.com/initia-labs/opinit-bots/provider/child" + "github.com/initia-labs/opinit-bots/types" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func InitiateWithdrawalEvents( + from string, + to string, + denom string, + baseDenom string, + amount sdk.Coin, + l2Sequence uint64, +) []abcitypes.EventAttribute { + return []abcitypes.EventAttribute{ + { + Key: opchildtypes.AttributeKeyFrom, + Value: from, + }, + { + Key: opchildtypes.AttributeKeyTo, + Value: to, + }, + { + Key: opchildtypes.AttributeKeyDenom, + Value: denom, + }, + { + Key: opchildtypes.AttributeKeyBaseDenom, + Value: baseDenom, + }, + { + Key: opchildtypes.AttributeKeyAmount, + Value: amount.Amount.String(), + }, + { + Key: opchildtypes.AttributeKeyL2Sequence, + Value: strconv.FormatUint(l2Sequence, 10), + }, + } +} + +func TestInitiateWithdrawalHandler(t *testing.T) { + bridgeInfo := ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + } + + fullAttributes := InitiateWithdrawalEvents("from", "to", "denom", "uinit", sdk.NewInt64Coin("uinit", 10000), 1) + + cases := []struct { + name string + lastWorkingTree merkletypes.TreeInfo + eventHandlerArgs nodetypes.EventHandlerArgs + expectedStage []types.KV + expectedLog func() (msg string, fields []zapcore.Field) + err bool + panic bool + }{ + { + name: "success", + lastWorkingTree: merkletypes.TreeInfo{ + Version: 10, + Index: 5, + LeafCount: 0, + StartLeafIndex: 1, + LastSiblings: make(map[uint8][]byte), + Done: false, + }, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + BlockHeight: 11, + BlockTime: time.Unix(0, 10000).UTC(), + Tx: []byte("txbytes"), // EA58654919E6F3E08370DE723D8DA223F1DFE78DD28D0A23E6F18BFA0815BB99 + EventAttributes: InitiateWithdrawalEvents("from", "to", "denom", "uinit", sdk.NewInt64Coin("uinit", 10000), 1), + }, + expectedStage: []types.KV{ + { + Key: append([]byte("/test_child/withdrawal_sequence/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}...), + Value: []byte(`{"sequence":1,"from":"from","to":"to","amount":10000,"base_denom":"uinit","withdrawal_hash":"V+7ukqwrq0Ba6kj63TEZ1C7m4Ze7pqERmid/OQtNneY=","tx_height":11,"tx_time":10000,"tx_hash":"EA58654919E6F3E08370DE723D8DA223F1DFE78DD28D0A23E6F18BFA0815BB99"}`), + }, + { + Key: append([]byte("/test_child/withdrawal_address/to/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}...), + Value: []byte(`1`), + }, + { // local node 0 + Key: append([]byte("/test_child/node/"), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}...), + Value: []byte{0x57, 0xee, 0xee, 0x92, 0xac, 0x2b, 0xab, 0x40, 0x5a, 0xea, 0x48, 0xfa, 0xdd, 0x31, 0x19, 0xd4, 0x2e, 0xe6, 0xe1, 0x97, 0xbb, 0xa6, 0xa1, 0x11, 0x9a, 0x27, 0x7f, 0x39, 0x0b, 0x4d, 0x9d, 0xe6}, + }, + }, + expectedLog: func() (msg string, fields []zapcore.Field) { + msg = "initiate token withdrawal" + fields = []zapcore.Field{ + zap.Uint64("l2_sequence", 1), + zap.String("from", "from"), + zap.String("to", "to"), + zap.Uint64("amount", 10000), + zap.String("base_denom", "uinit"), + zap.String("withdrawal", "V+7ukqwrq0Ba6kj63TEZ1C7m4Ze7pqERmid/OQtNneY="), + zap.Int64("height", 11), + 
zap.String("tx_hash", "EA58654919E6F3E08370DE723D8DA223F1DFE78DD28D0A23E6F18BFA0815BB99"), + } + return msg, fields + }, + err: false, + panic: false, + }, + { + name: "second withdrawal", + lastWorkingTree: merkletypes.TreeInfo{ + Version: 10, + Index: 5, + LeafCount: 1, + StartLeafIndex: 100, + LastSiblings: map[uint8][]byte{ + 0: {0x5e, 0xc5, 0xb8, 0x13, 0x43, 0xb9, 0x76, 0xbb, 0xef, 0x23, 0xbc, 0x6e, 0x6a, 0xbe, 0x44, 0xa6, 0xa7, 0x17, 0x8c, 0x66, 0xae, 0xfd, 0x78, 0xe8, 0xd8, 0x1c, 0x73, 0x36, 0xf3, 0x32, 0xb6, 0x31}, + }, + Done: false, + }, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + BlockHeight: 11, + BlockTime: time.Unix(0, 10000).UTC(), + Tx: []byte("txbytes"), + EventAttributes: InitiateWithdrawalEvents("from", "to", "denom", "uinit", sdk.NewInt64Coin("uinit", 10000), 101), + }, + expectedStage: []types.KV{ + { + Key: append([]byte("/test_child/withdrawal_sequence/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65}...), + Value: []byte(`{"sequence":101,"from":"from","to":"to","amount":10000,"base_denom":"uinit","withdrawal_hash":"Hzn58U22rfXK2VZCOIFzjudpdYkw5v0eZ2QnspIFlBs=","tx_height":11,"tx_time":10000,"tx_hash":"EA58654919E6F3E08370DE723D8DA223F1DFE78DD28D0A23E6F18BFA0815BB99"}`), + }, + { + Key: append([]byte("/test_child/withdrawal_address/to/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65}...), + Value: []byte(`101`), + }, + { // local node 1 + Key: append([]byte("/test_child/node/"), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}...), + Value: []byte{0x1f, 0x39, 0xf9, 0xf1, 0x4d, 0xb6, 0xad, 0xf5, 0xca, 0xd9, 0x56, 0x42, 0x38, 0x81, 0x73, 0x8e, 0xe7, 0x69, 0x75, 0x89, 0x30, 0xe6, 0xfd, 0x1e, 0x67, 0x64, 0x27, 0xb2, 0x92, 0x05, 0x94, 0x1b}, + }, + { // height 1, local node 0 + Key: append([]byte("/test_child/node/"), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}...), + Value: []byte{0x06, 0x90, 0x8d, 0x0d, 0x10, 0x0f, 0x55, 0x78, 0xaa, 0x12, 0x81, 0xa1, 0x72, 0xbf, 0x46, 0x65, 0x09, 0xd3, 0xa0, 0x3c, 0xb2, 0x4c, 0xa1, 0xb4, 0x32, 0xb9, 0x11, 0x71, 0x5e, 0x10, 0xa9, 0xb6}, + }, + }, + expectedLog: func() (msg string, fields []zapcore.Field) { + msg = "initiate token withdrawal" + fields = []zapcore.Field{ + zap.Uint64("l2_sequence", 101), + zap.String("from", "from"), + zap.String("to", "to"), + zap.Uint64("amount", 10000), + zap.String("base_denom", "uinit"), + zap.String("withdrawal", "Hzn58U22rfXK2VZCOIFzjudpdYkw5v0eZ2QnspIFlBs="), + zap.Int64("height", 11), + zap.String("tx_hash", "EA58654919E6F3E08370DE723D8DA223F1DFE78DD28D0A23E6F18BFA0815BB99"), + } + return msg, fields + }, + err: false, + panic: false, + }, + { + name: "panic: working tree leaf count mismatch", + lastWorkingTree: merkletypes.TreeInfo{ + Version: 9, + Index: 5, + LeafCount: 0, + StartLeafIndex: 100, + LastSiblings: make(map[uint8][]byte), + Done: false, + }, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + BlockHeight: 10, + BlockTime: time.Unix(0, 10000).UTC(), + Tx: []byte("txbytes"), + EventAttributes: InitiateWithdrawalEvents("from", "to", "denom", "uinit", sdk.NewInt64Coin("uinit", 10000), 101), + }, + expectedStage: nil, + expectedLog: nil, + err: false, + panic: true, + }, + { + name: "missing event attribute from", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: fullAttributes[1:], + }, + err: true, + }, + { + name: "missing event attribute to", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:1], 
fullAttributes[2:]...), + }, + err: true, + }, + { + name: "missing event attribute denom", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:2], fullAttributes[3:]...), + }, + err: true, + }, + { + name: "missing event attribute base denom", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:3], fullAttributes[4:]...), + }, + err: true, + }, + { + name: "missing event attribute amount", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:4], fullAttributes[5:]...), + }, + err: true, + }, + { + name: "missing event attribute l2 sequence", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: fullAttributes[:5], + }, + err: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + logger, observedLogs := logCapturer() + ctx := types.NewContext(context.Background(), logger, "") + + basedb, err := db.NewMemDB() + require.NoError(t, err) + + childdb := basedb.WithPrefix([]byte("test_child")) + + childNode := node.NewTestNode(nodetypes.NodeConfig{}, childdb, nil, nil, nil, nil) + + mk, err := merkle.NewMerkle(ophosttypes.GenerateNodeHash) + require.NoError(t, err) + err = mk.PrepareWorkingTree(tc.lastWorkingTree) + require.NoError(t, err) + + stage := childdb.NewStage().(db.Stage) + ch := Child{ + BaseChild: childprovider.NewTestBaseChild(0, childNode, mk, bridgeInfo, nil, nodetypes.NodeConfig{}), + stage: stage, + } + + if tc.panic { + require.Panics(t, func() { + ch.initiateWithdrawalHandler(ctx, tc.eventHandlerArgs) //nolint + }) + return + } + + err = ch.initiateWithdrawalHandler(ctx, tc.eventHandlerArgs) + if !tc.err { + require.NoError(t, err) + logs := observedLogs.TakeAll() + if tc.expectedLog != nil { + require.Len(t, logs, 1) + + expectedMsg, expectedFields := tc.expectedLog() + require.Equal(t, expectedMsg, logs[0].Message) + require.Equal(t, expectedFields, logs[0].Context) + } else { + require.Len(t, logs, 0) + } + + if tc.expectedStage != nil { + allkvs := stage.All() + for _, kv := range tc.expectedStage { + require.Equal(t, kv.Value, allkvs[string(kv.Key)]) + } + } + } else { + require.Error(t, err) + } + }) + } +} + +func TestPrepareTree(t *testing.T) { + bridgeInfo := ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + } + + cases := []struct { + name string + childDBState []types.KV + blockHeight int64 + initializeTreeFnMaker func(*merkle.Merkle) func(int64) (bool, error) + expected merkletypes.TreeInfo + err bool + panic bool + }{ + { + name: "new height 6", + childDBState: []types.KV{ + { + Key: append([]byte("working_tree/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}...), + Value: []byte(`{"version":5,"index":2,"leaf_count":0,"start_leaf_index":1,"last_siblings":{},"done":false}`), + }, + }, + blockHeight: 6, + initializeTreeFnMaker: nil, + expected: merkletypes.TreeInfo{ + Version: 6, + Index: 2, + LeafCount: 0, + StartLeafIndex: 1, + LastSiblings: make(map[uint8][]byte), + Done: false, + }, + err: false, + panic: false, + }, + { + name: "no tree height 5, new height 6, no initializeTreeFn", + childDBState: nil, + blockHeight: 6, + initializeTreeFnMaker: nil, + expected: merkletypes.TreeInfo{ + Version: 6, + Index: 2, + LeafCount: 0, + StartLeafIndex: 1, + LastSiblings: make(map[uint8][]byte), + Done: false, + }, + err: false, + panic: true, + }, + { + name: "no tree height 5, new height 6, no initializing tree", + childDBState: nil, + blockHeight: 6, + initializeTreeFnMaker: func(m *merkle.Merkle) func(i 
int64) (bool, error) { + return func(i int64) (bool, error) { + return false, nil + } + }, + expected: merkletypes.TreeInfo{ + Version: 6, + Index: 2, + LeafCount: 0, + StartLeafIndex: 1, + LastSiblings: make(map[uint8][]byte), + Done: false, + }, + err: false, + panic: true, + }, + { + name: "tree done at 5, new height 6", + childDBState: []types.KV{ + { + Key: append([]byte("working_tree/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}...), + Value: []byte(`{"version":5,"index":2,"leaf_count":2,"start_leaf_index":1,"last_siblings":{},"done":true}`), + }, + }, + blockHeight: 6, + initializeTreeFnMaker: nil, + expected: merkletypes.TreeInfo{ + Version: 6, + Index: 3, + LeafCount: 0, + StartLeafIndex: 3, + LastSiblings: make(map[uint8][]byte), + Done: false, + }, + err: false, + panic: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + basedb, err := db.NewMemDB() + require.NoError(t, err) + + childdb := basedb.WithPrefix([]byte("test_child")) + + childNode := node.NewTestNode(nodetypes.NodeConfig{}, childdb, nil, nil, nil, nil) + + mk, err := merkle.NewMerkle(ophosttypes.GenerateNodeHash) + require.NoError(t, err) + + var initializeFn func(i int64) (bool, error) + if tc.initializeTreeFnMaker != nil { + initializeFn = tc.initializeTreeFnMaker(mk) + } + + ch := Child{ + BaseChild: childprovider.NewTestBaseChild(0, childNode, mk, bridgeInfo, initializeFn, nodetypes.NodeConfig{}), + } + + for _, kv := range tc.childDBState { + err = childdb.Set(kv.Key, kv.Value) + require.NoError(t, err) + } + + if tc.panic { + require.Panics(t, func() { + ch.prepareTree(tc.blockHeight) //nolint + }) + return + } + err = ch.prepareTree(tc.blockHeight) + if !tc.err { + require.NoError(t, err) + + tree, err := mk.WorkingTree() + require.NoError(t, err) + + require.Equal(t, tc.expected, tree) + } else { + require.Error(t, err) + } + }) + } +} + +func TestPrepareOutput(t *testing.T) { + cases := []struct { + name string + bridgeInfo ophosttypes.QueryBridgeResponse + hostOutputs map[uint64]ophosttypes.Output + lastWorkingTree merkletypes.TreeInfo + expected func() (lastOutputTime time.Time, nextOutputTime time.Time, finalizingBlockHeight int64) + err bool + }{ + { + name: "no output, index 1", + bridgeInfo: ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + BridgeConfig: ophosttypes.BridgeConfig{ + SubmissionInterval: 100, + }, + }, + hostOutputs: map[uint64]ophosttypes.Output{}, + lastWorkingTree: merkletypes.TreeInfo{ + Version: 9, + Index: 1, + LeafCount: 2, + StartLeafIndex: 1, + LastSiblings: make(map[uint8][]byte), + Done: false, + }, + expected: func() (lastOutputTime time.Time, nextOutputTime time.Time, finalizingBlockHeight int64) { + return time.Time{}, time.Time{}, 0 + }, + err: false, + }, + { + name: "no output, index 3", // chain rolled back + bridgeInfo: ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + BridgeConfig: ophosttypes.BridgeConfig{ + SubmissionInterval: 100, + }, + }, + hostOutputs: map[uint64]ophosttypes.Output{}, + lastWorkingTree: merkletypes.TreeInfo{ + Version: 9, + Index: 3, + LeafCount: 2, + StartLeafIndex: 1, + LastSiblings: make(map[uint8][]byte), + Done: false, + }, + expected: func() (lastOutputTime time.Time, nextOutputTime time.Time, finalizingBlockHeight int64) { + return time.Time{}, time.Time{}, 0 + }, + err: true, + }, + { + name: "outputs {1}, index 1", // sync + bridgeInfo: ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + BridgeConfig: ophosttypes.BridgeConfig{ + SubmissionInterval: 100, + }, + }, + hostOutputs: 
map[uint64]ophosttypes.Output{ + 1: { + L1BlockTime: time.Time{}, + L2BlockNumber: 10, + }, + }, + lastWorkingTree: merkletypes.TreeInfo{ + Version: 9, + Index: 1, + LeafCount: 2, + StartLeafIndex: 1, + LastSiblings: make(map[uint8][]byte), + Done: false, + }, + expected: func() (lastOutputTime time.Time, nextOutputTime time.Time, finalizingBlockHeight int64) { + return time.Time{}, time.Time{}, 10 + }, + err: false, + }, + { + name: "outputs {1}, index 2", + bridgeInfo: ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + BridgeConfig: ophosttypes.BridgeConfig{ + SubmissionInterval: 300, + }, + }, + hostOutputs: map[uint64]ophosttypes.Output{ + 1: { + L1BlockTime: time.Unix(0, 10000).UTC(), + L2BlockNumber: 10, + }, + }, + lastWorkingTree: merkletypes.TreeInfo{ + Version: 9, + Index: 2, + LeafCount: 2, + StartLeafIndex: 1, + LastSiblings: make(map[uint8][]byte), + Done: false, + }, + expected: func() (lastOutputTime time.Time, nextOutputTime time.Time, finalizingBlockHeight int64) { + return time.Unix(0, 10000).UTC(), time.Unix(0, 10200).UTC(), 0 + }, + err: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + basedb, err := db.NewMemDB() + require.NoError(t, err) + + childdb := basedb.WithPrefix([]byte("test_child")) + + childNode := node.NewTestNode(nodetypes.NodeConfig{}, childdb, nil, nil, nil, nil) + + mk, err := merkle.NewMerkle(ophosttypes.GenerateNodeHash) + require.NoError(t, err) + err = mk.PrepareWorkingTree(tc.lastWorkingTree) + require.NoError(t, err) + + mockHost := NewMockHost(nil, nil, tc.bridgeInfo.BridgeId, "", tc.hostOutputs) + + ch := Child{ + BaseChild: childprovider.NewTestBaseChild(0, childNode, mk, tc.bridgeInfo, nil, nodetypes.NodeConfig{}), + host: mockHost, + } + + err = ch.prepareOutput(context.TODO()) + if !tc.err { + require.NoError(t, err) + + expectedLastOutputTime, expectedNextOutputTime, expectedFinalizingBlockHeight := tc.expected() + require.Equal(t, expectedLastOutputTime, ch.lastOutputTime) + require.Equal(t, expectedNextOutputTime, ch.nextOutputTime) + require.Equal(t, expectedFinalizingBlockHeight, ch.finalizingBlockHeight) + } else { + require.Error(t, err) + } + }) + } +} + +func TestHandleTree(t *testing.T) { + bridgeInfo := ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + BridgeConfig: ophosttypes.BridgeConfig{ + SubmissionInterval: 300, + }, + } + blockId := []byte("test_block_id") + + cases := []struct { + name string + blockHeight int64 + latestHeight int64 + blockHeader cmtproto.Header + lastWorkingTree merkletypes.TreeInfo + lastOutputTime time.Time + nextOutputTime time.Time + finalizingBlockHeight int64 + + expected func() (storageRoot []byte, lastOutputTime time.Time, nextOutputTime time.Time, finalizingBlockHeight int64) + expectedStage []types.KV + err bool + panic bool + }{ + { + name: "current height 5, latest height 5, no leaf", // not saving finalized tree + blockHeight: 5, + latestHeight: 5, + blockHeader: cmtproto.Header{ + Time: time.Unix(0, 10100).UTC(), + }, + lastWorkingTree: merkletypes.TreeInfo{ + Version: 4, + Index: 3, + LeafCount: 0, + StartLeafIndex: 10, + LastSiblings: make(map[uint8][]byte), + Done: false, + }, + lastOutputTime: time.Time{}, + nextOutputTime: time.Unix(0, 10000).UTC(), + finalizingBlockHeight: 0, + + expected: func() (storageRoot []byte, lastOutputTime time.Time, nextOutputTime time.Time, finalizingBlockHeight int64) { + return []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + time.Unix(0, 10100).UTC(), time.Unix(0, 10300).UTC(), 0 + }, + expectedStage: []types.KV{ + { + Key: append([]byte("/test_child/working_tree/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}...), + Value: []byte(`{"version":5,"index":3,"leaf_count":0,"start_leaf_index":10,"last_siblings":{},"done":true}`), + }, + }, + err: false, + panic: false, + }, + { + name: "current height 5, latest height 5, 2 leaves", + blockHeight: 5, + latestHeight: 5, + blockHeader: cmtproto.Header{ + Time: time.Unix(0, 10100).UTC(), + }, + lastWorkingTree: merkletypes.TreeInfo{ + Version: 4, + Index: 3, + LeafCount: 2, + StartLeafIndex: 10, + LastSiblings: map[uint8][]byte{ + 0: {0xf7, 0x58, 0xe5, 0x5d, 0xb1, 0x30, 0x74, 0x4b, 0x05, 0xad, 0x66, 0x94, 0xb2, 0x8b, 0xe4, 0xab, 0x73, 0x0d, 0xe0, 0xdc, 0x09, 0xde, 0x5c, 0x0c, 0x42, 0xab, 0x64, 0x66, 0xc8, 0x06, 0xdc, 0x10}, + 1: {0x50, 0x26, 0x55, 0x2e, 0x7b, 0x21, 0xca, 0xb5, 0x27, 0xe4, 0x16, 0x9e, 0x66, 0x46, 0x02, 0xb8, 0x5d, 0x03, 0x67, 0x0b, 0xb5, 0x57, 0xe3, 0x29, 0x18, 0xd9, 0x33, 0xe3, 0xd5, 0x92, 0x5c, 0x7e}, + }, + Done: false, + }, + lastOutputTime: time.Time{}, + nextOutputTime: time.Unix(0, 10000).UTC(), + finalizingBlockHeight: 0, + + expected: func() (storageRoot []byte, lastOutputTime time.Time, nextOutputTime time.Time, finalizingBlockHeight int64) { + return []byte{0x50, 0x26, 0x55, 0x2e, 0x7b, 0x21, 0xca, 0xb5, 0x27, 0xe4, 0x16, 0x9e, 0x66, 0x46, 0x02, 0xb8, 0x5d, 0x03, 0x67, 0x0b, 0xb5, 0x57, 0xe3, 0x29, 0x18, 0xd9, 0x33, 0xe3, 0xd5, 0x92, 0x5c, 0x7e}, + time.Unix(0, 10100).UTC(), time.Unix(0, 10300).UTC(), 0 + }, + expectedStage: []types.KV{ + { + Key: append([]byte("/test_child/working_tree/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}...), + Value: []byte(`{"version":5,"index":3,"leaf_count":2,"start_leaf_index":10,"last_siblings":{"0":"91jlXbEwdEsFrWaUsovkq3MN4NwJ3lwMQqtkZsgG3BA=","1":"UCZVLnshyrUn5BaeZkYCuF0DZwu1V+MpGNkz49WSXH4="},"done":true}`), + }, + { + Key: append([]byte("/test_child/finalized_tree/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a}...), + Value: []byte(`{"tree_index":3,"tree_height":1,"root":"UCZVLnshyrUn5BaeZkYCuF0DZwu1V+MpGNkz49WSXH4=","start_leaf_index":10,"leaf_count":2,"extra_data":"eyJibG9ja19udW1iZXIiOjUsImJsb2NrX3RpbWUiOjEwMTAwLCJibG9ja19oYXNoIjoiZEdWemRGOWliRzlqYTE5cFpBPT0ifQ=="}`), + }, + }, + err: false, + panic: false, + }, + { + name: "current height 5, latest height 5, 3 leaves", + blockHeight: 5, + latestHeight: 5, + blockHeader: cmtproto.Header{ + Time: time.Unix(0, 10100).UTC(), + }, + lastWorkingTree: merkletypes.TreeInfo{ + Version: 4, + Index: 3, + LeafCount: 3, + StartLeafIndex: 10, + LastSiblings: map[uint8][]byte{ + 0: {0xd9, 0xf8, 0x70, 0xb0, 0x6d, 0x46, 0x43, 0xc5, 0x9f, 0xbd, 0x0a, 0x9a, 0xd1, 0xe5, 0x5c, 0x43, 0x98, 0xdd, 0xae, 0xf1, 0xca, 0xc2, 0xd7, 0xfb, 0xcf, 0xd5, 0xe0, 0x11, 0xb6, 0x83, 0xb8, 0x33}, + 1: {0x50, 0x26, 0x55, 0x2e, 0x7b, 0x21, 0xca, 0xb5, 0x27, 0xe4, 0x16, 0x9e, 0x66, 0x46, 0x02, 0xb8, 0x5d, 0x03, 0x67, 0x0b, 0xb5, 0x57, 0xe3, 0x29, 0x18, 0xd9, 0x33, 0xe3, 0xd5, 0x92, 0x5c, 0x7e}, + }, + Done: false, + }, + lastOutputTime: time.Time{}, + nextOutputTime: time.Unix(0, 10000).UTC(), + finalizingBlockHeight: 0, + + expected: func() (storageRoot []byte, lastOutputTime time.Time, nextOutputTime time.Time, finalizingBlockHeight int64) { + return []byte{0xff, 0xd4, 0x7a, 0x71, 0xf6, 0x3a, 0x8a, 0x50, 0x09, 0x56, 0xef, 0x34, 0xb1, 0xfa, 0xbb, 0xd4, 0x2f, 0x07, 0xc8, 0x5e, 0x77, 0xf7, 0xad, 0x21, 
0x27, 0x01, 0xe0, 0x64, 0xda, 0xbd, 0xf6, 0xa3}, + time.Unix(0, 10100).UTC(), time.Unix(0, 10300).UTC(), 0 + }, + expectedStage: []types.KV{ + { + Key: append([]byte("/test_child/working_tree/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}...), + Value: []byte(`{"version":5,"index":3,"leaf_count":3,"start_leaf_index":10,"last_siblings":{"0":"2fhwsG1GQ8WfvQqa0eVcQ5jdrvHKwtf7z9XgEbaDuDM=","1":"rRHIp/aKAeTbiJgLTE+o5pTqhf9HmGTslmATJK72mmc=","2":"/9R6cfY6ilAJVu80sfq71C8HyF53960hJwHgZNq99qM="},"done":true}`), + }, + { + Key: append([]byte("/test_child/finalized_tree/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a}...), + Value: []byte(`{"tree_index":3,"tree_height":2,"root":"/9R6cfY6ilAJVu80sfq71C8HyF53960hJwHgZNq99qM=","start_leaf_index":10,"leaf_count":3,"extra_data":"eyJibG9ja19udW1iZXIiOjUsImJsb2NrX3RpbWUiOjEwMTAwLCJibG9ja19oYXNoIjoiZEdWemRGOWliRzlqYTE5cFpBPT0ifQ=="}`), + }, + { // height 0, index 3 + Key: append([]byte("/test_child/node/"), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}...), + Value: []byte{0xd9, 0xf8, 0x70, 0xb0, 0x6d, 0x46, 0x43, 0xc5, 0x9f, 0xbd, 0x0a, 0x9a, 0xd1, 0xe5, 0x5c, 0x43, 0x98, 0xdd, 0xae, 0xf1, 0xca, 0xc2, 0xd7, 0xfb, 0xcf, 0xd5, 0xe0, 0x11, 0xb6, 0x83, 0xb8, 0x33}, + }, + { // height 1, index 1 + Key: append([]byte("/test_child/node/"), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}...), + Value: []byte{0xad, 0x11, 0xc8, 0xa7, 0xf6, 0x8a, 0x01, 0xe4, 0xdb, 0x88, 0x98, 0x0b, 0x4c, 0x4f, 0xa8, 0xe6, 0x94, 0xea, 0x85, 0xff, 0x47, 0x98, 0x64, 0xec, 0x96, 0x60, 0x13, 0x24, 0xae, 0xf6, 0x9a, 0x67}, + }, + { // height 2, index 0 + Key: append([]byte("/test_child/node/"), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}...), + Value: []byte{0xff, 0xd4, 0x7a, 0x71, 0xf6, 0x3a, 0x8a, 0x50, 0x09, 0x56, 0xef, 0x34, 0xb1, 0xfa, 0xbb, 0xd4, 0x2f, 0x07, 0xc8, 0x5e, 0x77, 0xf7, 0xad, 0x21, 0x27, 0x01, 0xe0, 0x64, 0xda, 0xbd, 0xf6, 0xa3}, + }, + }, + err: false, + panic: false, + }, + { + name: "passed finalizing block height", + blockHeight: 10, + latestHeight: 10, + blockHeader: cmtproto.Header{ + Time: time.Unix(0, 10100).UTC(), + }, + lastWorkingTree: merkletypes.TreeInfo{ + Version: 9, + Index: 3, + LeafCount: 3, + StartLeafIndex: 10, + LastSiblings: map[uint8][]byte{}, + Done: false, + }, + finalizingBlockHeight: 5, + + expected: nil, + expectedStage: nil, + err: false, + panic: true, + }, + { //nolint + name: "output time not reached", + blockHeight: 5, + latestHeight: 5, + blockHeader: cmtproto.Header{ + Time: time.Unix(0, 9900).UTC(), + }, + lastWorkingTree: merkletypes.TreeInfo{ + Version: 4, + Index: 3, + LeafCount: 3, + StartLeafIndex: 10, + LastSiblings: map[uint8][]byte{ + 0: {0xd9, 0xf8, 0x70, 0xb0, 0x6d, 0x46, 0x43, 0xc5, 0x9f, 0xbd, 0x0a, 0x9a, 0xd1, 0xe5, 0x5c, 0x43, 0x98, 0xdd, 0xae, 0xf1, 0xca, 0xc2, 0xd7, 0xfb, 0xcf, 0xd5, 0xe0, 0x11, 0xb6, 0x83, 0xb8, 0x33}, + 1: {0x50, 0x26, 0x55, 0x2e, 0x7b, 0x21, 0xca, 0xb5, 0x27, 0xe4, 0x16, 0x9e, 0x66, 0x46, 0x02, 0xb8, 0x5d, 0x03, 0x67, 0x0b, 0xb5, 0x57, 0xe3, 0x29, 0x18, 0xd9, 0x33, 0xe3, 0xd5, 0x92, 0x5c, 0x7e}, + }, + Done: false, + }, + lastOutputTime: time.Time{}, + nextOutputTime: time.Unix(0, 10000).UTC(), + finalizingBlockHeight: 0, + + expected: nil, + expectedStage: []types.KV{ + { + Key: append([]byte("/test_child/working_tree/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}...), + Value: 
[]byte(`{"version":5,"index":3,"leaf_count":3,"start_leaf_index":10,"last_siblings":{"0":"2fhwsG1GQ8WfvQqa0eVcQ5jdrvHKwtf7z9XgEbaDuDM=","1":"UCZVLnshyrUn5BaeZkYCuF0DZwu1V+MpGNkz49WSXH4="},"done":false}`), + }, + }, + err: false, + panic: false, + }, + { //nolint + name: "latest height not reached", + blockHeight: 5, + latestHeight: 6, + blockHeader: cmtproto.Header{ + Time: time.Unix(0, 9900).UTC(), + }, + lastWorkingTree: merkletypes.TreeInfo{ + Version: 4, + Index: 3, + LeafCount: 3, + StartLeafIndex: 10, + LastSiblings: map[uint8][]byte{ + 0: {0xd9, 0xf8, 0x70, 0xb0, 0x6d, 0x46, 0x43, 0xc5, 0x9f, 0xbd, 0x0a, 0x9a, 0xd1, 0xe5, 0x5c, 0x43, 0x98, 0xdd, 0xae, 0xf1, 0xca, 0xc2, 0xd7, 0xfb, 0xcf, 0xd5, 0xe0, 0x11, 0xb6, 0x83, 0xb8, 0x33}, + 1: {0x50, 0x26, 0x55, 0x2e, 0x7b, 0x21, 0xca, 0xb5, 0x27, 0xe4, 0x16, 0x9e, 0x66, 0x46, 0x02, 0xb8, 0x5d, 0x03, 0x67, 0x0b, 0xb5, 0x57, 0xe3, 0x29, 0x18, 0xd9, 0x33, 0xe3, 0xd5, 0x92, 0x5c, 0x7e}, + }, + Done: false, + }, + lastOutputTime: time.Time{}, + nextOutputTime: time.Unix(0, 10000).UTC(), + finalizingBlockHeight: 0, + + expected: nil, + expectedStage: []types.KV{ + { + Key: append([]byte("/test_child/working_tree/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}...), + Value: []byte(`{"version":5,"index":3,"leaf_count":3,"start_leaf_index":10,"last_siblings":{"0":"2fhwsG1GQ8WfvQqa0eVcQ5jdrvHKwtf7z9XgEbaDuDM=","1":"UCZVLnshyrUn5BaeZkYCuF0DZwu1V+MpGNkz49WSXH4="},"done":false}`), + }, + }, + err: false, + panic: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + basedb, err := db.NewMemDB() + require.NoError(t, err) + + childdb := basedb.WithPrefix([]byte("test_child")) + childNode := node.NewTestNode(nodetypes.NodeConfig{}, childdb, nil, nil, nil, nil) + + mk, err := merkle.NewMerkle(ophosttypes.GenerateNodeHash) + require.NoError(t, err) + err = mk.PrepareWorkingTree(tc.lastWorkingTree) + require.NoError(t, err) + + stage := childdb.NewStage().(db.Stage) + ch := Child{ + BaseChild: childprovider.NewTestBaseChild(0, childNode, mk, bridgeInfo, nil, nodetypes.NodeConfig{}), + stage: stage, + + finalizingBlockHeight: tc.finalizingBlockHeight, + lastOutputTime: tc.lastOutputTime, + nextOutputTime: tc.nextOutputTime, + } + + ctx := types.NewContext(context.Background(), zap.NewNop(), "") + if tc.panic { + require.Panics(t, func() { + ch.handleTree(ctx, tc.blockHeight, tc.latestHeight, blockId, tc.blockHeader) //nolint + }) + return + } + + storageRoot, err := ch.handleTree(ctx, tc.blockHeight, tc.latestHeight, blockId, tc.blockHeader) + if !tc.err { + require.NoError(t, err) + + if tc.expected != nil { + expectedStorageRoot, expectedLastOutputTime, expectedNextOutputTime, expectedFinalizingBlockHeight := tc.expected() + require.Equal(t, expectedStorageRoot, storageRoot) + require.Equal(t, expectedLastOutputTime, ch.lastOutputTime) + require.Equal(t, expectedNextOutputTime, ch.nextOutputTime) + require.Equal(t, expectedFinalizingBlockHeight, ch.finalizingBlockHeight) + } + + if tc.expectedStage != nil { + allkvs := stage.All() + for _, kv := range tc.expectedStage { + require.Equal(t, kv.Value, allkvs[string(kv.Key)]) + } + } + } else { + require.Error(t, err) + } + }) + } +} + +func TestHandleOutput(t *testing.T) { + cases := []struct { + name string + blockHeight int64 + version uint8 + blockId []byte + outputIndex uint64 + storageRoot []byte + bridgeInfo ophosttypes.QueryBridgeResponse + host *mockHost + expected sdk.Msg + err bool + }{ + { + name: "success", + blockHeight: 10, + version: 1, + blockId: 
[]byte("latestBlockHashlatestBlockHashla"), + outputIndex: 1, + storageRoot: []byte("storageRootstorageRootstorageRoo"), + bridgeInfo: ophosttypes.QueryBridgeResponse{BridgeId: 1}, + host: NewMockHost(nil, nil, 1, "sender0", nil), + expected: &ophosttypes.MsgProposeOutput{ + Proposer: "sender0", + BridgeId: 1, + OutputIndex: 1, + L2BlockNumber: 10, + OutputRoot: []byte{0xc7, 0x4e, 0xaa, 0x00, 0xbb, 0xc8, 0x16, 0xd2, 0x94, 0x39, 0x01, 0x4c, 0xf7, 0x36, 0x3e, 0x29, 0xb1, 0x85, 0x18, 0x8c, 0xd4, 0x6a, 0x38, 0xfd, 0x64, 0x1f, 0xe5, 0x9f, 0xe4, 0x00, 0xbc, 0xf2}, + }, + err: false, + }, + { + name: "host no broadcaster", + blockHeight: 10, + version: 1, + blockId: []byte("latestBlockHashlatestBlockHashla"), + outputIndex: 1, + storageRoot: []byte("storageRootstorageRootstorageRoo"), + bridgeInfo: ophosttypes.QueryBridgeResponse{BridgeId: 1}, + host: NewMockHost(nil, nil, 1, "", nil), + expected: nil, + err: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + basedb, err := db.NewMemDB() + require.NoError(t, err) + + childdb := basedb.WithPrefix([]byte("test_child")) + childNode := node.NewTestNode(nodetypes.NodeConfig{}, childdb, nil, nil, nil, nil) + + ch := Child{ + BaseChild: childprovider.NewTestBaseChild(0, childNode, nil, tc.bridgeInfo, nil, nodetypes.NodeConfig{}), + host: tc.host, + } + + err = ch.handleOutput(tc.blockHeight, tc.version, tc.blockId, tc.outputIndex, tc.storageRoot) + if !tc.err { + require.NoError(t, err) + msg := ch.GetMsgQueue() + if tc.expected != nil { + require.Equal(t, 1, len(msg)) + require.Equal(t, tc.expected, msg[tc.host.baseAccount][0]) + } else { + require.Empty(t, msg[tc.host.baseAccount]) + } + } else { + require.Error(t, err) + } + }) + } +} diff --git a/executor/db.go b/executor/db.go index 4c74ace..81df964 100644 --- a/executor/db.go +++ b/executor/db.go @@ -1,7 +1,6 @@ package executor import ( - "context" "encoding/base64" "encoding/json" "fmt" @@ -14,6 +13,7 @@ import ( merkletypes "github.com/initia-labs/opinit-bots/merkle/types" "github.com/initia-labs/opinit-bots/node" "github.com/initia-labs/opinit-bots/node/rpcclient" + nodetypes "github.com/initia-labs/opinit-bots/node/types" "github.com/initia-labs/opinit-bots/types" "github.com/pkg/errors" ) @@ -23,12 +23,11 @@ func ResetHeights(db types.DB) error { types.HostName, types.ChildName, types.BatchName, - types.DAHostName, - types.DACelestiaName, + types.DAName, } for _, dbName := range dbNames { if err := ResetHeight(db, dbName); err != nil { - return err + return errors.Wrap(err, fmt.Sprintf("failed to reset height for %s", dbName)) } } return nil @@ -38,20 +37,19 @@ func ResetHeight(db types.DB, nodeName string) error { if nodeName != types.HostName && nodeName != types.ChildName && nodeName != types.BatchName && - nodeName != types.DAHostName && - nodeName != types.DACelestiaName { + nodeName != types.DAName { return errors.New("unknown node name") } nodeDB := db.WithPrefix([]byte(nodeName)) - err := node.DeleteSyncInfo(nodeDB) + err := node.DeleteSyncedHeight(nodeDB) if err != nil { - return err + return errors.Wrap(err, "failed to delete synced height") } if err := node.DeletePendingTxs(nodeDB); err != nil { - return err + return errors.Wrap(err, "failed to delete pending txs") } if err := node.DeleteProcessedMsgs(nodeDB); err != nil { - return err + return errors.Wrap(err, "failed to delete processed msgs") } fmt.Printf("reset height to 0 for node %s\n", string(nodeDB.GetPrefix())) return nil @@ -60,8 +58,8 @@ func ResetHeight(db types.DB, nodeName string) 
error { func Migration015(db types.DB) error { nodeDB := db.WithPrefix([]byte(types.ChildName)) addressIndexMap := make(map[string]uint64) - return nodeDB.PrefixedIterate(executortypes.WithdrawalKey, nil, func(key, value []byte) (bool, error) { - if len(key) != len(executortypes.WithdrawalKey)+1+8 { + return nodeDB.Iterate(executortypes.WithdrawalPrefix, nil, func(key, value []byte) (bool, error) { + if len(key) != len(executortypes.WithdrawalPrefix)+1+8 { return false, nil } @@ -72,7 +70,7 @@ func Migration015(db types.DB) error { return true, err } addressIndexMap[data.To]++ - err = nodeDB.Set(executortypes.PrefixedWithdrawalKeyAddressIndex(data.To, addressIndexMap[data.To]), dbtypes.FromUint64(sequence)) + err = nodeDB.Set(executortypes.PrefixedWithdrawalAddressSequence(data.To, addressIndexMap[data.To]), dbtypes.FromUint64(sequence)) if err != nil { return true, err } @@ -80,11 +78,11 @@ func Migration015(db types.DB) error { }) } -func Migration0191(db types.DB) error { +func Migration019_1(db types.DB) error { nodeDB := db.WithPrefix([]byte(types.ChildName)) merkleDB := nodeDB.WithPrefix([]byte(types.MerkleName)) - err := merkleDB.PrefixedIterate(merkletypes.FinalizedTreeKey, nil, func(key, value []byte) (bool, error) { + err := merkleDB.Iterate(merkletypes.FinalizedTreePrefix, nil, func(key, value []byte) (bool, error) { var tree merkletypes.FinalizedTreeInfo err := json.Unmarshal(value, &tree) if err != nil { @@ -104,9 +102,9 @@ func Migration0191(db types.DB) error { nextSequence := uint64(1) changeWorkingTree := false - err = merkleDB.PrefixedIterate(merkletypes.WorkingTreeKey, nil, func(key, value []byte) (bool, error) { - if len(key) != len(merkletypes.WorkingTreeKey)+1+8 { - return true, fmt.Errorf("unexpected working tree key; expected: %d; got: %d", len(merkletypes.WorkingTreeKey)+1+8, len(key)) + err = merkleDB.Iterate(merkletypes.WorkingTreePrefix, nil, func(key, value []byte) (bool, error) { + if len(key) != len(merkletypes.WorkingTreePrefix)+1+8 { + return true, fmt.Errorf("unexpected working tree key; expected: %d; got: %d", len(merkletypes.WorkingTreePrefix)+1+8, len(key)) } version := dbtypes.ToUint64Key(key[len(key)-8:]) @@ -177,14 +175,14 @@ func Migration0191(db types.DB) error { return nil } -func Migration0192(ctx context.Context, db types.DB, rpcClient *rpcclient.RPCClient) error { +func Migration019_2(ctx types.Context, db types.DB, rpcClient *rpcclient.RPCClient) error { nodeDB := db.WithPrefix([]byte(types.ChildName)) merkleDB := nodeDB.WithPrefix([]byte(types.MerkleName)) - timer := time.NewTicker(types.PollingInterval(ctx)) + timer := time.NewTicker(ctx.PollingInterval()) defer timer.Stop() - return merkleDB.PrefixedIterate(merkletypes.FinalizedTreeKey, nil, func(key, value []byte) (bool, error) { + return merkleDB.Iterate(merkletypes.FinalizedTreePrefix, nil, func(key, value []byte) (bool, error) { var tree merkletypes.FinalizedTreeInfo err := json.Unmarshal(value, &tree) if err != nil { @@ -240,10 +238,10 @@ func Migration0192(ctx context.Context, db types.DB, rpcClient *rpcclient.RPCCli func Migration0110(db types.DB) error { nodeDB := db.WithPrefix([]byte(types.ChildName)) - err := nodeDB.PrefixedIterate(executortypes.WithdrawalKey, nil, func(key, value []byte) (bool, error) { + err := nodeDB.Iterate(executortypes.WithdrawalPrefix, nil, func(key, value []byte) (bool, error) { // pass PrefixedWithdrawalKey ( WithdrawalKey / Sequence ) // we only delete PrefixedWithdrawalKeyAddressIndex ( WithdrawalKey / Address / Sequence ) - if len(key) == 
len(executortypes.WithdrawalKey)+1+8 { + if len(key) == len(executortypes.WithdrawalPrefix)+1+8 { return false, nil } err := nodeDB.Delete(key) @@ -256,17 +254,112 @@ func Migration0110(db types.DB) error { return err } - return nodeDB.PrefixedIterate(executortypes.WithdrawalKey, nil, func(key, value []byte) (bool, error) { + return nodeDB.Iterate(executortypes.WithdrawalPrefix, nil, func(key, value []byte) (bool, error) { sequence := dbtypes.ToUint64Key(key[len(key)-8:]) var data executortypes.WithdrawalData err := json.Unmarshal(value, &data) if err != nil { return true, err } - err = nodeDB.Set(executortypes.PrefixedWithdrawalKeyAddressIndex(data.To, sequence), dbtypes.FromUint64(sequence)) + err = nodeDB.Set(executortypes.PrefixedWithdrawalAddressSequence(data.To, sequence), dbtypes.FromUint64(sequence)) + if err != nil { + return true, err + } + return false, nil + }) +} + +func Migration0111(db types.DB) error { + DAHostName := "da_host" + DACelestiaName := "da_celestia" + + // move all data from da_host and da_celestia to da + daDB := db.WithPrefix([]byte(types.DAName)) + for _, dbName := range []string{DAHostName, DACelestiaName} { + nodeDB := db.WithPrefix([]byte(dbName)) + + err := nodeDB.Iterate(nil, nil, func(key, value []byte) (bool, error) { + err := daDB.Set(key, value) + if err != nil { + return true, errors.Wrap(err, "failed to set data to DA") + } + + err = nodeDB.Delete(key) + if err != nil { + return true, errors.Wrap(err, fmt.Sprintf("failed to delete data from %s", dbName)) + } + return false, nil + }) + if err != nil { + return err + } + } + + // change the last processed block height to synced height + for _, nodeName := range []string{ + types.HostName, + types.ChildName, + types.BatchName, + types.DAName, + } { + nodeDB := db.WithPrefix([]byte(nodeName)) + + value, err := nodeDB.Get([]byte("last_processed_block_height")) + if err == nil { + err = nodeDB.Set(nodetypes.SyncedHeightKey, value) + if err != nil { + return errors.Wrap(err, "failed to set synced height") + } + } + } + + // change WithdrawalPrefix to WithdrawalSequencePrefix or WithdrawalAddressPrefix + childDB := db.WithPrefix([]byte(types.ChildName)) + err := childDB.Iterate(executortypes.WithdrawalPrefix, nil, func(key, value []byte) (bool, error) { + if len(key) == len(executortypes.WithdrawalPrefix)+1+8 { + err := childDB.Set(append(executortypes.WithdrawalSequencePrefix, key[len(executortypes.WithdrawalPrefix):]...), value) + if err != nil { + return true, err + } + } else { + err := childDB.Set(append(executortypes.WithdrawalAddressPrefix, key[len(executortypes.WithdrawalPrefix):]...), value) + if err != nil { + return true, err + } + } + + err := childDB.Delete(key) if err != nil { return true, err } return false, nil }) + if err != nil { + return err + } + + return childDB.Iterate(merkletypes.WorkingTreePrefix, nil, func(key, value []byte) (bool, error) { + version, err := merkletypes.ParseWorkingTreeKey(key) + if err != nil { + return true, errors.Wrap(err, "failed to parse working tree key") + } + + var legacyTree merkletypes.LegacyTreeInfo + err = json.Unmarshal(value, &legacyTree) + if err != nil { + return true, errors.Wrap(err, "failed to unmarshal tree info") + } + + tree := legacyTree.Migrate(version) + treeBz, err := tree.Marshal() + if err != nil { + return true, errors.Wrap(err, "failed to marshal tree info") + } + + err = childDB.Set(key, treeBz) + if err != nil { + return true, errors.Wrap(err, "failed to set tree info") + } + return false, nil + }) } diff --git 
a/executor/executor.go b/executor/executor.go index 2f99d13..98de697 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -1,14 +1,11 @@ package executor import ( - "context" "fmt" - "strconv" "github.com/pkg/errors" - "github.com/gofiber/fiber/v2" - "github.com/initia-labs/opinit-bots/executor/batch" + "github.com/initia-labs/opinit-bots/executor/batchsubmitter" "github.com/initia-labs/opinit-bots/executor/celestia" "github.com/initia-labs/opinit-bots/executor/child" "github.com/initia-labs/opinit-bots/executor/host" @@ -29,19 +26,16 @@ var _ bottypes.Bot = &Executor{} // - relay l1 deposit messages to l2 // - generate l2 output root and submit to l1 type Executor struct { - host *host.Host - child *child.Child - batch *batch.BatchSubmitter + host *host.Host + child *child.Child + batchSubmitter *batchsubmitter.BatchSubmitter cfg *executortypes.Config db types.DB server *server.Server - logger *zap.Logger - - homePath string } -func NewExecutor(cfg *executortypes.Config, db types.DB, logger *zap.Logger, homePath string) *Executor { +func NewExecutor(cfg *executortypes.Config, db types.DB, sv *server.Server) *Executor { err := cfg.Validate() if err != nil { panic(err) @@ -49,31 +43,27 @@ func NewExecutor(cfg *executortypes.Config, db types.DB, logger *zap.Logger, hom return &Executor{ host: host.NewHostV1( - cfg.L1NodeConfig(homePath), + cfg.L1NodeConfig(), db.WithPrefix([]byte(types.HostName)), - logger.Named(types.HostName), ), child: child.NewChildV1( - cfg.L2NodeConfig(homePath), + cfg.L2NodeConfig(), db.WithPrefix([]byte(types.ChildName)), - logger.Named(types.ChildName), ), - batch: batch.NewBatchSubmitterV1( - cfg.L2NodeConfig(homePath), - cfg.BatchConfig(), db.WithPrefix([]byte(types.BatchName)), - logger.Named(types.BatchName), cfg.L2Node.ChainID, homePath, + batchSubmitter: batchsubmitter.NewBatchSubmitterV1( + cfg.L2NodeConfig(), + cfg.BatchConfig(), + db.WithPrefix([]byte(types.BatchName)), + cfg.L2Node.ChainID, ), cfg: cfg, db: db, - server: server.NewServer(cfg.Server), - logger: logger, - - homePath: homePath, + server: sv, } } -func (ex *Executor) Initialize(ctx context.Context) error { +func (ex *Executor) Initialize(ctx types.Context) error { childBridgeInfo, err := ex.child.QueryBridgeInfo(ctx) if err != nil { return err @@ -87,143 +77,76 @@ func (ex *Executor) Initialize(ctx context.Context) error { return err } - ex.logger.Info( + ctx.Logger().Info( "bridge info", zap.Uint64("id", bridgeInfo.BridgeId), zap.Duration("submission_interval", bridgeInfo.BridgeConfig.SubmissionInterval), ) - hostProcessedHeight, childProcessedHeight, processedOutputIndex, batchProcessedHeight, err := ex.getProcessedHeights(ctx, bridgeInfo.BridgeId) + l1StartHeight, l2StartHeight, startOutputIndex, batchStartHeight, err := ex.getNodeStartHeights(ctx, bridgeInfo.BridgeId) if err != nil { - return err + return errors.Wrap(err, "failed to get processed heights") } hostKeyringConfig, childKeyringConfig, childOracleKeyringConfig, daKeyringConfig := ex.getKeyringConfigs(*bridgeInfo) - err = ex.host.Initialize(ctx, hostProcessedHeight, ex.child, ex.batch, *bridgeInfo, hostKeyringConfig) + err = ex.host.Initialize(ctx, l1StartHeight-1, ex.child, ex.batchSubmitter, *bridgeInfo, hostKeyringConfig) if err != nil { - return err + return errors.Wrap(err, "failed to initialize host") } - err = ex.child.Initialize( - ctx, - childProcessedHeight, - processedOutputIndex+1, - ex.host, - *bridgeInfo, - childKeyringConfig, - childOracleKeyringConfig, - ex.cfg.DisableDeleteFutureWithdrawal, - ) + err = 
ex.child.Initialize(ctx, l2StartHeight-1, startOutputIndex, ex.host, *bridgeInfo, childKeyringConfig, childOracleKeyringConfig, ex.cfg.DisableDeleteFutureWithdrawal) if err != nil { - return err + return errors.Wrap(err, "failed to initialize child") } - err = ex.batch.Initialize(ctx, batchProcessedHeight, ex.host, *bridgeInfo) + err = ex.batchSubmitter.Initialize(ctx, batchStartHeight-1, ex.host, *bridgeInfo) if err != nil { - return err + return errors.Wrap(err, "failed to initialize batch") } da, err := ex.makeDANode(ctx, *bridgeInfo, daKeyringConfig) if err != nil { - return err + return errors.Wrap(err, "failed to make DA node") } - ex.batch.SetDANode(da) + ex.batchSubmitter.SetDANode(da) ex.RegisterQuerier() return nil } -func (ex *Executor) Start(ctx context.Context) error { +func (ex *Executor) Start(ctx types.Context) error { defer ex.Close() - errGrp := types.ErrGrp(ctx) - errGrp.Go(func() (err error) { + ctx.ErrGrp().Go(func() (err error) { <-ctx.Done() return ex.server.Shutdown() }) - errGrp.Go(func() (err error) { + ctx.ErrGrp().Go(func() (err error) { defer func() { - ex.logger.Info("api server stopped") + ctx.Logger().Info("api server stopped") }() return ex.server.Start() }) ex.host.Start(ctx) ex.child.Start(ctx) - ex.batch.Start(ctx) - ex.batch.DA().Start(ctx) - return errGrp.Wait() + ex.batchSubmitter.Start(ctx) + ex.batchSubmitter.DA().Start(ctx) + return ctx.ErrGrp().Wait() } func (ex *Executor) Close() { - ex.batch.Close() - ex.db.Close() -} - -func (ex *Executor) RegisterQuerier() { - ex.server.RegisterQuerier("/withdrawal/:sequence", func(c *fiber.Ctx) error { - sequenceStr := c.Params("sequence") - if sequenceStr == "" { - return errors.New("sequence is required") - } - sequence, err := strconv.ParseUint(sequenceStr, 10, 64) - if err != nil { - return err - } - res, err := ex.child.QueryWithdrawal(sequence) - if err != nil { - return err - } - return c.JSON(res) - }) - - ex.server.RegisterQuerier("/withdrawals/:address", func(c *fiber.Ctx) error { - address := c.Params("address") - if address == "" { - return errors.New("address is required") - } - - offset := c.QueryInt("offset", 0) - uoffset, err := types.SafeInt64ToUint64(int64(offset)) - if err != nil { - return err - } - - limit := c.QueryInt("limit", 10) - if limit > 100 { - limit = 100 - } - - ulimit, err := types.SafeInt64ToUint64(int64(limit)) - if err != nil { - return err - } - - descOrder := true - orderStr := c.Query("order", "desc") - if orderStr == "asc" { - descOrder = false - } - res, err := ex.child.QueryWithdrawals(address, uoffset, ulimit, descOrder) - if err != nil { - return err - } - return c.JSON(res) - }) - - ex.server.RegisterQuerier("/status", func(c *fiber.Ctx) error { - status, err := ex.GetStatus() - if err != nil { - return err - } - return c.JSON(status) - }) + ex.batchSubmitter.Close() } -func (ex *Executor) makeDANode(ctx context.Context, bridgeInfo ophosttypes.QueryBridgeResponse, daKeyringConfig *btypes.KeyringConfig) (executortypes.DANode, error) { +// makeDANode creates a DA node based on the bridge info +// - if the bridge chain type is INITIA and the host address is the same as the submitter, it returns the existing host node +// - if the bridge chain type is INITIA and the host address is different from the submitter, it returns a new host node +// - if the bridge chain type is CELESTIA, it returns a new celestia node +func (ex *Executor) makeDANode(ctx types.Context, bridgeInfo ophosttypes.QueryBridgeResponse, daKeyringConfig *btypes.KeyringConfig) (executortypes.DANode, 
error) { if ex.cfg.DisableBatchSubmitter { - return batch.NewNoopDA(), nil + return batchsubmitter.NewNoopDA(), nil } - batchInfo := ex.batch.BatchInfo() + batchInfo := ex.batchSubmitter.BatchInfo() if batchInfo == nil { return nil, errors.New("batch info is not set") } @@ -232,26 +155,26 @@ func (ex *Executor) makeDANode(ctx context.Context, bridgeInfo ophosttypes.Query // might not exist hostAddrStr, err := ex.host.BaseAccountAddressString() if err != nil && !errors.Is(err, types.ErrKeyNotSet) { - return nil, err + return nil, errors.Wrap(err, "failed to get host address") } else if err == nil && hostAddrStr == batchInfo.BatchInfo.Submitter { return ex.host, nil } hostda := host.NewHostV1( - ex.cfg.DANodeConfig(ex.homePath), - ex.db.WithPrefix([]byte(types.DAHostName)), - ex.logger.Named(types.DAHostName), + ex.cfg.DANodeConfig(), + ex.db.WithPrefix([]byte(types.DAName)), ) err = hostda.InitializeDA(ctx, bridgeInfo, daKeyringConfig) - return hostda, err + return hostda, errors.Wrap(err, "failed to initialize host DA") case ophosttypes.BatchInfo_CHAIN_TYPE_CELESTIA: - celestiada := celestia.NewDACelestia(ex.cfg.Version, ex.cfg.DANodeConfig(ex.homePath), - ex.db.WithPrefix([]byte(types.DACelestiaName)), - ex.logger.Named(types.DACelestiaName), + celestiada := celestia.NewDACelestia( + ex.cfg.Version, + ex.cfg.DANodeConfig(), + ex.db.WithPrefix([]byte(types.DAName)), ) - err := celestiada.Initialize(ctx, ex.batch, bridgeInfo.BridgeId, daKeyringConfig) + err := celestiada.Initialize(ctx, ex.batchSubmitter, bridgeInfo.BridgeId, daKeyringConfig) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to initialize celestia DA") } celestiada.RegisterDAHandlers() return celestiada, nil @@ -260,63 +183,71 @@ func (ex *Executor) makeDANode(ctx context.Context, bridgeInfo ophosttypes.Query return nil, fmt.Errorf("unsupported chain id for DA: %s", ophosttypes.BatchInfo_ChainType_name[int32(batchInfo.BatchInfo.ChainType)]) } -func (ex *Executor) getProcessedHeights(ctx context.Context, bridgeId uint64) (l1ProcessedHeight int64, l2ProcessedHeight int64, processedOutputIndex uint64, batchProcessedHeight int64, err error) { - var outputL1BlockNumber int64 - // get the last submitted output height before the start height from the host - if ex.cfg.L2StartHeight != 0 { +// getNodeStartHeights returns the start heights of the host, the child node, and the batch submitter, and the start output index +func (ex *Executor) getNodeStartHeights(ctx types.Context, bridgeId uint64) (l1StartHeight int64, l2StartHeight int64, startOutputIndex uint64, batchStartHeight int64, err error) { + var outputL1Height, outputL2Height int64 + var outputIndex uint64 + + if ex.host.Node().GetSyncedHeight() == 0 || ex.child.Node().GetSyncedHeight() == 0 { + // get the last submitted output height before the start height from the host output, err := ex.host.QueryOutputByL2BlockNumber(ctx, bridgeId, ex.cfg.L2StartHeight) if err != nil { - return 0, 0, 0, 0, err + return 0, 0, 0, 0, errors.Wrap(err, "failed to query output by l2 block number") } else if output != nil { - outputL1BlockNumber = types.MustUint64ToInt64(output.OutputProposal.L1BlockNumber) - l2ProcessedHeight = types.MustUint64ToInt64(output.OutputProposal.L2BlockNumber) - processedOutputIndex = output.OutputIndex + outputL1Height = types.MustUint64ToInt64(output.OutputProposal.L1BlockNumber) + outputL2Height = types.MustUint64ToInt64(output.OutputProposal.L2BlockNumber) + outputIndex = output.OutputIndex } + l2StartHeight = outputL2Height + 1 + 
startOutputIndex = outputIndex + 1 } - if ex.cfg.DisableAutoSetL1Height { - l1ProcessedHeight = ex.cfg.L1StartHeight - } else { - // get the bridge start height from the host - l1ProcessedHeight, err = ex.host.QueryCreateBridgeHeight(ctx, bridgeId) - if err != nil { - return 0, 0, 0, 0, err - } + if ex.host.Node().GetSyncedHeight() == 0 { + // use l1 start height from the config if auto set is disabled + if ex.cfg.DisableAutoSetL1Height { + l1StartHeight = ex.cfg.L1StartHeight + } else { + // get the bridge start height from the host + l1StartHeight, err = ex.host.QueryCreateBridgeHeight(ctx, bridgeId) + if err != nil { + return 0, 0, 0, 0, errors.Wrap(err, "failed to query create bridge height") + } - l1Sequence, err := ex.child.QueryNextL1Sequence(ctx, 0) - if err != nil { - return 0, 0, 0, 0, err - } + childNextL1Sequence, err := ex.child.QueryNextL1Sequence(ctx, 0) + if err != nil { + return 0, 0, 0, 0, errors.Wrap(err, "failed to query next l1 sequence") + } - // query l1Sequence tx height - depositTxHeight, err := ex.host.QueryDepositTxHeight(ctx, bridgeId, l1Sequence) - if err != nil { - return 0, 0, 0, 0, err - } else if depositTxHeight == 0 && l1Sequence > 1 { - // query l1Sequence - 1 tx height - depositTxHeight, err = ex.host.QueryDepositTxHeight(ctx, bridgeId, l1Sequence-1) + // query last NextL1Sequence tx height + depositTxHeight, err := ex.host.QueryDepositTxHeight(ctx, bridgeId, childNextL1Sequence) if err != nil { - return 0, 0, 0, 0, err + return 0, 0, 0, 0, errors.Wrap(err, "failed to query deposit tx height") + } else if depositTxHeight == 0 && childNextL1Sequence > 1 { + // if the deposit tx with next_l1_sequence is not found + // query deposit tx with next_l1_sequence-1 tx + depositTxHeight, err = ex.host.QueryDepositTxHeight(ctx, bridgeId, childNextL1Sequence-1) + if err != nil { + return 0, 0, 0, 0, errors.Wrap(err, "failed to query deposit tx height") + } + } + + if l1StartHeight < depositTxHeight { + l1StartHeight = depositTxHeight } - } - if depositTxHeight > l1ProcessedHeight { - l1ProcessedHeight = depositTxHeight - } - if outputL1BlockNumber != 0 && outputL1BlockNumber < l1ProcessedHeight { - l1ProcessedHeight = outputL1BlockNumber - } - } - if l1ProcessedHeight > 0 { - l1ProcessedHeight-- + if outputL1Height != 0 && outputL1Height+1 < l1StartHeight { + l1StartHeight = outputL1Height + 1 + } + } } - if ex.cfg.BatchStartHeight > 0 { - batchProcessedHeight = ex.cfg.BatchStartHeight - 1 + if ex.batchSubmitter.Node().GetSyncedHeight() == 0 { + batchStartHeight = ex.cfg.BatchStartHeight } - return l1ProcessedHeight, l2ProcessedHeight, processedOutputIndex, batchProcessedHeight, err + return } +// getKeyringConfigs returns the keyring configs for the host, the child node, the child oracle node, and the DA node func (ex *Executor) getKeyringConfigs(bridgeInfo ophosttypes.QueryBridgeResponse) ( hostKeyringConfig *btypes.KeyringConfig, childKeyringConfig *btypes.KeyringConfig, diff --git a/executor/host/batch.go b/executor/host/batch.go index b7145e0..3419a01 100644 --- a/executor/host/batch.go +++ b/executor/host/batch.go @@ -1,16 +1,15 @@ package host import ( - "context" - "errors" - nodetypes "github.com/initia-labs/opinit-bots/node/types" hostprovider "github.com/initia-labs/opinit-bots/provider/host" "github.com/initia-labs/opinit-bots/types" "go.uber.org/zap" + + "github.com/pkg/errors" ) -func (h *Host) recordBatchHandler(_ context.Context, args nodetypes.EventHandlerArgs) error { +func (h *Host) recordBatchHandler(ctx types.Context, args 
nodetypes.EventHandlerArgs) error { hostAddress, err := h.BaseAccountAddressString() if err != nil { if errors.Is(err, types.ErrKeyNotSet) { @@ -21,29 +20,29 @@ func (h *Host) recordBatchHandler(_ context.Context, args nodetypes.EventHandler submitter, err := hostprovider.ParseMsgRecordBatch(args.EventAttributes) if err != nil { - return err + return errors.Wrap(err, "failed to parse record batch event") } if submitter != hostAddress { return nil } - h.Logger().Info("record batch", + ctx.Logger().Info("record batch", zap.String("submitter", submitter), ) return nil } -func (h *Host) updateBatchInfoHandler(_ context.Context, args nodetypes.EventHandlerArgs) error { +func (h *Host) updateBatchInfoHandler(ctx types.Context, args nodetypes.EventHandlerArgs) error { bridgeId, submitter, chain, outputIndex, l2BlockNumber, err := hostprovider.ParseMsgUpdateBatchInfo(args.EventAttributes) if err != nil { - return err + return errors.Wrap(err, "failed to parse update batch info event") } if bridgeId != h.BridgeId() { // pass other bridge deposit event return nil } - h.Logger().Info("update batch info", + ctx.Logger().Info("update batch info", zap.String("chain", chain), zap.String("submitter", submitter), zap.Uint64("output_index", outputIndex), diff --git a/executor/host/batch_test.go b/executor/host/batch_test.go new file mode 100644 index 0000000..5ffb5a2 --- /dev/null +++ b/executor/host/batch_test.go @@ -0,0 +1,318 @@ +package host + +import ( + "context" + "strconv" + "testing" + + abcitypes "github.com/cometbft/cometbft/abci/types" + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/initia-labs/opinit-bots/db" + "github.com/initia-labs/opinit-bots/node" + "github.com/initia-labs/opinit-bots/node/broadcaster" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + hostprovider "github.com/initia-labs/opinit-bots/provider/host" + "github.com/initia-labs/opinit-bots/types" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func RecordBatchEvents( + submitter string, +) []abcitypes.EventAttribute { + return []abcitypes.EventAttribute{ + { + Key: ophosttypes.AttributeKeySubmitter, + Value: submitter, + }, + } +} + +func TestRecordBatchHandler(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + + cdc, txConfig, err := hostprovider.GetCodec("init") + require.NoError(t, err) + + broadcaster, err := broadcaster.NewTestBroadcaster(cdc, db.WithPrefix([]byte("test_host")), nil, txConfig, "init", 1) + require.NoError(t, err) + + batchSubmitter, err := broadcaster.AccountByIndex(0) + require.NoError(t, err) + + hostNode := node.NewTestNode(nodetypes.NodeConfig{}, db.WithPrefix([]byte("test_host")), nil, nil, nil, broadcaster) + bridgeInfo := ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + } + + h := Host{ + BaseHost: hostprovider.NewTestBaseHost(0, hostNode, bridgeInfo, nodetypes.NodeConfig{}, nil), + } + + emptyBroadcasterHost := Host{ + BaseHost: hostprovider.NewTestBaseHost( + 0, + node.NewTestNode(nodetypes.NodeConfig{}, db.WithPrefix([]byte("test_host")), nil, nil, nil, nil), + bridgeInfo, + nodetypes.NodeConfig{}, + nil, + ), + } + + cases := []struct { + name string + host Host + eventHandlerArgs nodetypes.EventHandlerArgs + expected func() (msg string, fields []zapcore.Field) + err bool + }{ + { + name: "success", + host: h, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: RecordBatchEvents(batchSubmitter.GetAddressString()), + }, + expected: func() (msg string, fields []zapcore.Field) { 
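// expected() builds the exact log message and zap fields that
// recordBatchHandler should emit through ctx.Logger(); the test compares
// them against the entries captured by the observer core wired up in
// logCapturer (defined in common_test.go).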
+ msg = "record batch" + fields = []zapcore.Field{ + zap.String("submitter", batchSubmitter.GetAddressString()), + } + return msg, fields + }, + err: false, + }, + + { + name: "different submitter", + host: h, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: RecordBatchEvents("another_submitter"), + }, + expected: nil, + err: false, + }, + { + name: "empty broadcaster", + host: emptyBroadcasterHost, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: RecordBatchEvents(batchSubmitter.GetAddressString()), + }, + expected: nil, + err: false, + }, + { + name: "missing event attribute submitter", + host: emptyBroadcasterHost, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: RecordBatchEvents(batchSubmitter.GetAddressString())[1:], + }, + expected: nil, + err: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + logger, observedLogs := logCapturer() + ctx := types.NewContext(context.Background(), logger, "") + + err := h.recordBatchHandler(ctx, tc.eventHandlerArgs) + if !tc.err { + require.NoError(t, err) + if tc.expected != nil { + logs := observedLogs.TakeAll() + require.Len(t, logs, 1) + + expectedMsg, expectedFields := tc.expected() + require.Equal(t, expectedMsg, logs[0].Message) + require.Equal(t, expectedFields, logs[0].Context) + } + } else { + require.Error(t, err) + } + }) + } + require.NoError(t, err) +} + +func UpdateBatchInfoEvents( + bridgeId uint64, + chainType ophosttypes.BatchInfo_ChainType, + submitter string, + finalizedOutputIndex uint64, + l2BlockNumber uint64, +) []abcitypes.EventAttribute { + return []abcitypes.EventAttribute{ + { + Key: ophosttypes.AttributeKeyBridgeId, + Value: strconv.FormatUint(bridgeId, 10), + }, + { + Key: ophosttypes.AttributeKeyBatchChainType, + Value: chainType.StringWithoutPrefix(), + }, + { + Key: ophosttypes.AttributeKeyBatchSubmitter, + Value: submitter, + }, + { + Key: ophosttypes.AttributeKeyFinalizedOutputIndex, + Value: strconv.FormatUint(finalizedOutputIndex, 10), + }, + { + Key: ophosttypes.AttributeKeyFinalizedL2BlockNumber, + Value: strconv.FormatUint(l2BlockNumber, 10), + }, + } +} + +func TestUpdateBatchInfoHandler(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + + hostNode := node.NewTestNode(nodetypes.NodeConfig{}, db.WithPrefix([]byte("test_host")), nil, nil, nil, nil) + bridgeInfo := ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + } + + mockBatch := NewMockBatch() + h := Host{ + BaseHost: hostprovider.NewTestBaseHost(0, hostNode, bridgeInfo, nodetypes.NodeConfig{}, nil), + batch: mockBatch, + } + + fullAttributes := UpdateBatchInfoEvents(1, ophosttypes.BatchInfo_CHAIN_TYPE_INITIA, "submitter", 1, 1) + + cases := []struct { + name string + host Host + eventHandlerArgs nodetypes.EventHandlerArgs + expectedBatchInfo *mockBatchInfo + expectedLog func() (msg string, fields []zapcore.Field) + err bool + }{ + { + name: "success", + host: h, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: UpdateBatchInfoEvents(1, ophosttypes.BatchInfo_CHAIN_TYPE_INITIA, "submitter", 1, 1), + }, + expectedBatchInfo: &mockBatchInfo{ + chain: "INITIA", + submitter: "submitter", + outputIndex: 1, + l2BlockNumber: 1, + }, + expectedLog: func() (msg string, fields []zapcore.Field) { + msg = "update batch info" + fields = []zapcore.Field{ + zap.String("chain", "INITIA"), + zap.String("submitter", "submitter"), + zap.Uint64("output_index", 1), + zap.Int64("l2_block_number", 1), + } + return msg, fields + }, + err: false, + }, + { + name: 
"unspecified chain type", + host: h, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: UpdateBatchInfoEvents(1, ophosttypes.BatchInfo_CHAIN_TYPE_UNSPECIFIED, "submitter", 1, 1), + }, + expectedBatchInfo: nil, + expectedLog: nil, + err: true, + }, + { + name: "different bridge id", + host: h, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: UpdateBatchInfoEvents(2, ophosttypes.BatchInfo_CHAIN_TYPE_CELESTIA, "submitter", 1, 1), + }, + expectedBatchInfo: nil, + expectedLog: nil, + err: false, + }, + { + name: "missing event attribute bridge id", + host: h, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: fullAttributes[1:], + }, + expectedBatchInfo: nil, + expectedLog: nil, + err: true, + }, + { + name: "missing event attribute batch chain type", + host: h, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:1], fullAttributes[2:]...), + }, + expectedBatchInfo: nil, + expectedLog: nil, + err: true, + }, + { + name: "missing event attribute submitter", + host: h, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:2], fullAttributes[3:]...), + }, + expectedBatchInfo: nil, + expectedLog: nil, + err: true, + }, + { + name: "missing event attribute output index", + host: h, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:3], fullAttributes[4:]...), + }, + expectedBatchInfo: nil, + expectedLog: nil, + err: true, + }, + { + name: "missing event attribute l2 block number", + host: h, + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: fullAttributes[:4], + }, + expectedBatchInfo: nil, + expectedLog: nil, + err: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + logger, observedLogs := logCapturer() + ctx := types.NewContext(context.Background(), logger, "") + + err := h.updateBatchInfoHandler(ctx, tc.eventHandlerArgs) + if !tc.err { + require.NoError(t, err) + if tc.expectedLog != nil { + logs := observedLogs.TakeAll() + require.Len(t, logs, 1) + + expectedMsg, expectedFields := tc.expectedLog() + require.Equal(t, expectedMsg, logs[0].Message) + require.Equal(t, expectedFields, logs[0].Context) + } + if tc.expectedBatchInfo != nil { + require.Equal(t, tc.expectedBatchInfo, mockBatch.info) + } + } else { + require.Error(t, err) + } + mockBatch.info = nil + }) + } + require.NoError(t, err) +} diff --git a/executor/host/common_test.go b/executor/host/common_test.go new file mode 100644 index 0000000..4d3951d --- /dev/null +++ b/executor/host/common_test.go @@ -0,0 +1,143 @@ +package host + +import ( + "context" + + opchildtypes "github.com/initia-labs/OPinit/x/opchild/types" + btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" + "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" + + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/authz" +) + +type mockChild struct { + db types.DB + cdc codec.Codec + baseAccount string + oracleAccount string + nextL1Sequence uint64 + processedMsgs []btypes.ProcessedMsgs +} + +func NewMockChild(db types.DB, cdc codec.Codec, baseAccount string, oracleAccount string, nextL1Sequence uint64) *mockChild { + return &mockChild{ + db: db, + cdc: cdc, + baseAccount: baseAccount, + oracleAccount: oracleAccount, + nextL1Sequence: nextL1Sequence, + 
processedMsgs: make([]btypes.ProcessedMsgs, 0), + } +} + +func (m *mockChild) DB() types.DB { + return m.db +} + +func (m *mockChild) Codec() codec.Codec { + return m.cdc +} + +func (m *mockChild) HasBroadcaster() bool { + return m.baseAccount != "" || m.oracleAccount != "" +} + +func (m *mockChild) BroadcastProcessedMsgs(msgs ...btypes.ProcessedMsgs) { + m.processedMsgs = append(m.processedMsgs, msgs...) +} + +func (m *mockChild) GetMsgFinalizeTokenDeposit( + from string, + to string, + coin sdk.Coin, + l1Sequence uint64, + blockHeight int64, + l1Denom string, + data []byte, +) (sdk.Msg, string, error) { + if m.baseAccount == "" { + return nil, "", nil + } + return opchildtypes.NewMsgFinalizeTokenDeposit( + m.baseAccount, + from, + to, + coin, + l1Sequence, + types.MustInt64ToUint64(blockHeight), + l1Denom, + data, + ), m.baseAccount, nil +} + +func (m *mockChild) GetMsgUpdateOracle( + height int64, + data []byte, +) (sdk.Msg, string, error) { + if m.oracleAccount == "" { + return nil, "", nil + } + msg := opchildtypes.NewMsgUpdateOracle( + m.baseAccount, + types.MustInt64ToUint64(height), + data, + ) + + msgsAny := make([]*cdctypes.Any, 1) + any, err := cdctypes.NewAnyWithValue(msg) + if err != nil { + return nil, "", errors.Wrap(err, "failed to create any") + } + msgsAny[0] = any + + return &authz.MsgExec{ + Grantee: m.oracleAccount, + Msgs: msgsAny, + }, m.oracleAccount, nil +} + +func (m *mockChild) QueryNextL1Sequence(ctx context.Context, height int64) (uint64, error) { + if m.nextL1Sequence == 0 { + return 0, errors.New("no next L1 sequence") + } + return m.nextL1Sequence, nil +} + +var _ childNode = (*mockChild)(nil) + +type mockBatchInfo struct { + chain string + submitter string + outputIndex uint64 + l2BlockNumber int64 +} + +type mockBatch struct { + info *mockBatchInfo +} + +func NewMockBatch() *mockBatch { + return &mockBatch{} +} + +func (m *mockBatch) UpdateBatchInfo(chain string, submitter string, outputIndex uint64, l2BlockNumber int64) { + m.info = &mockBatchInfo{ + chain: chain, + submitter: submitter, + outputIndex: outputIndex, + l2BlockNumber: l2BlockNumber, + } +} + +var _ batchNode = (*mockBatch)(nil) + +func logCapturer() (*zap.Logger, *observer.ObservedLogs) { + core, logs := observer.New(zap.DebugLevel) + return zap.New(core), logs +} diff --git a/executor/host/deposit.go b/executor/host/deposit.go index 5d4e742..3f8b947 100644 --- a/executor/host/deposit.go +++ b/executor/host/deposit.go @@ -1,20 +1,20 @@ package host import ( - "context" - "errors" - "cosmossdk.io/math" nodetypes "github.com/initia-labs/opinit-bots/node/types" hostprovider "github.com/initia-labs/opinit-bots/provider/host" + "github.com/initia-labs/opinit-bots/types" sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/pkg/errors" ) -func (h *Host) initiateDepositHandler(_ context.Context, args nodetypes.EventHandlerArgs) error { +func (h *Host) initiateDepositHandler(_ types.Context, args nodetypes.EventHandlerArgs) error { bridgeId, l1Sequence, from, to, l1Denom, l2Denom, amount, data, err := hostprovider.ParseMsgInitiateDeposit(args.EventAttributes) if err != nil { - return err + return errors.Wrap(err, "failed to parse initiate deposit event") } if bridgeId != h.BridgeId() { // pass other bridge deposit event @@ -25,47 +25,25 @@ func (h *Host) initiateDepositHandler(_ context.Context, args nodetypes.EventHan return nil } - msg, sender, err := h.handleInitiateDeposit( - l1Sequence, - args.BlockHeight, - from, - to, - l1Denom, - l2Denom, - amount, - data, - ) - if err != nil { - return 
err - } else if msg != nil { - h.AppendMsgQueue(msg, sender) - } - return nil -} - -func (h *Host) handleInitiateDeposit( - l1Sequence uint64, - blockHeight int64, - from string, - to string, - l1Denom string, - l2Denom string, - amount string, - data []byte, -) (sdk.Msg, string, error) { coinAmount, ok := math.NewIntFromString(amount) if !ok { - return nil, "", errors.New("invalid amount") + return errors.New("invalid coin amount") } coin := sdk.NewCoin(l2Denom, coinAmount) - return h.child.GetMsgFinalizeTokenDeposit( + msg, sender, err := h.child.GetMsgFinalizeTokenDeposit( from, to, coin, l1Sequence, - blockHeight, + args.BlockHeight, l1Denom, data, ) + if err != nil { + return errors.Wrap(err, "failed to handle initiate deposit") + } else if msg != nil { + h.AppendMsgQueue(msg, sender) + } + return nil } diff --git a/executor/host/deposit_test.go b/executor/host/deposit_test.go new file mode 100644 index 0000000..bc202d7 --- /dev/null +++ b/executor/host/deposit_test.go @@ -0,0 +1,281 @@ +package host + +import ( + "encoding/hex" + "strconv" + "testing" + "time" + + abcitypes "github.com/cometbft/cometbft/abci/types" + opchildtypes "github.com/initia-labs/OPinit/x/opchild/types" + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/initia-labs/opinit-bots/db" + "github.com/initia-labs/opinit-bots/node" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + childprovider "github.com/initia-labs/opinit-bots/provider/child" + hostprovider "github.com/initia-labs/opinit-bots/provider/host" + "github.com/initia-labs/opinit-bots/types" + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func InitiateTokenDepositEvents( + bridgeId uint64, + sender, to string, + amount sdk.Coin, + data []byte, + l1Sequence uint64, + l2Denom string, +) []abcitypes.EventAttribute { + return []abcitypes.EventAttribute{ + { + Key: ophosttypes.AttributeKeyBridgeId, + Value: strconv.FormatUint(bridgeId, 10), + }, + { + Key: ophosttypes.AttributeKeyL1Sequence, + Value: strconv.FormatUint(l1Sequence, 10), + }, + { + Key: ophosttypes.AttributeKeyFrom, + Value: sender, + }, + { + Key: ophosttypes.AttributeKeyTo, + Value: to, + }, + { + Key: ophosttypes.AttributeKeyL1Denom, + Value: amount.Denom, + }, + { + Key: ophosttypes.AttributeKeyL2Denom, + Value: l2Denom, + }, + { + Key: ophosttypes.AttributeKeyAmount, + Value: amount.Amount.String(), + }, + { + Key: ophosttypes.AttributeKeyData, + Value: hex.EncodeToString(data), + }, + } +} + +func TestInitializeDepositHandler(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + bridgeInfo := ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + } + hostNode := node.NewTestNode(nodetypes.NodeConfig{}, db.WithPrefix([]byte("test_host")), nil, nil, nil, nil) + childCodec, _, err := childprovider.GetCodec("init") + h := Host{ + BaseHost: hostprovider.NewTestBaseHost(0, hostNode, bridgeInfo, nodetypes.NodeConfig{}, nil), + } + + fullAttributes := InitiateTokenDepositEvents(1, "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", sdk.NewInt64Coin("l1Denom", 100), []byte("databytes"), 1, "l2denom") + + cases := []struct { + name string + initialL1Sequence uint64 + child *mockChild + eventHandlerArgs nodetypes.EventHandlerArgs + expected sdk.Msg + err bool + }{ + { + name: "success", + initialL1Sequence: 0, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "", 1), + eventHandlerArgs: 
nodetypes.EventHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + EventAttributes: InitiateTokenDepositEvents(1, "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", sdk.NewInt64Coin("l1Denom", 100), []byte("databytes"), 1, "l2denom"), + }, + expected: opchildtypes.NewMsgFinalizeTokenDeposit("init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", sdk.NewInt64Coin("l2denom", 100), 1, 1, "l1Denom", []byte("databytes")), + err: false, + }, + { + name: "another bridge id", + initialL1Sequence: 0, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "", 1), + eventHandlerArgs: nodetypes.EventHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + EventAttributes: InitiateTokenDepositEvents(2, "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", sdk.NewInt64Coin("l1Denom", 100), []byte("databytes"), 1, "l2denom"), + }, + expected: nil, + err: false, + }, + { + name: "empty child broadcaster", + initialL1Sequence: 0, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "", "", 1), + eventHandlerArgs: nodetypes.EventHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + EventAttributes: InitiateTokenDepositEvents(2, "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", sdk.NewInt64Coin("l1Denom", 100), []byte("databytes"), 1, "l2denom"), + }, + expected: nil, + err: false, + }, + { + name: "processed l1 sequence", + initialL1Sequence: 2, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "", 1), + eventHandlerArgs: nodetypes.EventHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + EventAttributes: InitiateTokenDepositEvents(2, "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", sdk.NewInt64Coin("l1Denom", 100), []byte("databytes"), 1, "l2denom"), + }, + expected: nil, + err: false, + }, + { + name: "missing event attribute bridge id", + initialL1Sequence: 0, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "", 1), + eventHandlerArgs: nodetypes.EventHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + EventAttributes: fullAttributes[1:], + }, + expected: opchildtypes.NewMsgFinalizeTokenDeposit("init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", sdk.NewInt64Coin("l2denom", 100), 1, 1, "l1Denom", []byte("databytes")), + err: true, + }, + { + name: "missing event attribute l1 sequence", + initialL1Sequence: 0, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "", 1), + eventHandlerArgs: nodetypes.EventHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + EventAttributes: append(fullAttributes[:1], fullAttributes[2:]...), + }, + expected: opchildtypes.NewMsgFinalizeTokenDeposit("init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", sdk.NewInt64Coin("l2denom", 100), 1, 1, 
"l1Denom", []byte("databytes")), + err: true, + }, + { + name: "missing event attribute from", + initialL1Sequence: 0, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "", 1), + eventHandlerArgs: nodetypes.EventHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + EventAttributes: append(fullAttributes[:2], fullAttributes[3:]...), + }, + expected: opchildtypes.NewMsgFinalizeTokenDeposit("init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", sdk.NewInt64Coin("l2denom", 100), 1, 1, "l1Denom", []byte("databytes")), + err: true, + }, + { + name: "missing event attribute to", + initialL1Sequence: 0, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "", 1), + eventHandlerArgs: nodetypes.EventHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + EventAttributes: append(fullAttributes[:3], fullAttributes[4:]...), + }, + expected: opchildtypes.NewMsgFinalizeTokenDeposit("init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", sdk.NewInt64Coin("l2denom", 100), 1, 1, "l1Denom", []byte("databytes")), + err: true, + }, + { + name: "missing event attribute l1 denom", + initialL1Sequence: 0, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "", 1), + eventHandlerArgs: nodetypes.EventHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + EventAttributes: append(fullAttributes[:4], fullAttributes[5:]...), + }, + expected: opchildtypes.NewMsgFinalizeTokenDeposit("init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", sdk.NewInt64Coin("l2denom", 100), 1, 1, "l1Denom", []byte("databytes")), + err: true, + }, + { + name: "missing event attribute l2 denom", + initialL1Sequence: 0, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "", 1), + eventHandlerArgs: nodetypes.EventHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + EventAttributes: append(fullAttributes[:5], fullAttributes[6:]...), + }, + expected: opchildtypes.NewMsgFinalizeTokenDeposit("init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", sdk.NewInt64Coin("l2denom", 100), 1, 1, "l1Denom", []byte("databytes")), + err: true, + }, + { + name: "missing event attribute amount", + initialL1Sequence: 0, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "", 1), + eventHandlerArgs: nodetypes.EventHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + EventAttributes: append(fullAttributes[:6], fullAttributes[7:]...), + }, + expected: opchildtypes.NewMsgFinalizeTokenDeposit("init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", sdk.NewInt64Coin("l2denom", 100), 1, 1, "l1Denom", []byte("databytes")), + err: true, + }, + { + name: "missing event attribute data", + initialL1Sequence: 0, + child: 
NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "", 1), + eventHandlerArgs: nodetypes.EventHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + EventAttributes: fullAttributes[:7], + }, + expected: opchildtypes.NewMsgFinalizeTokenDeposit("init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", sdk.NewInt64Coin("l2denom", 100), 1, 1, "l1Denom", []byte("databytes")), + err: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + h.initialL1Sequence = tc.initialL1Sequence + h.child = tc.child + + err := h.initiateDepositHandler(types.Context{}, tc.eventHandlerArgs) + if !tc.err { + require.NoError(t, err) + msg := h.GetMsgQueue() + if tc.expected != nil { + require.Equal(t, 1, len(msg)) + require.Equal(t, tc.expected, msg[tc.child.baseAccount][0]) + } else { + require.Empty(t, msg[tc.child.baseAccount]) + } + } else { + require.Error(t, err) + } + h.EmptyMsgQueue() + }) + } + require.NoError(t, err) +} diff --git a/executor/host/handler.go b/executor/host/handler.go index 9f7fb75..34c02af 100644 --- a/executor/host/handler.go +++ b/executor/host/handler.go @@ -1,78 +1,61 @@ package host import ( - "context" - "slices" - "time" - + "github.com/initia-labs/opinit-bots/node" "github.com/initia-labs/opinit-bots/types" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/initia-labs/opinit-bots/node/broadcaster" btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" nodetypes "github.com/initia-labs/opinit-bots/node/types" + + "github.com/pkg/errors" ) -func (h *Host) beginBlockHandler(_ context.Context, args nodetypes.BeginBlockArgs) error { +func (h *Host) beginBlockHandler(_ types.Context, _ nodetypes.BeginBlockArgs) error { h.EmptyMsgQueue() h.EmptyProcessedMsgs() + h.stage.Reset() return nil } -func (h *Host) endBlockHandler(_ context.Context, args nodetypes.EndBlockArgs) error { - // collect more msgs if block height is not latest - blockHeight := args.Block.Header.Height - msgQueues := h.GetMsgQueue() - - batchKVs := []types.RawKV{ - h.Node().SyncInfoToRawKV(blockHeight), +func (h *Host) endBlockHandler(_ types.Context, args nodetypes.EndBlockArgs) error { + err := node.SetSyncedHeight(h.stage, args.Block.Header.Height) + if err != nil { + return errors.Wrap(err, "failed to set synced height") } - if h.child.HasKey() { - for sender := range msgQueues { - msgQueue := msgQueues[sender] - for i := 0; i < len(msgQueue); i += 5 { - end := i + 5 - if end > len(msgQueue) { - end = len(msgQueue) - } - h.AppendProcessedMsgs(btypes.ProcessedMsgs{ - Sender: sender, - Msgs: slices.Clone(msgQueue[i:end]), - Timestamp: time.Now().UnixNano(), - Save: true, - }) - } - } + if h.child.HasBroadcaster() { + h.AppendProcessedMsgs(broadcaster.MsgsToProcessedMsgs(h.GetMsgQueue())...) - msgkvs, err := h.child.ProcessedMsgsToRawKV(h.GetProcessedMsgs(), false) + // save processed msgs to stage using child db + err := broadcaster.SaveProcessedMsgsBatch(h.stage.WithPrefixedKey(h.child.DB().PrefixedKey), h.child.Codec(), h.GetProcessedMsgs()) if err != nil { - return err + return errors.Wrap(err, "failed to save processed msgs on child db") } - batchKVs = append(batchKVs, msgkvs...) + } else { + h.EmptyProcessedMsgs() } - err := h.DB().RawBatchSet(batchKVs...) 
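// The hunk above replaces one-shot RawBatchSet writes with a staged commit:
// per-block writes (synced height, saved processed msgs) accumulate in an
// in-memory stage and are flushed atomically once the block is fully handled.
// A minimal sketch of the assumed types.CommitDB lifecycle — commitBlock is a
// hypothetical helper for illustration, not part of this change:
func commitBlock(stage types.CommitDB, height int64, kvs []types.KV) error {
	stage.Reset() // beginBlockHandler drops anything left from a previous block
	for _, kv := range kvs {
		if err := stage.Set(kv.Key, kv.Value); err != nil { // buffered, not yet visible
			return err
		}
	}
	if err := node.SetSyncedHeight(stage, height); err != nil {
		return err
	}
	return stage.Commit() // height and data land in one atomic batch
}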
+ err = h.stage.Commit() if err != nil { - return err - } - - for _, processedMsg := range h.GetProcessedMsgs() { - h.child.BroadcastMsgs(processedMsg) + return errors.Wrap(err, "failed to commit stage") } + h.child.BroadcastProcessedMsgs(h.GetProcessedMsgs()...) return nil } -func (h *Host) txHandler(_ context.Context, args nodetypes.TxHandlerArgs) error { +func (h *Host) txHandler(_ types.Context, args nodetypes.TxHandlerArgs) error { if args.BlockHeight == args.LatestHeight && args.TxIndex == 0 { msg, sender, err := h.oracleTxHandler(args.BlockHeight, args.Tx) if err != nil { - return err + return errors.Wrap(err, "failed to handle oracle tx") } else if msg != nil { h.AppendProcessedMsgs(btypes.ProcessedMsgs{ Sender: sender, Msgs: []sdk.Msg{msg}, - Timestamp: time.Now().UnixNano(), + Timestamp: types.CurrentNanoTimestamp(), Save: false, }) } diff --git a/executor/host/handler_test.go b/executor/host/handler_test.go new file mode 100644 index 0000000..d6781e5 --- /dev/null +++ b/executor/host/handler_test.go @@ -0,0 +1,380 @@ +package host + +import ( + "testing" + "time" + + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + opchildtypes "github.com/initia-labs/OPinit/x/opchild/types" + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/initia-labs/opinit-bots/db" + "github.com/initia-labs/opinit-bots/node" + btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + childprovider "github.com/initia-labs/opinit-bots/provider/child" + hostprovider "github.com/initia-labs/opinit-bots/provider/host" + "github.com/initia-labs/opinit-bots/types" + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func TestBeginBlockHandler(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + hostNode := node.NewTestNode(nodetypes.NodeConfig{}, db.WithPrefix([]byte("test_host")), nil, nil, nil, nil) + + h := Host{ + BaseHost: hostprovider.NewTestBaseHost(0, hostNode, ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + }, nodetypes.NodeConfig{}, nil), + stage: db.NewStage(), + } + + msgQueue := h.GetMsgQueue() + require.Empty(t, msgQueue["init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5"]) + h.AppendMsgQueue(opchildtypes.NewMsgUpdateOracle("init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", 1, []byte("oracle_tx")), "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5") + msgQueue = h.GetMsgQueue() + require.Len(t, msgQueue["init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5"], 1) + + msgs := h.GetProcessedMsgs() + require.Empty(t, msgs) + h.AppendProcessedMsgs(btypes.ProcessedMsgs{}) + msgs = h.GetProcessedMsgs() + require.Len(t, msgs, 1) + + require.Equal(t, 0, h.stage.Len()) + err = h.stage.Set([]byte("key"), []byte("value")) + require.NoError(t, err) + require.Equal(t, 1, h.stage.Len()) + + err = h.beginBlockHandler(types.Context{}, nodetypes.BeginBlockArgs{}) + require.NoError(t, err) + + msgQueue = h.GetMsgQueue() + require.Empty(t, msgQueue["init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5"]) + + msgs = h.GetProcessedMsgs() + require.Empty(t, msgs) + + require.Equal(t, 0, h.stage.Len()) +} + +func TestEndBlockHandler(t *testing.T) { + childCodec, _, err := childprovider.GetCodec("init") + + mockCount := int64(0) + mockTimestampFetcher := func() int64 { + mockCount++ + return mockCount + } + types.CurrentNanoTimestamp = mockTimestampFetcher + + cases := []struct { + name string + child *mockChild + msgQueue map[string][]sdk.Msg + processedMsgs []btypes.ProcessedMsgs + 
dbChanges []types.KV + endBlockArgs nodetypes.EndBlockArgs + expectedProcessedMsgs []btypes.ProcessedMsgs + expectedDB []types.KV + err bool + }{ + { + name: "success", + child: NewMockChild(nil, childCodec, "sender0", "sender1", 1), + msgQueue: map[string][]sdk.Msg{ + "sender0": {&opchildtypes.MsgFinalizeTokenDeposit{}}, + }, + processedMsgs: []btypes.ProcessedMsgs{ + { + Sender: "sender1", + Msgs: []sdk.Msg{&opchildtypes.MsgUpdateOracle{}}, + Timestamp: 10000, + Save: true, + }, + }, + dbChanges: []types.KV{ + { + Key: []byte("key1"), + Value: []byte("value1"), + }, + }, + endBlockArgs: nodetypes.EndBlockArgs{ + Block: cmtproto.Block{ + Header: cmtproto.Header{ + Height: 10, + }, + }, + }, + expectedProcessedMsgs: []btypes.ProcessedMsgs{ + { + Sender: "sender1", + Msgs: []sdk.Msg{&opchildtypes.MsgUpdateOracle{}}, + Timestamp: 10000, + Save: true, + }, + { + Sender: "sender0", + Msgs: []sdk.Msg{&opchildtypes.MsgFinalizeTokenDeposit{}}, + Timestamp: 1, + Save: true, + }, + }, + expectedDB: []types.KV{ + { + Key: []byte("test_host/key1"), + Value: []byte("value1"), + }, + { + Key: []byte("test_host/synced_height"), + Value: []byte("10"), + }, + { + Key: append([]byte("test_child/processed_msgs/"), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x27, 0x10}...), + Value: []byte(`{"sender":"sender1","msgs":["{\"@type\":\"/opinit.opchild.v1.MsgUpdateOracle\",\"sender\":\"\",\"height\":\"0\",\"data\":null}"],"timestamp":10000,"save":true}`), + }, + { + Key: append([]byte("test_child/processed_msgs/"), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}...), + Value: []byte(`{"sender":"sender0","msgs":["{\"@type\":\"/opinit.opchild.v1.MsgFinalizeTokenDeposit\",\"sender\":\"\",\"from\":\"\",\"to\":\"\",\"amount\":{\"denom\":\"\",\"amount\":\"0\"},\"sequence\":\"0\",\"height\":\"0\",\"base_denom\":\"\",\"data\":null}"],"timestamp":1,"save":true}`), + }, + }, + err: false, + }, + { + name: "empty changes", + child: NewMockChild(nil, childCodec, "sender0", "sender1", 1), + msgQueue: nil, + processedMsgs: nil, + dbChanges: nil, + endBlockArgs: nodetypes.EndBlockArgs{ + Block: cmtproto.Block{ + Header: cmtproto.Header{ + Height: 15, + }, + }, + }, + expectedProcessedMsgs: nil, + expectedDB: []types.KV{ + { + Key: []byte("test_host/synced_height"), + Value: []byte("15"), + }, + }, + err: false, + }, + { + name: "child no broadcaster", + child: NewMockChild(nil, childCodec, "", "", 1), + msgQueue: map[string][]sdk.Msg{ + "sender0": {&opchildtypes.MsgFinalizeTokenDeposit{}}, + }, + processedMsgs: []btypes.ProcessedMsgs{ + { + Sender: "sender1", + Msgs: []sdk.Msg{&opchildtypes.MsgUpdateOracle{}}, + Timestamp: 10000, + Save: true, + }, + }, + dbChanges: nil, + endBlockArgs: nodetypes.EndBlockArgs{ + Block: cmtproto.Block{ + Header: cmtproto.Header{ + Height: 10, + }, + }, + }, + expectedProcessedMsgs: nil, + expectedDB: []types.KV{ + { + Key: []byte("test_host/synced_height"), + Value: []byte("10"), + }, + }, + err: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + db, err := db.NewMemDB() + defer func() { + require.NoError(t, db.Close()) + }() + hostdb := db.WithPrefix([]byte("test_host")) + require.NoError(t, err) + hostNode := node.NewTestNode(nodetypes.NodeConfig{}, hostdb, nil, nil, nil, nil) + tc.child.db = db.WithPrefix([]byte("test_child")) + + h := Host{ + BaseHost: hostprovider.NewTestBaseHost(0, hostNode, ophosttypes.QueryBridgeResponse{}, nodetypes.NodeConfig{}, nil), + child: tc.child, + stage: hostdb.NewStage(), + } + + for sender, msgs := range tc.msgQueue { + for _, msg 
:= range msgs { + h.AppendMsgQueue(msg, sender) + } + } + for _, processedMsgs := range tc.processedMsgs { + h.AppendProcessedMsgs(processedMsgs) + } + + for _, kv := range tc.dbChanges { + err := h.stage.Set(kv.Key, kv.Value) + require.NoError(t, err) + } + + err = h.endBlockHandler(types.Context{}, tc.endBlockArgs) + if !tc.err { + require.NoError(t, err) + for i := range tc.expectedProcessedMsgs { + expectedMsg, err := tc.expectedProcessedMsgs[i].MarshalInterfaceJSON(childCodec) + require.NoError(t, err) + actualMsg, err := tc.child.processedMsgs[i].MarshalInterfaceJSON(childCodec) + require.NoError(t, err) + require.Equal(t, expectedMsg, actualMsg) + } + for _, kv := range tc.expectedDB { + value, err := db.Get(kv.Key) + require.NoError(t, err) + require.Equal(t, kv.Value, value) + } + } else { + require.Error(t, err) + } + }) + } + require.NoError(t, err) +} + +func TestTxHandler(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + hostNode := node.NewTestNode(nodetypes.NodeConfig{}, db.WithPrefix([]byte("test_host")), nil, nil, nil, nil) + childCodec, _, err := childprovider.GetCodec("init") + + cases := []struct { + name string + oracleEnabled bool + child *mockChild + txHandlerArgs nodetypes.TxHandlerArgs + expected func() (sender string, msg sdk.Msg, err error) + err bool + }{ + { + name: "success", + oracleEnabled: true, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", 1), + txHandlerArgs: nodetypes.TxHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + Tx: []byte("oracle_tx"), + }, + expected: func() (sender string, msg sdk.Msg, err error) { + msg, err = childprovider.CreateAuthzMsg("init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", opchildtypes.NewMsgUpdateOracle("init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", 1, []byte("oracle_tx"))) + sender = "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5" + return sender, msg, err + }, + err: false, + }, + { + name: "empty tx", + oracleEnabled: true, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", 1), + txHandlerArgs: nodetypes.TxHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + Tx: []byte(""), + }, + expected: func() (sender string, msg sdk.Msg, err error) { + msg, err = childprovider.CreateAuthzMsg("init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", opchildtypes.NewMsgUpdateOracle("init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", 1, []byte(""))) + sender = "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5" + return sender, msg, err + }, + err: false, + }, + { + name: "old height", + oracleEnabled: true, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", 1), + txHandlerArgs: nodetypes.TxHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 3, + TxIndex: 0, + Tx: []byte(""), + }, + expected: nil, + err: false, + }, + { + name: "another tx", + oracleEnabled: true, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", 1), + txHandlerArgs: nodetypes.TxHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 1, + Tx: []byte(""), + }, + expected: nil, + err: false, + }, 
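// txHandler only treats a tx as an oracle tx when it is the first tx of the
// latest block (BlockHeight == LatestHeight && TxIndex == 0), so the
// "old height" and "another tx" cases above expect no processed msgs.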
+ { + name: "oracle disabled", + oracleEnabled: false, + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", 1), + txHandlerArgs: nodetypes.TxHandlerArgs{ + BlockHeight: 1, + BlockTime: time.Time{}, + LatestHeight: 1, + TxIndex: 0, + Tx: []byte(""), + }, + expected: nil, + err: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + h := Host{ + BaseHost: hostprovider.NewTestBaseHost(0, hostNode, ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + BridgeConfig: ophosttypes.BridgeConfig{ + OracleEnabled: tc.oracleEnabled, + }, + }, nodetypes.NodeConfig{}, nil), + child: tc.child, + } + + err := h.txHandler(types.Context{}, tc.txHandlerArgs) + if !tc.err { + require.NoError(t, err) + processedMsgs := h.GetProcessedMsgs() + if tc.expected != nil { + require.Equal(t, 1, len(processedMsgs)) + expectedSender, expectedMsg, err := tc.expected() + require.NoError(t, err) + require.Equal(t, expectedSender, processedMsgs[0].Sender) + require.Equal(t, expectedMsg, processedMsgs[0].Msgs[0]) + } else { + require.Empty(t, processedMsgs) + } + } else { + require.Error(t, err) + } + h.EmptyProcessedMsgs() + }) + } + require.NoError(t, err) +} diff --git a/executor/host/host.go b/executor/host/host.go index c957e2a..ecffe2d 100644 --- a/executor/host/host.go +++ b/executor/host/host.go @@ -3,8 +3,6 @@ package host import ( "context" - "go.uber.org/zap" - sdk "github.com/cosmos/cosmos-sdk/types" ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" @@ -14,19 +12,24 @@ import ( nodetypes "github.com/initia-labs/opinit-bots/node/types" "github.com/initia-labs/opinit-bots/types" + "github.com/cosmos/cosmos-sdk/codec" + hostprovider "github.com/initia-labs/opinit-bots/provider/host" + + "github.com/pkg/errors" ) type childNode interface { - HasKey() bool - BroadcastMsgs(btypes.ProcessedMsgs) - ProcessedMsgsToRawKV([]btypes.ProcessedMsgs, bool) ([]types.RawKV, error) - QueryNextL1Sequence(context.Context, int64) (uint64, error) - BaseAccountAddressString() (string, error) - OracleAccountAddressString() (string, error) + DB() types.DB + Codec() codec.Codec + + HasBroadcaster() bool + BroadcastProcessedMsgs(...btypes.ProcessedMsgs) GetMsgFinalizeTokenDeposit(string, string, sdk.Coin, uint64, int64, string, []byte) (sdk.Msg, string, error) GetMsgUpdateOracle(int64, []byte) (sdk.Msg, string, error) + + QueryNextL1Sequence(context.Context, int64) (uint64, error) } type batchNode interface { @@ -43,50 +46,50 @@ type Host struct { initialL1Sequence uint64 + stage types.CommitDB + // status info lastProposedOutputIndex uint64 lastProposedOutputL2BlockNumber int64 } -func NewHostV1( - cfg nodetypes.NodeConfig, - db types.DB, logger *zap.Logger, -) *Host { +func NewHostV1(cfg nodetypes.NodeConfig, db types.DB) *Host { return &Host{ - BaseHost: hostprovider.NewBaseHostV1(cfg, db, logger), + BaseHost: hostprovider.NewBaseHostV1(cfg, db), + stage: db.NewStage(), } } func (h *Host) Initialize( - ctx context.Context, - processedHeight int64, + ctx types.Context, + syncedHeight int64, child childNode, batch batchNode, bridgeInfo ophosttypes.QueryBridgeResponse, keyringConfig *btypes.KeyringConfig, ) error { - err := h.BaseHost.Initialize(ctx, processedHeight, bridgeInfo, keyringConfig) + err := h.BaseHost.Initialize(ctx, syncedHeight, bridgeInfo, keyringConfig) if err != nil { - return err + return errors.Wrap(err, "failed to initialize base host") } h.child = child h.batch = batch 
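// initialL1Sequence caches the child's next expected L1 sequence at startup;
// judging from the "processed l1 sequence" case in deposit_test.go, deposit
// events whose sequence is below it are treated as already relayed and skipped.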
h.initialL1Sequence, err = h.child.QueryNextL1Sequence(ctx, 0) if err != nil { - return err + return errors.Wrap(err, "failed to query next L1 sequence") } h.registerHandlers() return nil } func (h *Host) InitializeDA( - ctx context.Context, + ctx types.Context, bridgeInfo ophosttypes.QueryBridgeResponse, keyringConfig *btypes.KeyringConfig, ) error { err := h.BaseHost.Initialize(ctx, 0, bridgeInfo, keyringConfig) if err != nil { - return err + return errors.Wrap(err, "failed to initialize base DA host") } h.registerDAHandlers() return nil diff --git a/executor/host/oracle_test.go b/executor/host/oracle_test.go new file mode 100644 index 0000000..e39880e --- /dev/null +++ b/executor/host/oracle_test.go @@ -0,0 +1,85 @@ +package host + +import ( + "testing" + + opchildtypes "github.com/initia-labs/OPinit/x/opchild/types" + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/initia-labs/opinit-bots/db" + "github.com/initia-labs/opinit-bots/node" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + childprovider "github.com/initia-labs/opinit-bots/provider/child" + hostprovider "github.com/initia-labs/opinit-bots/provider/host" + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func TestOracleTxHandler(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + hostNode := node.NewTestNode(nodetypes.NodeConfig{}, db.WithPrefix([]byte("test_host")), nil, nil, nil, nil) + childCodec, _, err := childprovider.GetCodec("init") + + h := Host{ + child: NewMockChild(db.WithPrefix([]byte("test_child")), childCodec, "init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", 1), + } + + cases := []struct { + name string + oracleEnabled bool + blockHeight int64 + extCommitBz []byte + expected func() (sender string, msg sdk.Msg, err error) + err bool + }{ + { + name: "oracle enabled", + oracleEnabled: true, + blockHeight: 3, + extCommitBz: []byte("oracle_tx"), + expected: func() (sender string, msg sdk.Msg, err error) { + msg, err = childprovider.CreateAuthzMsg("init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", opchildtypes.NewMsgUpdateOracle("init1z3689ct7pc72yr5an97nsj89dnlefydxwdhcv0", 3, []byte("oracle_tx"))) + sender = "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5" + return sender, msg, err + }, + err: false, + }, + { + name: "oracle disabled", + oracleEnabled: false, + blockHeight: 3, + extCommitBz: []byte("valid_oracle_tx"), + expected: nil, + err: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + h.BaseHost = hostprovider.NewTestBaseHost(0, hostNode, ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + BridgeConfig: ophosttypes.BridgeConfig{ + OracleEnabled: tc.oracleEnabled, + }, + }, nodetypes.NodeConfig{}, nil) + + msg, sender, err := h.oracleTxHandler(tc.blockHeight, tc.extCommitBz) + if !tc.err { + require.NoError(t, err) + if tc.expected != nil { + expectedSender, expectedMsg, err := tc.expected() + require.NoError(t, err) + require.Equal(t, expectedSender, sender) + require.Equal(t, expectedMsg, msg) + } else { + require.Nil(t, msg) + } + } else { + require.Error(t, err) + } + h.EmptyProcessedMsgs() + }) + } + require.NoError(t, err) +} diff --git a/executor/host/status.go b/executor/host/status.go index 7775da2..599f0f9 100644 --- a/executor/host/status.go +++ b/executor/host/status.go @@ -1,9 +1,8 @@ package host import ( - "errors" - nodetypes "github.com/initia-labs/opinit-bots/node/types" + "github.com/pkg/errors" ) type Status struct { @@ 
-15,7 +14,7 @@ type Status struct { func (h Host) GetStatus() (Status, error) { nodeStatus, err := h.GetNodeStatus() if err != nil { - return Status{}, err + return Status{}, errors.Wrap(err, "failed to get node status") } return Status{ diff --git a/executor/host/withdraw.go b/executor/host/withdraw.go index 8bf89e2..d12b8fd 100644 --- a/executor/host/withdraw.go +++ b/executor/host/withdraw.go @@ -1,56 +1,48 @@ package host import ( - "context" "encoding/base64" nodetypes "github.com/initia-labs/opinit-bots/node/types" hostprovider "github.com/initia-labs/opinit-bots/provider/host" + "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" "go.uber.org/zap" ) -func (h *Host) proposeOutputHandler(_ context.Context, args nodetypes.EventHandlerArgs) error { +func (h *Host) proposeOutputHandler(ctx types.Context, args nodetypes.EventHandlerArgs) error { bridgeId, l2BlockNumber, outputIndex, proposer, outputRoot, err := hostprovider.ParseMsgProposeOutput(args.EventAttributes) if err != nil { - return err + return errors.Wrap(err, "failed to parse propose output event") } if bridgeId != h.BridgeId() { // pass other bridge output proposal event return nil } - h.handleProposeOutput(bridgeId, proposer, outputIndex, l2BlockNumber, outputRoot) h.lastProposedOutputIndex = outputIndex h.lastProposedOutputL2BlockNumber = l2BlockNumber - return nil -} - -func (h *Host) handleProposeOutput(bridgeId uint64, proposer string, outputIndex uint64, l2BlockNumber int64, outputRoot []byte) { - h.Logger().Info("propose output", + ctx.Logger().Info("propose output", zap.Uint64("bridge_id", bridgeId), zap.String("proposer", proposer), zap.Uint64("output_index", outputIndex), zap.Int64("l2_block_number", l2BlockNumber), zap.String("output_root", base64.StdEncoding.EncodeToString(outputRoot)), ) + return nil } -func (h *Host) finalizeWithdrawalHandler(_ context.Context, args nodetypes.EventHandlerArgs) error { +func (h *Host) finalizeWithdrawalHandler(ctx types.Context, args nodetypes.EventHandlerArgs) error { bridgeId, outputIndex, l2Sequence, from, to, l1Denom, l2Denom, amount, err := hostprovider.ParseMsgFinalizeWithdrawal(args.EventAttributes) if err != nil { - return err + return errors.Wrap(err, "failed to parse finalize withdrawal event") } if bridgeId != h.BridgeId() { // pass other bridge withdrawal event return nil } - h.handleFinalizeWithdrawal(bridgeId, outputIndex, l2Sequence, from, to, l1Denom, l2Denom, amount) - return nil -} - -func (h *Host) handleFinalizeWithdrawal(bridgeId uint64, outputIndex uint64, l2Sequence uint64, from string, to string, l1Denom string, l2Denom string, amount string) { - h.Logger().Info("finalize withdrawal", + ctx.Logger().Info("finalize withdrawal", zap.Uint64("bridge_id", bridgeId), zap.Uint64("output_index", outputIndex), zap.Uint64("l2_sequence", l2Sequence), @@ -60,4 +52,5 @@ func (h *Host) handleFinalizeWithdrawal(bridgeId uint64, outputIndex uint64, l2S zap.String("l2_denom", l2Denom), zap.String("amount", amount), ) + return nil } diff --git a/executor/host/withdraw_test.go b/executor/host/withdraw_test.go index 06cd206..676eeac 100644 --- a/executor/host/withdraw_test.go +++ b/executor/host/withdraw_test.go @@ -1,35 +1,352 @@ package host -// import ( -// "testing" - -// "github.com/initia-labs/opinit-bots/db" -// "github.com/initia-labs/opinit-bots/node/types" -// "github.com/stretchr/testify/require" -// "go.uber.org/zap" -// "go.uber.org/zap/zaptest/observer" -// ) - -// func defaultConfig() types.NodeConfig { -// return types.NodeConfig{ -// RPC: 
"http://localhost:26657", -// ChainID: "testnet-1", -// Account: "test-acc", -// GasPrice: "0.15uinit", -// } -// } - -// func logCapture() (*zap.Logger, *observer.ObservedLogs) { -// core, logs := observer.New(zap.InfoLevel) -// return zap.New(core), logs -// } - -// func Test_handleProposeOutput(t *testing.T) { -// db, err := db.NewDB(t.TempDir()) -// require.NoError(t, err) - -// defer db.Close() - -// logger, logs := logCapture() -// host := NewHost(1, true, defaultConfig(), db, logger, t.TempDir(), "") -// } +import ( + "context" + "encoding/base64" + "encoding/hex" + "strconv" + "testing" + + abcitypes "github.com/cometbft/cometbft/abci/types" + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/initia-labs/opinit-bots/db" + "github.com/initia-labs/opinit-bots/node" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + hostprovider "github.com/initia-labs/opinit-bots/provider/host" + "github.com/initia-labs/opinit-bots/types" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func ProposeOutputEvents( + proposer string, + bridgeId uint64, + outputIndex uint64, + l2BlockNumber uint64, + outputRoot []byte, +) []abcitypes.EventAttribute { + return []abcitypes.EventAttribute{ + { + Key: ophosttypes.AttributeKeyProposer, + Value: proposer, + }, + { + Key: ophosttypes.AttributeKeyBridgeId, + Value: strconv.FormatUint(bridgeId, 10), + }, + { + Key: ophosttypes.AttributeKeyOutputIndex, + Value: strconv.FormatUint(outputIndex, 10), + }, + { + Key: ophosttypes.AttributeKeyL2BlockNumber, + Value: strconv.FormatUint(l2BlockNumber, 10), + }, + { + Key: ophosttypes.AttributeKeyOutputRoot, + Value: hex.EncodeToString(outputRoot), + }, + } +} + +func TestProposeOutputHandler(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + + hostNode := node.NewTestNode(nodetypes.NodeConfig{}, db.WithPrefix([]byte("test_host")), nil, nil, nil, nil) + bridgeInfo := ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + } + + h := Host{ + BaseHost: hostprovider.NewTestBaseHost(0, hostNode, bridgeInfo, nodetypes.NodeConfig{}, nil), + } + + fullAttributes := ProposeOutputEvents("proposer", 1, 2, 3, []byte("output_root")) + + cases := []struct { + name string + eventHandlerArgs nodetypes.EventHandlerArgs + expected func() (msg string, fields []zapcore.Field) + err bool + }{ + { + name: "success", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: ProposeOutputEvents("proposer", 1, 2, 3, []byte("output_root")), + }, + expected: func() (msg string, fields []zapcore.Field) { + msg = "propose output" + fields = []zapcore.Field{ + zap.Uint64("bridge_id", 1), + zap.String("proposer", "proposer"), + zap.Uint64("output_index", 2), + zap.Int64("l2_block_number", 3), + zap.String("output_root", base64.StdEncoding.EncodeToString([]byte("output_root"))), + } + return msg, fields + }, + err: false, + }, + { + name: "different bridge id", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: ProposeOutputEvents("proposer", 2, 2, 3, []byte("output_root")), + }, + expected: nil, + err: false, + }, + { + name: "missing event attribute proposer", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: fullAttributes[1:], + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute bridge id", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:1], fullAttributes[2:]...), + }, + expected: nil, + err: true, + }, + { 
+ name: "missing event attribute output index", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:2], fullAttributes[3:]...), + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute l2 block number", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:3], fullAttributes[4:]...), + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute output root", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: fullAttributes[:4], + }, + expected: nil, + err: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + logger, observedLogs := logCapturer() + ctx := types.NewContext(context.Background(), logger, "") + + err := h.proposeOutputHandler(ctx, tc.eventHandlerArgs) + if !tc.err { + require.NoError(t, err) + if tc.expected != nil { + logs := observedLogs.TakeAll() + require.Len(t, logs, 1) + + expectedMsg, expectedFields := tc.expected() + require.Equal(t, expectedMsg, logs[0].Message) + require.Equal(t, expectedFields, logs[0].Context) + } + } else { + require.Error(t, err) + } + }) + } + require.NoError(t, err) +} + +func FinalizeWithdrawalEvents( + bridgeId uint64, + outputIndex uint64, + l2Sequence uint64, + from string, + to string, + l1Denom string, + l2Denom string, + amount sdk.Coin, +) []abcitypes.EventAttribute { + return []abcitypes.EventAttribute{ + { + Key: ophosttypes.AttributeKeyBridgeId, + Value: strconv.FormatUint(bridgeId, 10), + }, + { + Key: ophosttypes.AttributeKeyOutputIndex, + Value: strconv.FormatUint(outputIndex, 10), + }, + { + Key: ophosttypes.AttributeKeyL2Sequence, + Value: strconv.FormatUint(l2Sequence, 10), + }, + { + Key: ophosttypes.AttributeKeyFrom, + Value: from, + }, + { + Key: ophosttypes.AttributeKeyTo, + Value: to, + }, + { + Key: ophosttypes.AttributeKeyL1Denom, + Value: l1Denom, + }, + { + Key: ophosttypes.AttributeKeyL2Denom, + Value: l2Denom, + }, + { + Key: ophosttypes.AttributeKeyAmount, + Value: amount.String(), + }, + } +} + +func TestFinalizeWithdrawalHandler(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + + hostNode := node.NewTestNode(nodetypes.NodeConfig{}, db.WithPrefix([]byte("test_host")), nil, nil, nil, nil) + bridgeInfo := ophosttypes.QueryBridgeResponse{ + BridgeId: 1, + } + + h := Host{ + BaseHost: hostprovider.NewTestBaseHost(0, hostNode, bridgeInfo, nodetypes.NodeConfig{}, nil), + } + + fullAttributes := FinalizeWithdrawalEvents(1, 2, 3, "from", "to", "l1Denom", "l2Denom", sdk.NewInt64Coin("uinit", 10000)) + + cases := []struct { + name string + eventHandlerArgs nodetypes.EventHandlerArgs + expected func() (msg string, fields []zapcore.Field) + err bool + }{ + { + name: "success", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: FinalizeWithdrawalEvents(1, 2, 3, "from", "to", "l1Denom", "l2Denom", sdk.NewInt64Coin("uinit", 10000)), + }, + expected: func() (msg string, fields []zapcore.Field) { + msg = "finalize withdrawal" + fields = []zapcore.Field{ + zap.Uint64("bridge_id", 1), + zap.Uint64("output_index", 2), + zap.Uint64("l2_sequence", 3), + zap.String("from", "from"), + zap.String("to", "to"), + zap.String("l1_denom", "l1Denom"), + zap.String("l2_denom", "l2Denom"), + zap.String("amount", "10000uinit"), + } + return msg, fields + }, + err: false, + }, + { + name: "different bridge id", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: FinalizeWithdrawalEvents(2, 2, 3, "from", "to", "l1Denom", "l2Denom", 
sdk.NewInt64Coin("uinit", 10000)), + }, + expected: nil, + err: false, + }, + { + name: "missing event attribute bridge id", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: fullAttributes[1:], + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute output index", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:1], fullAttributes[2:]...), + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute l2 sequence", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:2], fullAttributes[3:]...), + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute from", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:3], fullAttributes[4:]...), + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute to", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:4], fullAttributes[5:]...), + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute l1 denom", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:5], fullAttributes[6:]...), + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute l2 denom", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: append(fullAttributes[:6], fullAttributes[7:]...), + }, + expected: nil, + err: true, + }, + { + name: "missing event attribute amount", + eventHandlerArgs: nodetypes.EventHandlerArgs{ + EventAttributes: fullAttributes[:7], + }, + expected: nil, + err: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + logger, observedLogs := logCapturer() + ctx := types.NewContext(context.Background(), logger, "") + + err := h.finalizeWithdrawalHandler(ctx, tc.eventHandlerArgs) + if !tc.err { + require.NoError(t, err) + if tc.expected != nil { + logs := observedLogs.TakeAll() + require.Len(t, logs, 1) + + expectedMsg, expectedFields := tc.expected() + require.Equal(t, expectedMsg, logs[0].Message) + require.Equal(t, expectedFields, logs[0].Context) + } + } else { + require.Error(t, err) + } + }) + } + require.NoError(t, err) +} diff --git a/executor/querier.go b/executor/querier.go new file mode 100644 index 0000000..4222a3c --- /dev/null +++ b/executor/querier.go @@ -0,0 +1,69 @@ +package executor + +import ( + "strconv" + + "github.com/gofiber/fiber/v2" + "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" +) + +func (ex *Executor) RegisterQuerier() { + ex.server.RegisterQuerier("/withdrawal/:sequence", func(c *fiber.Ctx) error { + sequenceStr := c.Params("sequence") + if sequenceStr == "" { + return errors.New("sequence is required") + } + sequence, err := strconv.ParseUint(sequenceStr, 10, 64) + if err != nil { + return errors.Wrap(err, "failed to parse sequence") + } + res, err := ex.child.QueryWithdrawal(sequence) + if err != nil { + return err + } + return c.JSON(res) + }) + + ex.server.RegisterQuerier("/withdrawals/:address", func(c *fiber.Ctx) error { + address := c.Params("address") + if address == "" { + return errors.New("address is required") + } + + offset := c.QueryInt("offset", 0) + uoffset, err := types.SafeInt64ToUint64(int64(offset)) + if err != nil { + return errors.Wrap(err, "failed to convert offset") + } + + limit := c.QueryInt("limit", 10) + if limit > 100 { + limit = 100 + } + + ulimit, err := types.SafeInt64ToUint64(int64(limit)) + if err != nil { + return 
errors.Wrap(err, "failed to convert limit") + } + + descOrder := true + orderStr := c.Query("order", "desc") + if orderStr == "asc" { + descOrder = false + } + res, err := ex.child.QueryWithdrawals(address, uoffset, ulimit, descOrder) + if err != nil { + return err + } + return c.JSON(res) + }) + + ex.server.RegisterQuerier("/status", func(c *fiber.Ctx) error { + status, err := ex.GetStatus() + if err != nil { + return err + } + return c.JSON(status) + }) +} diff --git a/executor/status.go b/executor/status.go index 011fcd7..e505714 100644 --- a/executor/status.go +++ b/executor/status.go @@ -1,18 +1,19 @@ package executor import ( - "github.com/initia-labs/opinit-bots/executor/batch" + "github.com/initia-labs/opinit-bots/executor/batchsubmitter" "github.com/initia-labs/opinit-bots/executor/child" "github.com/initia-labs/opinit-bots/executor/host" nodetypes "github.com/initia-labs/opinit-bots/node/types" + "github.com/pkg/errors" ) type Status struct { - BridgeId uint64 `json:"bridge_id"` - Host host.Status `json:"host,omitempty"` - Child child.Status `json:"child,omitempty"` - Batch batch.Status `json:"batch,omitempty"` - DA nodetypes.Status `json:"da,omitempty"` + BridgeId uint64 `json:"bridge_id"` + Host host.Status `json:"host,omitempty"` + Child child.Status `json:"child,omitempty"` + BatchSubmitter batchsubmitter.Status `json:"batch_submitter,omitempty"` + DA nodetypes.Status `json:"da,omitempty"` } func (ex Executor) GetStatus() (Status, error) { @@ -23,24 +24,24 @@ func (ex Executor) GetStatus() (Status, error) { s.BridgeId = ex.host.BridgeId() s.Host, err = ex.host.GetStatus() if err != nil { - return Status{}, err + return Status{}, errors.Wrap(err, "failed to get host status") } } if ex.child != nil { s.Child, err = ex.child.GetStatus() if err != nil { - return Status{}, err + return Status{}, errors.Wrap(err, "failed to get child status") } } - if ex.batch != nil { - s.Batch, err = ex.batch.GetStatus() + if ex.batchSubmitter != nil { + s.BatchSubmitter, err = ex.batchSubmitter.GetStatus() if err != nil { - return Status{}, err + return Status{}, errors.Wrap(err, "failed to get batch status") } - if ex.batch.DA() != nil { - s.DA, err = ex.batch.DA().GetNodeStatus() + if ex.batchSubmitter.DA() != nil { + s.DA, err = ex.batchSubmitter.DA().GetNodeStatus() if err != nil { - return Status{}, err + return Status{}, errors.Wrap(err, "failed to get DA status") } } } diff --git a/executor/types/batch.go b/executor/types/batch.go index 826d9ef..94ec47d 100644 --- a/executor/types/batch.go +++ b/executor/types/batch.go @@ -1,36 +1,73 @@ package types import ( - "context" "crypto/sha256" "encoding/binary" + "encoding/json" "fmt" "time" btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" nodetypes "github.com/initia-labs/opinit-bots/node/types" "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" + + "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" ) type DANode interface { - Start(context.Context) - HasKey() bool + DB() types.DB + Codec() codec.Codec + + Start(types.Context) + HasBroadcaster() bool + BroadcastProcessedMsgs(...btypes.ProcessedMsgs) + CreateBatchMsg([]byte) (sdk.Msg, string, error) - BroadcastMsgs(btypes.ProcessedMsgs) - ProcessedMsgsToRawKV(processedMsgs []btypes.ProcessedMsgs, delete bool) ([]types.RawKV, error) + GetNodeStatus() (nodetypes.Status, error) } +var LocalBatchInfoKey = []byte("local_batch_info") + type LocalBatchInfo struct { // start l2 block height which is included in the batch Start int64 
`json:"start"` // last l2 block height which is included in the batch End int64 `json:"end"` - + // last submission time of the batch LastSubmissionTime time.Time `json:"last_submission_time"` - BatchFileSize int64 `json:"batch_size"` + // batch file size + BatchSize int64 `json:"batch_size"` +} + +func (l LocalBatchInfo) Key() []byte { + return LocalBatchInfoKey +} + +func (l LocalBatchInfo) Value() ([]byte, error) { + bz, err := l.Marshal() + if err != nil { + return nil, err + } + return bz, nil +} + +func (l LocalBatchInfo) Marshal() ([]byte, error) { + bz, err := json.Marshal(l) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal local batch info") + } + return bz, nil +} + +func (l *LocalBatchInfo) Unmarshal(bz []byte) error { + if err := json.Unmarshal(bz, l); err != nil { + return errors.Wrap(err, "failed to unmarshal local batch info") + } + return nil } type BatchDataType uint8 diff --git a/executor/types/batch_test.go b/executor/types/batch_test.go index 72b3002..e1376db 100644 --- a/executor/types/batch_test.go +++ b/executor/types/batch_test.go @@ -2,10 +2,29 @@ package types import ( "testing" + "time" "github.com/stretchr/testify/require" ) +func TestLocalBatchInfo(t *testing.T) { + batchInfo := LocalBatchInfo{ + Start: 1, + End: 100, + LastSubmissionTime: time.Unix(0, 10000).UTC(), + BatchSize: 100, + } + + bz, err := batchInfo.Marshal() + require.NoError(t, err) + + batchInfo2 := LocalBatchInfo{} + err = batchInfo2.Unmarshal(bz) + require.NoError(t, err) + + require.Equal(t, batchInfo, batchInfo2) +} + func TestBatchDataHeader(t *testing.T) { start := uint64(1) end := uint64(100) @@ -36,3 +55,28 @@ func TestBatchDataHeader(t *testing.T) { require.Equal(t, checksums, header.Checksums) require.Equal(t, len(chunks), len(header.Checksums)) } + +func TestBatchDataChunk(t *testing.T) { + start := uint64(1) + end := uint64(100) + index := uint64(0) + length := uint64(100) + chunkData := []byte("chunk") + + chunkDataData := MarshalBatchDataChunk( + start, + end, + index, + length, + chunkData) + require.Equal(t, 1+8+8+8+8+5, len(chunkDataData)) + + chunk, err := UnmarshalBatchDataChunk(chunkDataData) + require.NoError(t, err) + + require.Equal(t, start, chunk.Start) + require.Equal(t, end, chunk.End) + require.Equal(t, index, chunk.Index) + require.Equal(t, length, chunk.Length) + require.Equal(t, chunkData, chunk.ChunkData) +} diff --git a/executor/types/config.go b/executor/types/config.go index 53ccf68..f620aa7 100644 --- a/executor/types/config.go +++ b/executor/types/config.go @@ -1,13 +1,13 @@ package types import ( - "errors" "time" btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" nodetypes "github.com/initia-labs/opinit-bots/node/types" servertypes "github.com/initia-labs/opinit-bots/server/types" + "github.com/pkg/errors" ) type NodeConfig struct { @@ -143,14 +143,14 @@ func DefaultConfig() *Config { MaxSubmissionTime: 60 * 60, // 1 hour DisableAutoSetL1Height: false, - L1StartHeight: 0, - L2StartHeight: 0, - BatchStartHeight: 0, + L1StartHeight: 1, + L2StartHeight: 1, + BatchStartHeight: 1, DisableDeleteFutureWithdrawal: false, } } -func (cfg Config) Validate() error { +func (cfg *Config) Validate() error { if cfg.Version == 0 { return errors.New("version is required") } @@ -164,15 +164,15 @@ func (cfg Config) Validate() error { } if err := cfg.L1Node.Validate(); err != nil { - return err + return errors.Wrap(err, "l1 node validation error") } if err := cfg.L2Node.Validate(); err != nil { - return err + return errors.Wrap(err, "l2 node 
validation error") } if err := cfg.DANode.Validate(); err != nil { - return err + return errors.Wrap(err, "da node validation error") } if cfg.MaxChunks <= 0 { @@ -187,21 +187,21 @@ func (cfg Config) Validate() error { return errors.New("max submission time must be greater than 0") } - if cfg.L1StartHeight < 0 { - return errors.New("l1 start height must be greater than or equal to 0") + if cfg.L1StartHeight <= 0 { + return errors.New("l1 start height must be greater than 0") } - if cfg.L2StartHeight < 0 { - return errors.New("l2 start height must be greater than or equal to 0") + if cfg.L2StartHeight <= 0 { + return errors.New("l2 start height must be greater than 0") } - if cfg.BatchStartHeight < 0 { - return errors.New("batch start height must be greater than or equal to 0") + if cfg.BatchStartHeight <= 0 { + return errors.New("batch start height must be greater than 0") } return nil } -func (cfg Config) L1NodeConfig(homePath string) nodetypes.NodeConfig { +func (cfg Config) L1NodeConfig() nodetypes.NodeConfig { nc := nodetypes.NodeConfig{ RPC: cfg.L1Node.RPCAddress, ProcessType: nodetypes.PROCESS_TYPE_DEFAULT, @@ -215,14 +215,13 @@ func (cfg Config) L1NodeConfig(homePath string) nodetypes.NodeConfig { GasAdjustment: cfg.L1Node.GasAdjustment, TxTimeout: time.Duration(cfg.L1Node.TxTimeout) * time.Second, Bech32Prefix: cfg.L1Node.Bech32Prefix, - HomePath: homePath, } } return nc } -func (cfg Config) L2NodeConfig(homePath string) nodetypes.NodeConfig { +func (cfg Config) L2NodeConfig() nodetypes.NodeConfig { nc := nodetypes.NodeConfig{ RPC: cfg.L2Node.RPCAddress, ProcessType: nodetypes.PROCESS_TYPE_DEFAULT, @@ -236,14 +235,13 @@ func (cfg Config) L2NodeConfig(homePath string) nodetypes.NodeConfig { GasAdjustment: cfg.L2Node.GasAdjustment, TxTimeout: time.Duration(cfg.L2Node.TxTimeout) * time.Second, Bech32Prefix: cfg.L2Node.Bech32Prefix, - HomePath: homePath, } } return nc } -func (cfg Config) DANodeConfig(homePath string) nodetypes.NodeConfig { +func (cfg Config) DANodeConfig() nodetypes.NodeConfig { nc := nodetypes.NodeConfig{ RPC: cfg.DANode.RPCAddress, ProcessType: nodetypes.PROCESS_TYPE_ONLY_BROADCAST, @@ -257,7 +255,6 @@ func (cfg Config) DANodeConfig(homePath string) nodetypes.NodeConfig { GasAdjustment: cfg.DANode.GasAdjustment, TxTimeout: time.Duration(cfg.DANode.TxTimeout) * time.Second, Bech32Prefix: cfg.DANode.Bech32Prefix, - HomePath: homePath, } } return nc diff --git a/executor/types/db.go b/executor/types/db.go index c69492e..93c407a 100644 --- a/executor/types/db.go +++ b/executor/types/db.go @@ -1,5 +1,11 @@ package types +import ( + "encoding/json" + + "github.com/pkg/errors" +) + type WithdrawalData struct { Sequence uint64 `json:"sequence"` From string `json:"from"` @@ -7,9 +13,83 @@ type WithdrawalData struct { Amount uint64 `json:"amount"` BaseDenom string `json:"base_denom"` WithdrawalHash []byte `json:"withdrawal_hash"` + + // extra info + TxHeight int64 `json:"tx_height"` + TxTime int64 `json:"tx_time"` + TxHash string `json:"tx_hash"` +} + +func NewWithdrawalData( + sequence uint64, + from string, + to string, + amount uint64, + baseDenom string, + withdrawalHash []byte, + txHeight int64, + txTime int64, + txHash string, +) WithdrawalData { + return WithdrawalData{ + Sequence: sequence, + From: from, + To: to, + Amount: amount, + BaseDenom: baseDenom, + WithdrawalHash: withdrawalHash, + TxHeight: txHeight, + TxTime: txTime, + TxHash: txHash, + } +} + +func (w WithdrawalData) Marshal() ([]byte, error) { + bz, err := json.Marshal(w) + if err != nil { + return nil, 
errors.Wrap(err, "failed to marshal withdrawal data") + } + return bz, nil +} + +func (w *WithdrawalData) Unmarshal(bz []byte) error { + err := json.Unmarshal(bz, w) + if err != nil { + return errors.Wrap(err, "failed to unmarshal withdrawal data") + } + return nil } type TreeExtraData struct { BlockNumber int64 `json:"block_number"` + BlockTime int64 `json:"block_time"` BlockHash []byte `json:"block_hash"` } + +func NewTreeExtraData( + blockNumber int64, + blockTime int64, + blockHash []byte, +) TreeExtraData { + return TreeExtraData{ + BlockNumber: blockNumber, + BlockTime: blockTime, + BlockHash: blockHash, + } +} + +func (t TreeExtraData) Marshal() ([]byte, error) { + bz, err := json.Marshal(t) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal tree extra data") + } + return bz, nil +} + +func (t *TreeExtraData) Unmarshal(bz []byte) error { + err := json.Unmarshal(bz, t) + if err != nil { + return errors.Wrap(err, "failed to unmarshal tree extra data") + } + return nil +} diff --git a/executor/types/db_test.go b/executor/types/db_test.go new file mode 100644 index 0000000..fea768d --- /dev/null +++ b/executor/types/db_test.go @@ -0,0 +1,49 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestWithdrawalData(t *testing.T) { + wd := WithdrawalData{ + Sequence: 100, + From: "from", + To: "to", + Amount: 100, + BaseDenom: "base_denom", + WithdrawalHash: []byte("withdrawal_hash"), + TxHeight: 1000, + TxTime: 10000, + TxHash: "tx_hash", + } + wd2 := NewWithdrawalData(100, "from", "to", 100, "base_denom", []byte("withdrawal_hash"), 1000, 10000, "tx_hash") + require.Equal(t, wd, wd2) + + bz, err := wd.Marshal() + require.NoError(t, err) + + wd3 := WithdrawalData{} + err = wd3.Unmarshal(bz) + require.NoError(t, err) + require.Equal(t, wd, wd3) +} + +func TestTreeExtraData(t *testing.T) { + td := TreeExtraData{ + BlockNumber: 100, + BlockTime: 100, + BlockHash: []byte("block_hash"), + } + td2 := NewTreeExtraData(100, 100, []byte("block_hash")) + require.Equal(t, td, td2) + + bz, err := td.Marshal() + require.NoError(t, err) + + td3 := TreeExtraData{} + err = td3.Unmarshal(bz) + require.NoError(t, err) + require.Equal(t, td, td3) +} diff --git a/executor/types/key.go b/executor/types/key.go index deae983..7790d2a 100644 --- a/executor/types/key.go +++ b/executor/types/key.go @@ -2,20 +2,51 @@ package types import ( dbtypes "github.com/initia-labs/opinit-bots/db/types" + "github.com/pkg/errors" ) var ( - WithdrawalKey = []byte("withdrawal") + WithdrawalPrefix = []byte("withdrawal") + WithdrawalSequencePrefix = []byte("withdrawal_sequence") + WithdrawalAddressPrefix = []byte("withdrawal_address") + + WithdrawalSequenceKeyLength = len(WithdrawalSequencePrefix) + 1 + 8 ) -func PrefixedWithdrawalKey(sequence uint64) []byte { - return append(append(WithdrawalKey, dbtypes.Splitter), dbtypes.FromUint64Key(sequence)...) 
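+// PrefixedWithdrawalSequence returns the store key of a withdrawal indexed by
+// sequence: the "withdrawal_sequence" prefix, the splitter, and the 8-byte
+// big-endian encoding of the sequence (see ParseWithdrawalSequenceKey below).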
+func PrefixedWithdrawalSequence(sequence uint64) []byte { + return dbtypes.GenerateKey([][]byte{ + WithdrawalSequencePrefix, + dbtypes.FromUint64Key(sequence), + }) +} + +func PrefixedWithdrawalAddress(address string) []byte { + return dbtypes.GenerateKey([][]byte{ + WithdrawalAddressPrefix, + []byte(address), + }) +} + +func PrefixedWithdrawalAddressSequence(address string, sequence uint64) []byte { + return dbtypes.GenerateKey([][]byte{ + WithdrawalAddressPrefix, + []byte(address), + dbtypes.FromUint64Key(sequence), + }) } -func PrefixedWithdrawalKeyAddress(address string) []byte { - return append(append(append(WithdrawalKey, dbtypes.Splitter), []byte(address)...), dbtypes.Splitter) +func ParseWithdrawalSequenceKey(key []byte) (uint64, error) { + if len(key) != WithdrawalSequenceKeyLength { + return 0, errors.New("invalid key length") + } + return dbtypes.ToUint64Key(key[len(WithdrawalSequencePrefix)+1:]), nil } -func PrefixedWithdrawalKeyAddressIndex(address string, index uint64) []byte { - return append(PrefixedWithdrawalKeyAddress(address), dbtypes.FromUint64Key(index)...) +func ParseWithdrawalAddressSequenceKey(key []byte) (string, uint64, error) { + if len(key) <= len(WithdrawalAddressPrefix)+1+8+1 { + return "", 0, errors.New("invalid key length") + } + sequence := dbtypes.ToUint64Key(key[len(key)-8:]) + address := string(key[len(WithdrawalAddressPrefix)+1 : len(key)-8-1]) + return address, sequence, nil } diff --git a/executor/types/key_test.go b/executor/types/key_test.go new file mode 100644 index 0000000..827e34b --- /dev/null +++ b/executor/types/key_test.go @@ -0,0 +1,47 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPrefixedWithdrawalSequence(t *testing.T) { + bz := PrefixedWithdrawalSequence(0) + require.Equal(t, bz, append([]byte("withdrawal_sequence/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}...)) + + bz = PrefixedWithdrawalSequence(100) + require.Equal(t, bz, append([]byte("withdrawal_sequence/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64}...)) +} + +func TestPrefixedWithdrawalAddress(t *testing.T) { + bz := PrefixedWithdrawalAddress("address") + require.Equal(t, bz, []byte("withdrawal_address/address")) +} + +func TestPrefixedWithdrawalAddressSequence(t *testing.T) { + bz := PrefixedWithdrawalAddressSequence("address", 0) + require.Equal(t, bz, append([]byte("withdrawal_address/address/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}...)) + + bz = PrefixedWithdrawalAddressSequence("address", 100) + require.Equal(t, bz, append([]byte("withdrawal_address/address/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64}...)) +} + +func TestParseWithdrawalSequenceKey(t *testing.T) { + _, err := ParseWithdrawalSequenceKey([]byte("withdrawal_sequence/")) + require.Error(t, err) + + sequence, err := ParseWithdrawalSequenceKey(append([]byte("withdrawal_sequence/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64}...)) + require.NoError(t, err) + require.Equal(t, uint64(100), sequence) +} + +func TestParseWithdrawalAddressSequenceKey(t *testing.T) { + _, _, err := ParseWithdrawalAddressSequenceKey([]byte("withdrawal_address/address/")) + require.Error(t, err) + + address, sequence, err := ParseWithdrawalAddressSequenceKey(append([]byte("withdrawal_address/address/"), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64}...)) + require.NoError(t, err) + require.Equal(t, "address", address) + require.Equal(t, uint64(100), sequence) +} diff --git a/executor/types/query.go 
b/executor/types/query.go index 3e1d539..01acfa0 100644 --- a/executor/types/query.go +++ b/executor/types/query.go @@ -16,8 +16,9 @@ type QueryWithdrawalResponse struct { LastBlockHash []byte `json:"last_block_hash"` // extra info - // BlockNumber int64 `json:"block_number"` - // WithdrawalHash []byte `json:"withdrawal_hash"` + TxTime int64 `json:"tx_time"` + TxHeight int64 `json:"tx_height"` + TxHash string `json:"tx_hash"` } type QueryWithdrawalsResponse struct { diff --git a/keys/address.go b/keys/address.go index 2a502d0..5b38b8b 100644 --- a/keys/address.go +++ b/keys/address.go @@ -6,10 +6,12 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ) +// EncodeBech32AccAddr encodes the given account address to bech32 format func EncodeBech32AccAddr(addr sdk.AccAddress, prefix string) (string, error) { return sdk.Bech32ifyAddressBytes(prefix, addr) } +// DecodeBech32AccAddr decodes the given bech32 account address func DecodeBech32AccAddr(addr string, prefix string) (sdk.AccAddress, error) { return sdk.GetFromBech32(addr, prefix) } diff --git a/keys/address_test.go b/keys/address_test.go new file mode 100644 index 0000000..9531275 --- /dev/null +++ b/keys/address_test.go @@ -0,0 +1,157 @@ +package keys + +import ( + "encoding/hex" + "testing" + + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func TestEncodeBech32Addr(t *testing.T) { + cases := []struct { + title string + base string + bech32Prefix string + expected string + err bool + }{ + { + title: "init", + base: "b8fb0b7c5158f5028ff0a8ce99712fb545cbf74e", + bech32Prefix: "init", + expected: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", + err: false, + }, + { + title: "abcdef", + base: "b8fb0b7c5158f5028ff0a8ce99712fb545cbf74e", + bech32Prefix: "abcdef", + expected: "abcdef1hrasklz3tr6s9rls4r8fjuf0k4zuha6whsl7cn", + err: false, + }, + { + title: "cosmos", + base: "b8fb0b7c5158f5028ff0a8ce99712fb545cbf74e", + bech32Prefix: "cosmos", + expected: "cosmos1hrasklz3tr6s9rls4r8fjuf0k4zuha6wt4u7jk", + err: false, + }, + { + title: "empty prefix", + base: "b8fb0b7c5158f5028ff0a8ce99712fb545cbf74e", + bech32Prefix: "", + expected: "", + err: true, + }, + { + title: "empty base", + base: "", + bech32Prefix: "abcde", + expected: "", + err: false, + }, + } + + for _, tc := range cases { + t.Run(tc.title, func(t *testing.T) { + hexBase, err := hex.DecodeString(tc.base) + require.NoError(t, err) + + addr, err := EncodeBech32AccAddr(hexBase, tc.bech32Prefix) + if tc.err { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tc.expected, addr) + } + }) + } +} + +func TestDecodeBech32Addr(t *testing.T) { + cases := []struct { + title string + address string + bech32Prefix string + expected string + err bool + }{ + { + title: "init", + address: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", + bech32Prefix: "init", + expected: "b8fb0b7c5158f5028ff0a8ce99712fb545cbf74e", + err: false, + }, + { + title: "abcdef", + address: "abcdef1hrasklz3tr6s9rls4r8fjuf0k4zuha6whsl7cn", + bech32Prefix: "abcdef", + expected: "b8fb0b7c5158f5028ff0a8ce99712fb545cbf74e", + err: false, + }, + { + title: "cosmos", + address: "cosmos1hrasklz3tr6s9rls4r8fjuf0k4zuha6wt4u7jk", + bech32Prefix: "cosmos", + expected: "b8fb0b7c5158f5028ff0a8ce99712fb545cbf74e", + err: false, + }, + { + title: "empty prefix", + address: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", + bech32Prefix: "", + expected: "", + err: true, + }, + { + title: "invalid prefix", + address: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5", + 
bech32Prefix: "init1", + expected: "", + err: true, + }, + { + title: "invalid address", + address: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9r1ude5", + bech32Prefix: "init", + expected: "", + err: true, + }, + } + + for _, tc := range cases { + t.Run(tc.title, func(t *testing.T) { + base, err := DecodeBech32AccAddr(tc.address, tc.bech32Prefix) + if tc.err { + require.Error(t, err) + } else { + require.NoError(t, err) + expected, err := hex.DecodeString(tc.expected) + require.NoError(t, err) + require.Equal(t, expected, base.Bytes()) + } + }) + } +} + +func TestSetSDKConfigContext(t *testing.T) { + prefixes := []string{"init", "cosmos", "abcdef", "abcabcabc", ""} + + for _, prefix := range prefixes { + t.Run(prefix, func(t *testing.T) { + t.Parallel() + unlock := SetSDKConfigContext(prefix) + defer unlock() + + // test the sdk config + conf := sdk.GetConfig() + require.Equal(t, prefix, conf.GetBech32AccountAddrPrefix()) + require.Equal(t, prefix+"pub", conf.GetBech32AccountPubPrefix()) + require.Equal(t, prefix+"valoper", conf.GetBech32ValidatorAddrPrefix()) + require.Equal(t, prefix+"valoperpub", conf.GetBech32ValidatorPubPrefix()) + }) + } +} diff --git a/keys/codec.go b/keys/codec.go index 890e80d..b68461c 100644 --- a/keys/codec.go +++ b/keys/codec.go @@ -1,6 +1,7 @@ package keys import ( + "cosmossdk.io/errors" "cosmossdk.io/x/tx/signing" "github.com/cosmos/gogoproto/proto" @@ -24,7 +25,7 @@ func CreateCodec(registerFns []RegisterInterfaces) (codec.Codec, client.TxConfig }, }) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "failed to create interface registry") } appCodec := codec.NewProtoCodec(interfaceRegistry) std.RegisterInterfaces(interfaceRegistry) diff --git a/keys/codec_test.go b/keys/codec_test.go new file mode 100644 index 0000000..366973b --- /dev/null +++ b/keys/codec_test.go @@ -0,0 +1,54 @@ +package keys + +import ( + "testing" + + "github.com/initia-labs/OPinit/x/opchild" + opchildtypes "github.com/initia-labs/OPinit/x/opchild/types" + "github.com/initia-labs/opinit-bots/txutils" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/cosmos/cosmos-sdk/x/auth" +) + +func TestCreateCodec(t *testing.T) { + unlock := SetSDKConfigContext("init") + codec, txConfig, err := CreateCodec([]RegisterInterfaces{ + auth.AppModuleBasic{}.RegisterInterfaces, + opchild.AppModuleBasic{}.RegisterInterfaces, + }) + require.NoError(t, err) + unlock() + + _, _, err = codec.GetMsgV1Signers(&opchildtypes.MsgUpdateOracle{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5"}) + require.NoError(t, err) + _, _, err = codec.GetMsgV1Signers(&opchildtypes.MsgUpdateOracle{Sender: "cosmos1hrasklz3tr6s9rls4r8fjuf0k4zuha6wt4u7jk"}) + require.Error(t, err) + + txf := tx.Factory{}.WithChainID("test_chain").WithTxConfig(txConfig) + txb, err := txf.BuildUnsignedTx(&opchildtypes.MsgUpdateOracle{}) + require.NoError(t, err) + txbytes, err := txutils.EncodeTx(txConfig, txb.GetTx()) + require.NoError(t, err) + _, err = txutils.DecodeTx(txConfig, txbytes) + require.NoError(t, err) + + unlock = SetSDKConfigContext("cosmos") + emptyCodec, emptyTxConfig, err := CreateCodec([]RegisterInterfaces{ + auth.AppModuleBasic{}.RegisterInterfaces, + }) + require.NoError(t, err) + unlock() + + _, _, err = emptyCodec.GetMsgV1Signers(&opchildtypes.MsgUpdateOracle{}) + require.Error(t, err) + + txf = txf.WithTxConfig(emptyTxConfig) + txb, err = txf.BuildUnsignedTx(&opchildtypes.MsgUpdateOracle{}) + require.NoError(t, err) + txbytes, err = 
txutils.EncodeTx(emptyTxConfig, txb.GetTx()) + require.NoError(t, err) + _, err = txutils.DecodeTx(emptyTxConfig, txbytes) + require.Error(t, err) +} diff --git a/keys/keyring.go b/keys/keyring.go index c46c166..c6818db 100644 --- a/keys/keyring.go +++ b/keys/keyring.go @@ -11,11 +11,17 @@ import ( "github.com/cosmos/cosmos-sdk/crypto/keyring" ) +// GetKeyDir returns the key directory based on the home path and chain ID. func GetKeyDir(homePath string, chainId string) string { return path.Join(homePath, chainId) } +// GetKeyBase returns a keybase based on the given chain ID and directory. +// If the directory is empty, an in-memory keybase is returned. func GetKeyBase(chainId string, dir string, cdc codec.Codec, userInput io.Reader) (keyring.Keyring, error) { + if dir == "" { + return keyring.NewInMemory(cdc), nil + } return keyring.New(chainId, "test", GetKeyDir(dir, chainId), userInput, cdc) } diff --git a/keys/keyring_test.go b/keys/keyring_test.go new file mode 100644 index 0000000..4e83f3a --- /dev/null +++ b/keys/keyring_test.go @@ -0,0 +1,26 @@ +package keys + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/crypto/keyring" +) + +func TestGetKeyDir(t *testing.T) { + require.Equal(t, "homePath/chainId", GetKeyDir("homePath", "chainId")) + require.Equal(t, "chainId", GetKeyDir("", "chainId")) +} + +func TestGetKeyBase(t *testing.T) { + keybase, err := GetKeyBase("chainId", "dir", nil, nil) + require.NoError(t, err) + require.NotNil(t, keybase) + require.Equal(t, keyring.BackendTest, keybase.Backend()) + + keybase, err = GetKeyBase("chainId", "", nil, nil) + require.NoError(t, err) + require.NotNil(t, keybase) + require.Equal(t, keyring.BackendMemory, keybase.Backend()) +} diff --git a/merkle/db.go b/merkle/db.go new file mode 100644 index 0000000..b24680f --- /dev/null +++ b/merkle/db.go @@ -0,0 +1,135 @@ +package merkle + +import ( + "encoding/json" + "fmt" + + dbtypes "github.com/initia-labs/opinit-bots/db/types" + merkletypes "github.com/initia-labs/opinit-bots/merkle/types" + types "github.com/initia-labs/opinit-bots/types" + + "github.com/pkg/errors" +) + +// DeleteFutureFinalizedTrees deletes all finalized trees with sequence number greater than or equal to fromSequence. +func DeleteFutureFinalizedTrees(db types.DB, fromSequence uint64) error { + return db.Iterate(dbtypes.AppendSplitter(merkletypes.FinalizedTreePrefix), merkletypes.PrefixedFinalizedTreeKey(fromSequence), func(key, _ []byte) (bool, error) { + err := db.Delete(key) + if err != nil { + return true, err + } + return false, nil + }) +} + +// DeleteFutureWorkingTrees deletes all working trees with version greater than or equal to fromVersion. +func DeleteFutureWorkingTrees(db types.DB, fromVersion uint64) error { + return db.Iterate(dbtypes.AppendSplitter(merkletypes.WorkingTreePrefix), merkletypes.PrefixedWorkingTreeKey(fromVersion), func(key, _ []byte) (bool, error) { + err := db.Delete(key) + if err != nil { + return true, err + } + return false, nil + }) +} + +// GetWorkingTree returns the working tree with the given version. +func GetWorkingTree(db types.BasicDB, version uint64) (merkletypes.TreeInfo, error) { + data, err := db.Get(merkletypes.PrefixedWorkingTreeKey(version)) + if err != nil { + return merkletypes.TreeInfo{}, err + } + + var workingTree merkletypes.TreeInfo + err = json.Unmarshal(data, &workingTree) + return workingTree, err +} + +// SaveWorkingTree saves the working tree to the db. 
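+// The tree info is stored as JSON under its version-keyed working tree key,
+// matching the read path in GetWorkingTree above.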
+func SaveWorkingTree(db types.BasicDB, workingTree merkletypes.TreeInfo) error { + value, err := workingTree.Value() + if err != nil { + return err + } + return db.Set(workingTree.Key(), value) +} + +// GetFinalizedTree returns the finalized tree with the given start leaf index. +func GetFinalizedTree(db types.BasicDB, startLeafIndex uint64) (merkletypes.FinalizedTreeInfo, error) { + data, err := db.Get(merkletypes.PrefixedFinalizedTreeKey(startLeafIndex)) + if err != nil { + return merkletypes.FinalizedTreeInfo{}, err + } + + var finalizedTree merkletypes.FinalizedTreeInfo + err = json.Unmarshal(data, &finalizedTree) + return finalizedTree, err +} + +// SaveFinalizedTree saves the finalized tree to the db. +func SaveFinalizedTree(db types.BasicDB, finalizedTree merkletypes.FinalizedTreeInfo) error { + value, err := finalizedTree.Value() + if err != nil { + return err + } + return db.Set(finalizedTree.Key(), value) +} + +// SaveNodes saves the nodes to the db. +func SaveNodes(db types.BasicDB, nodes ...merkletypes.Node) error { + for _, node := range nodes { + err := db.Set(node.Key(), node.Value()) + if err != nil { + return err + } + } + return nil +} + +// GetNodeBytes returns the node with the given tree index, height, and local node index. +func GetNodeBytes(db types.BasicDB, treeIndex uint64, height uint8, localNodeIndex uint64) ([]byte, error) { + return db.Get(merkletypes.PrefixedNodeKey(treeIndex, height, localNodeIndex)) +} + +// GetProofs returns the proofs for the leaf with the given index. +func GetProofs(db types.DB, leafIndex uint64) (proofs [][]byte, treeIndex uint64, rootData []byte, extraData []byte, err error) { + _, value, err := db.SeekPrevInclusiveKey(merkletypes.FinalizedTreePrefix, merkletypes.PrefixedFinalizedTreeKey(leafIndex)) + if errors.Is(err, dbtypes.ErrNotFound) { + return nil, 0, nil, nil, merkletypes.ErrUnfinalizedTree + } else if err != nil { + return nil, 0, nil, nil, err + } + + var treeInfo merkletypes.FinalizedTreeInfo + if err := json.Unmarshal(value, &treeInfo); err != nil { + return nil, 0, nil, nil, err + } + + // Check if the leaf index is in the tree + if leafIndex < treeInfo.StartLeafIndex { + return nil, 0, nil, nil, fmt.Errorf("leaf (`%d`) is not found in tree (`%d`)", leafIndex, treeInfo.TreeIndex) + } else if leafIndex-treeInfo.StartLeafIndex >= treeInfo.LeafCount { + return nil, 0, nil, nil, merkletypes.ErrUnfinalizedTree + } + + height := uint8(0) + localNodeIndex := leafIndex - treeInfo.StartLeafIndex + for height < treeInfo.TreeHeight { + // In `FinalizeWorkingTree`, we ensure that the leaf count of the tree is always a power of two by filling the leaves as needed. + // This ensures that there is always a sibling for each leaf node. 
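+ // e.g. the node at local index 5 (0b101) pairs with index 4 (0b100) and
+ // vice versa, so XOR-ing the last bit always yields the sibling index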
+ siblingIndex := localNodeIndex ^ 1 // flip the last bit to find the sibling + sibling, err := GetNodeBytes(db, treeInfo.TreeIndex, height, siblingIndex) + if err != nil { + return nil, 0, nil, nil, errors.Wrap(err, "failed to get sibling node from db") + } + + // append the sibling to the proofs + proofs = append(proofs, sibling) + + // update iteration variables + height++ + localNodeIndex = localNodeIndex / 2 + } + + return proofs, treeInfo.TreeIndex, treeInfo.Root, treeInfo.ExtraData, nil +} diff --git a/merkle/db_test.go b/merkle/db_test.go new file mode 100644 index 0000000..bb093ca --- /dev/null +++ b/merkle/db_test.go @@ -0,0 +1,233 @@ +package merkle + +import ( + "testing" + + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/initia-labs/opinit-bots/db" + merkletypes "github.com/initia-labs/opinit-bots/merkle/types" + "github.com/stretchr/testify/require" +) + +func TestSaveGetWorkingTree(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + + _, err = GetWorkingTree(db, 3) + require.Error(t, err) + + workingTree := merkletypes.TreeInfo{ + Version: 10, + Index: 3, + LeafCount: 10, + StartLeafIndex: 5, + LastSiblings: map[uint8][]byte{ + 0: []byte("node1"), + }, + Done: true, + } + err = SaveWorkingTree(db, workingTree) + require.NoError(t, err) + + tree, err := GetWorkingTree(db, 10) + require.NoError(t, err) + require.Equal(t, workingTree, tree) +} + +func TestSaveGetFinalizedTree(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + + _, err = GetFinalizedTree(db, 5) + require.Error(t, err) + + finalizedTree := merkletypes.FinalizedTreeInfo{ + TreeIndex: 5, + TreeHeight: 3, + Root: []byte("root"), + StartLeafIndex: 5, + LeafCount: 10, + ExtraData: []byte("extra data"), + } + err = SaveFinalizedTree(db, finalizedTree) + require.NoError(t, err) + + tree, err := GetFinalizedTree(db, 5) + require.NoError(t, err) + require.Equal(t, finalizedTree, tree) +} + +func TestSaveGetNodes(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + + _, err = GetNodeBytes(db, 3, 5, 0) + require.Error(t, err) + _, err = GetNodeBytes(db, 3, 5, 1) + require.Error(t, err) + + node0 := merkletypes.Node{ + TreeIndex: 3, + Height: 5, + LocalNodeIndex: 0, + Data: []byte("node0"), + } + node1 := merkletypes.Node{ + TreeIndex: 3, + Height: 5, + LocalNodeIndex: 1, + Data: []byte("node1"), + } + + err = SaveNodes(db, node0, node1) + require.NoError(t, err) + + node0bytes, err := GetNodeBytes(db, 3, 5, 0) + require.NoError(t, err) + + require.Equal(t, node0.Value(), node0bytes) + node1bytes, err := GetNodeBytes(db, 3, 5, 1) + require.NoError(t, err) + require.Equal(t, node1.Value(), node1bytes) +} + +func TestGetProofs(t *testing.T) { + db, err := db.NewMemDB() + require.NoError(t, err) + + hashFn := ophosttypes.GenerateNodeHash + m, err := NewMerkle(hashFn) + require.NoError(t, err) + + require.NoError(t, m.InitializeWorkingTree(10, 1, 1)) + + // insert 6 nodes + nodes, err := m.InsertLeaf([]byte("node1")) + require.NoError(t, err) + err = SaveNodes(db, nodes...) + require.NoError(t, err) + nodes, err = m.InsertLeaf([]byte("node2")) + require.NoError(t, err) + err = SaveNodes(db, nodes...) + require.NoError(t, err) + nodes, err = m.InsertLeaf([]byte("node3")) + require.NoError(t, err) + err = SaveNodes(db, nodes...) + require.NoError(t, err) + nodes, err = m.InsertLeaf([]byte("node4")) + require.NoError(t, err) + err = SaveNodes(db, nodes...) 
+ require.NoError(t, err) + nodes, err = m.InsertLeaf([]byte("node5")) + require.NoError(t, err) + err = SaveNodes(db, nodes...) + require.NoError(t, err) + nodes, err = m.InsertLeaf([]byte("node6")) + require.NoError(t, err) + err = SaveNodes(db, nodes...) + require.NoError(t, err) + + hash12 := hashFn([]byte("node1"), []byte("node2")) + hash34 := hashFn([]byte("node3"), []byte("node4")) + hash56 := hashFn([]byte("node5"), []byte("node6")) + hash66 := hashFn([]byte("node6"), []byte("node6")) + hash1234 := hashFn(hash12[:], hash34[:]) + hash5666 := hashFn(hash56[:], hash66[:]) + hashRoot := hashFn(hash1234[:], hash5666[:]) + + extraData := []byte("extra data") + finalizedTree, nodes, root, err := m.FinalizeWorkingTree(extraData) + require.NoError(t, err) + require.Equal(t, hashRoot[:], root) + + err = SaveFinalizedTree(db, *finalizedTree) + require.NoError(t, err) + err = SaveNodes(db, nodes...) + require.NoError(t, err) + + proofs, treeIndex, root_, extraData, err := GetProofs(db, 1) + require.NoError(t, err) + require.Equal(t, uint64(1), treeIndex) + require.Equal(t, root, root_) + require.Equal(t, []byte("extra data"), extraData) + require.Len(t, proofs, 3) + require.Equal(t, []byte("node2"), proofs[0]) + require.Equal(t, hash34[:], proofs[1]) + require.Equal(t, hash5666[:], proofs[2]) +} + +func TestDeleteFutureFinalizedTrees(t *testing.T) { //nolint + db, err := db.NewMemDB() + require.NoError(t, err) + + for i := 1; i <= 10; i++ { + finalizedTree := merkletypes.FinalizedTreeInfo{StartLeafIndex: uint64(i)} + err = SaveFinalizedTree(db, finalizedTree) + require.NoError(t, err) + } + + err = DeleteFutureFinalizedTrees(db, 11) + require.NoError(t, err) + for i := 1; i <= 10; i++ { + tree, err := GetFinalizedTree(db, uint64(i)) + require.NoError(t, err) + require.Equal(t, tree.StartLeafIndex, uint64(i)) + } + + err = DeleteFutureFinalizedTrees(db, 5) + require.NoError(t, err) + for i := 1; i <= 4; i++ { + tree, err := GetFinalizedTree(db, uint64(i)) + require.NoError(t, err) + require.Equal(t, tree.StartLeafIndex, uint64(i)) + } + for i := 5; i <= 10; i++ { + _, err := GetFinalizedTree(db, uint64(i)) + require.Error(t, err) + } + + err = DeleteFutureFinalizedTrees(db, 0) + require.NoError(t, err) + for i := 1; i <= 10; i++ { + _, err := GetFinalizedTree(db, uint64(i)) + require.Error(t, err) + } +} + +func TestDeleteFutureWorkingTrees(t *testing.T) { //nolint + db, err := db.NewMemDB() + require.NoError(t, err) + + for i := 1; i <= 10; i++ { + tree := merkletypes.TreeInfo{Version: uint64(i)} + err = SaveWorkingTree(db, tree) + require.NoError(t, err) + } + + err = DeleteFutureWorkingTrees(db, 11) + require.NoError(t, err) + for i := 1; i <= 10; i++ { + tree, err := GetWorkingTree(db, uint64(i)) + require.NoError(t, err) + require.Equal(t, tree.Version, uint64(i)) + } + + err = DeleteFutureWorkingTrees(db, 5) + require.NoError(t, err) + for i := 1; i <= 4; i++ { + tree, err := GetWorkingTree(db, uint64(i)) + require.NoError(t, err) + require.Equal(t, tree.Version, uint64(i)) + } + for i := 5; i <= 10; i++ { + _, err := GetWorkingTree(db, uint64(i)) + require.Error(t, err) + } + + err = DeleteFutureWorkingTrees(db, 0) + require.NoError(t, err) + for i := 1; i <= 10; i++ { + _, err := GetWorkingTree(db, uint64(i)) + require.Error(t, err) + } +} diff --git a/merkle/merkle.go b/merkle/merkle.go index 14a0048..27c6f77 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -2,12 +2,10 @@ package merkle import ( "crypto/rand" - "encoding/json" "errors" "fmt" "math/bits" - dbtypes 
"github.com/initia-labs/opinit-bots/db/types" merkletypes "github.com/initia-labs/opinit-bots/merkle/types" types "github.com/initia-labs/opinit-bots/types" ) @@ -20,7 +18,6 @@ type NodeGeneratorFn func([]byte, []byte) [32]byte // Merkle is a struct that manages the merkle tree which only holds the last sibling // of each level(height) to minimize the memory usage. type Merkle struct { - db types.DB workingTree *merkletypes.TreeInfo nodeGeneratorFn NodeGeneratorFn } @@ -48,57 +45,56 @@ func validateNodeGeneratorFn(fn NodeGeneratorFn) error { return nil } -func NewMerkle(db types.DB, nodeGeneratorFn NodeGeneratorFn) (*Merkle, error) { +func NewMerkle(nodeGeneratorFn NodeGeneratorFn) (*Merkle, error) { err := validateNodeGeneratorFn(nodeGeneratorFn) if err != nil { return nil, err } return &Merkle{ - db: db, nodeGeneratorFn: nodeGeneratorFn, }, nil } // InitializeWorkingTree resets the working tree with the given tree index and start leaf index. -func (m *Merkle) InitializeWorkingTree(treeIndex uint64, startLeafIndex uint64) error { +func (m *Merkle) InitializeWorkingTree(version uint64, treeIndex uint64, startLeafIndex uint64) error { if treeIndex < 1 || startLeafIndex < 1 { return fmt.Errorf("failed to initialize working tree index: %d, leaf: %d; invalid index", treeIndex, startLeafIndex) } m.workingTree = &merkletypes.TreeInfo{ + Version: version, Index: treeIndex, StartLeafIndex: startLeafIndex, LeafCount: 0, LastSiblings: make(map[uint8][]byte), Done: false, } - return nil } // FinalizeWorkingTree finalizes the working tree and returns the finalized tree info. -func (m *Merkle) FinalizeWorkingTree(extraData []byte) ([]types.RawKV, []byte /* root */, error) { +func (m *Merkle) FinalizeWorkingTree(extraData []byte) (*merkletypes.FinalizedTreeInfo, []merkletypes.Node, []byte /* root */, error) { if m.workingTree == nil { - return nil, nil, errors.New("working tree is not initialized") + return nil, nil, nil, errors.New("working tree is not initialized") } m.workingTree.Done = true if m.workingTree.LeafCount == 0 { - return nil, merkletypes.EmptyRootHash[:], nil + return nil, nil, merkletypes.EmptyRootHash[:], nil } - err := m.fillLeaves() + newNodes, err := m.fillLeaves() if err != nil { - return nil, nil, err + return nil, nil, nil, err } height, err := m.Height() if err != nil { - return nil, nil, err + return nil, nil, nil, err } treeRootHash := m.workingTree.LastSiblings[height] - finalizedTreeInfo := merkletypes.FinalizedTreeInfo{ + finalizedTreeInfo := &merkletypes.FinalizedTreeInfo{ TreeIndex: m.workingTree.Index, TreeHeight: height, Root: treeRootHash, @@ -107,85 +103,31 @@ func (m *Merkle) FinalizeWorkingTree(extraData []byte) ([]types.RawKV, []byte /* ExtraData: extraData, } - data, err := json.Marshal(finalizedTreeInfo) - if err != nil { - return nil, nil, err - } - - // Save the finalized tree info with the start leaf index as the key, - // when we need to get the proofs for the leaf, we can get the tree info with the start leaf index. 
- kvs := []types.RawKV{{ - Key: m.db.PrefixedKey(finalizedTreeInfo.Key()), - Value: data, - }} - - return kvs, treeRootHash, err -} - -func (m *Merkle) DeleteFutureFinalizedTrees(fromSequence uint64) error { - return m.db.PrefixedIterate(merkletypes.FinalizedTreeKey, nil, func(key, _ []byte) (bool, error) { - sequence := dbtypes.ToUint64Key(key[len(key)-8:]) - if sequence >= fromSequence { - err := m.db.Delete(key) - if err != nil { - return true, err - } - } - return false, nil - }) -} - -func (m *Merkle) DeleteFutureWorkingTrees(fromVersion uint64) error { - return m.db.PrefixedIterate(merkletypes.WorkingTreeKey, nil, func(key, _ []byte) (bool, error) { - version := dbtypes.ToUint64Key(key[len(key)-8:]) - if version >= fromVersion { - err := m.db.Delete(key) - if err != nil { - return true, err - } - } - return false, nil - }) + return finalizedTreeInfo, newNodes, treeRootHash, nil } -// LoadWorkingTree loads the working tree from the database. +// PrepareWorkingTree sets the given last working tree as the current one and increments its version. +// If the last tree was already finalized, a fresh working tree is initialized for the next tree index. // // It is used to load the working tree to handle the case where the bot is stopped. -func (m *Merkle) LoadWorkingTree(version uint64) error { - data, err := m.db.Get(merkletypes.PrefixedWorkingTreeKey(version)) - if err != nil { - return err - } +func (m *Merkle) PrepareWorkingTree(lastWorkingTree merkletypes.TreeInfo) error { + m.workingTree = &lastWorkingTree + m.workingTree.Version++ - var workingTree merkletypes.TreeInfo - err = json.Unmarshal(data, &workingTree) - m.workingTree = &workingTree - if err != nil { - return err - } else if workingTree.Done { - nextTreeIndex := workingTree.Index + 1 - nextStartLeafIndex := workingTree.StartLeafIndex + workingTree.LeafCount - return m.InitializeWorkingTree(nextTreeIndex, nextStartLeafIndex) + if m.workingTree.Done { + nextTreeIndex := m.workingTree.Index + 1 + nextStartLeafIndex := m.workingTree.StartLeafIndex + m.workingTree.LeafCount + return m.InitializeWorkingTree(m.workingTree.Version, nextTreeIndex, nextStartLeafIndex) } return nil } -// SaveWorkingTree saves the working tree to the database. -// -// It is used to save the working tree to handle the case where the bot is stopped. -func (m *Merkle) SaveWorkingTree(version uint64) error { - if m.workingTree == nil { - return errors.New("working tree is not initialized") - } - - data, err := json.Marshal(&m.workingTree) - if err != nil { - return err - } - return m.db.Set(merkletypes.PrefixedWorkingTreeKey(version), data) -} - // Height returns the height of the working tree. +// +// Example: +// - For 7 leaves, the height is 3. +// - For 8 leaves, the height is 3. +// - For 9 leaves, the height is 4. +// - For 16 leaves, the height is 4. func (m *Merkle) Height() (uint8, error) { if m.workingTree == nil { return 0, errors.New("working tree is not initialized") @@ -198,86 +140,62 @@ func (m *Merkle) Height() (uint8, error) { return types.MustIntToUint8(bits.Len64(leafCount - 1)), nil } -// GetWorkingTreeIndex returns the index of the working tree. -func (m *Merkle) GetWorkingTreeIndex() (uint64, error) { - if m.workingTree == nil { - return 0, errors.New("working tree is not initialized") - } - return m.workingTree.Index, nil -} - -// GetWorkingTreeLeafCount returns the leaf count of the working tree. -func (m *Merkle) GetWorkingTreeLeafCount() (uint64, error) { - if m.workingTree == nil { - return 0, errors.New("working tree is not initialized") - } - return m.workingTree.LeafCount, nil -} - -// GetStartLeafIndex returns the start leaf index of the working tree. 
-func (m *Merkle) GetStartLeafIndex() (uint64, error) { +// WorkingTree returns the working tree. +func (m *Merkle) WorkingTree() (merkletypes.TreeInfo, error) { if m.workingTree == nil { - return 0, errors.New("working tree is not initialized") + return merkletypes.TreeInfo{}, errors.New("working tree is not initialized") } - return m.workingTree.StartLeafIndex, nil -} - -func (m *Merkle) saveNode(height uint8, localNodeIndex uint64, data []byte) error { - workingTreeIndex, err := m.GetWorkingTreeIndex() - if err != nil { - return err - } - return m.db.Set(merkletypes.PrefixedNodeKey(workingTreeIndex, height, localNodeIndex), data) -} - -func (m *Merkle) getNode(treeIndex uint64, height uint8, localNodeIndex uint64) ([]byte, error) { - return m.db.Get(merkletypes.PrefixedNodeKey(treeIndex, height, localNodeIndex)) + return *m.workingTree, nil } // fillLeaves fills the rest of the leaves with the last leaf. -func (m *Merkle) fillLeaves() error { +func (m *Merkle) fillLeaves() ([]merkletypes.Node, error) { if m.workingTree == nil { - return nil, errors.New("working tree is not initialized") + return nil, errors.New("working tree is not initialized") } height, err := m.Height() if err != nil { - return err + return nil, err } numRestLeaves := 1<<height - m.workingTree.LeafCount + if numRestLeaves == 0 { + return nil, nil + } + + newNodes := make([]merkletypes.Node, 0) + lastLeaf := m.workingTree.LastSiblings[0] + for i := uint64(0); i < numRestLeaves; i++ { + nodes, err := m.InsertLeaf(lastLeaf) + if err != nil { + return nil, err + } + newNodes = append(newNodes, nodes...) + } + + // the filled leaves are not real leaves, so restore the leaf count + m.workingTree.LeafCount -= numRestLeaves -// GetProofs returns the proofs for the leaf with the given index. -func (m *Merkle) GetProofs(leafIndex uint64) (proofs [][]byte, treeIndex uint64, rootData []byte, extraData []byte, err error) { - if leafIndex-treeInfo.StartLeafIndex >= treeInfo.LeafCount { - return nil, 0, nil, nil, merkletypes.ErrUnfinalizedTree - } - - height := uint8(0) - localNodeIndex := leafIndex - treeInfo.StartLeafIndex - for height < treeInfo.TreeHeight { - siblingIndex := localNodeIndex ^ 1 // flip the last bit to find the sibling - sibling, err := m.getNode(treeInfo.TreeIndex, height, siblingIndex) - if err != nil { - return nil, 0, nil, nil, err - } - - // append the sibling to the proofs - proofs = append(proofs, sibling) - - // update iteration variables - height++ - localNodeIndex = localNodeIndex / 2 - } - - return proofs, treeInfo.TreeIndex, treeInfo.Root, treeInfo.ExtraData, nil + return newNodes, nil }
make(map[uint8][]byte), + Done: false, + }, m.workingTree) + } else { + require.Error(t, err) + } + }) + } +} + +func TestHeight(t *testing.T) { + hashFn := ophosttypes.GenerateNodeHash + m, err := NewMerkle(hashFn) + require.NoError(t, err) + + cases := []struct { + title string + tree *merkletypes.TreeInfo + height uint8 + expected bool + }{ + {"0 leaf", &merkletypes.TreeInfo{LeafCount: 0}, 0, true}, + {"1 leaves", &merkletypes.TreeInfo{LeafCount: 1}, 1, true}, + {"2 leaves", &merkletypes.TreeInfo{LeafCount: 2}, 1, true}, + {"5 leaves", &merkletypes.TreeInfo{LeafCount: 5}, 3, true}, + {"1048576 leaves", &merkletypes.TreeInfo{LeafCount: 1048576}, 20, true}, + {"no tree", nil, 0, false}, + } + + for _, tc := range cases { + t.Run(tc.title, func(t *testing.T) { + m.workingTree = tc.tree + height, err := m.Height() + if tc.expected { + require.NoError(t, err) + require.Equal(t, tc.height, height) + } else { + require.Error(t, err) + } + }) + } +} + +func TestWorkingTree(t *testing.T) { + hashFn := ophosttypes.GenerateNodeHash + m, err := NewMerkle(hashFn) + require.NoError(t, err) + + _, err = m.WorkingTree() + require.Error(t, err) + + err = m.InitializeWorkingTree(10, 1, 1) + require.NoError(t, err) + + _, err = m.WorkingTree() require.NoError(t, err) +} + +func TestFillLeaves(t *testing.T) { + hashFn := ophosttypes.GenerateNodeHash + m, err := NewMerkle(hashFn) + require.NoError(t, err) + + _, err = m.fillLeaves() + require.Error(t, err) + + nodeData := []byte("node") + + hash12 := hashFn(nodeData, nodeData) + hash56 := hashFn([]byte("node"), []byte("node")) + hash66 := hashFn([]byte("node"), []byte("node")) + hash1234 := hashFn(hash12[:], hash12[:]) + hash5666 := hashFn(hash56[:], hash66[:]) + hashRoot := hashFn(hash1234[:], hash5666[:]) + + cases := []struct { + title string + leaves uint64 + nodes []merkletypes.Node + expected bool + }{ + {"0 leaf", 0, []merkletypes.Node{ + { + TreeIndex: 1, + Height: 0, + LocalNodeIndex: 0, + Data: []byte(nil), + }, + }, true}, + {"1 leaves", 1, []merkletypes.Node{ + { + TreeIndex: 1, + Height: 0, + LocalNodeIndex: 1, + Data: []byte("node"), + }, + { + TreeIndex: 1, + Height: 1, + LocalNodeIndex: 0, + Data: hash12[:], + }, + }, true}, + {"2 leaves", 2, nil, true}, + {"5 leaves", 5, []merkletypes.Node{ + { + TreeIndex: 1, + Height: 0, + LocalNodeIndex: 5, + Data: []byte("node"), + }, + { + TreeIndex: 1, + Height: 1, + LocalNodeIndex: 2, + Data: hash56[:], + }, + { + TreeIndex: 1, + Height: 0, + LocalNodeIndex: 6, + Data: []byte("node"), + }, + { + TreeIndex: 1, + Height: 0, + LocalNodeIndex: 7, + Data: []byte("node"), + }, + { + TreeIndex: 1, + Height: 1, + LocalNodeIndex: 3, + Data: hash66[:], + }, + { + TreeIndex: 1, + Height: 2, + LocalNodeIndex: 1, + Data: hash5666[:], + }, + { + TreeIndex: 1, + Height: 3, + LocalNodeIndex: 0, + Data: hashRoot[:], + }, + }, true}, + {"1048576 leaves", 1048576, nil, true}, + } + for _, tc := range cases { + t.Run(tc.title, func(t *testing.T) { + err = m.InitializeWorkingTree(10, 1, 1) + require.NoError(t, err) + + for i := uint64(0); i < tc.leaves; i++ { + _, err := m.InsertLeaf([]byte("node")) + require.NoError(t, err) + } + + nodes, err := m.fillLeaves() + require.NoError(t, err) + + if tc.expected { + require.NoError(t, err) + require.Equal(t, tc.nodes, nodes) + } else { + require.Error(t, err) + } + }) + } +} + +func TestInsertLeaf(t *testing.T) { hashFn := ophosttypes.GenerateNodeHash - m, err := NewMerkle(db, hashFn) + m, err := NewMerkle(hashFn) require.NoError(t, err) - require.NoError(t, 
m.InitializeWorkingTree(1, 1)) + require.NoError(t, m.InitializeWorkingTree(10, 1, 1)) // empty tree require.Len(t, m.workingTree.LastSiblings, 0) // 1 node - require.NoError(t, m.InsertLeaf([]byte("node1"))) + nodes, err := m.InsertLeaf([]byte("node1")) + require.NoError(t, err) require.Len(t, m.workingTree.LastSiblings, 1) require.Equal(t, []byte("node1"), m.workingTree.LastSiblings[0]) + require.Len(t, nodes, 1) + require.Equal(t, merkletypes.Node{ + TreeIndex: 1, + Height: 0, + LocalNodeIndex: 0, + Data: []byte("node1"), + }, nodes[0]) // 2 nodes hash12 := hashFn([]byte("node1"), []byte("node2")) - require.NoError(t, m.InsertLeaf([]byte("node2"))) + nodes, err = m.InsertLeaf([]byte("node2")) + require.NoError(t, err) require.Len(t, m.workingTree.LastSiblings, 2) require.Equal(t, []byte("node2"), m.workingTree.LastSiblings[0]) require.Equal(t, hash12[:], m.workingTree.LastSiblings[1]) + require.Len(t, nodes, 2) + require.Equal(t, merkletypes.Node{ + TreeIndex: 1, + Height: 0, + LocalNodeIndex: 1, + Data: []byte("node2"), + }, nodes[0]) + require.Equal(t, merkletypes.Node{ + TreeIndex: 1, + Height: 1, + LocalNodeIndex: 0, + Data: hash12[:], + }, nodes[1]) // 3 nodes - require.NoError(t, m.InsertLeaf([]byte("node3"))) + nodes, err = m.InsertLeaf([]byte("node3")) + require.NoError(t, err) require.Len(t, m.workingTree.LastSiblings, 2) require.Equal(t, []byte("node3"), m.workingTree.LastSiblings[0]) require.Equal(t, hash12[:], m.workingTree.LastSiblings[1]) + require.Len(t, nodes, 1) + require.Equal(t, merkletypes.Node{ + TreeIndex: 1, + Height: 0, + LocalNodeIndex: 2, + Data: []byte("node3"), + }, nodes[0]) // 4 nodes hash34 := hashFn([]byte("node3"), []byte("node4")) hash1234 := hashFn(hash12[:], hash34[:]) - require.NoError(t, m.InsertLeaf([]byte("node4"))) + nodes, err = m.InsertLeaf([]byte("node4")) + require.NoError(t, err) require.Len(t, m.workingTree.LastSiblings, 3) require.Equal(t, []byte("node4"), m.workingTree.LastSiblings[0]) require.Equal(t, hash34[:], m.workingTree.LastSiblings[1]) require.Equal(t, hash1234[:], m.workingTree.LastSiblings[2]) + require.Len(t, nodes, 3) + require.Equal(t, merkletypes.Node{ + TreeIndex: 1, + Height: 0, + LocalNodeIndex: 3, + Data: []byte("node4"), + }, nodes[0]) + require.Equal(t, merkletypes.Node{ + TreeIndex: 1, + Height: 1, + LocalNodeIndex: 1, + Data: hash34[:], + }, nodes[1]) + require.Equal(t, merkletypes.Node{ + TreeIndex: 1, + Height: 2, + LocalNodeIndex: 0, + Data: hash1234[:], + }, nodes[2]) // 5 nodes - require.NoError(t, m.InsertLeaf([]byte("node5"))) + nodes, err = m.InsertLeaf([]byte("node5")) + require.NoError(t, err) require.Len(t, m.workingTree.LastSiblings, 3) require.Equal(t, []byte("node5"), m.workingTree.LastSiblings[0]) require.Equal(t, hash34[:], m.workingTree.LastSiblings[1]) require.Equal(t, hash1234[:], m.workingTree.LastSiblings[2]) + require.Len(t, nodes, 1) + require.Equal(t, merkletypes.Node{ + TreeIndex: 1, + Height: 0, + LocalNodeIndex: 4, + Data: []byte("node5"), + }, nodes[0]) // 6 nodes hash56 := hashFn([]byte("node5"), []byte("node6")) - require.NoError(t, m.InsertLeaf([]byte("node6"))) + nodes, err = m.InsertLeaf([]byte("node6")) + require.NoError(t, err) require.Len(t, m.workingTree.LastSiblings, 3) require.Equal(t, []byte("node6"), m.workingTree.LastSiblings[0]) require.Equal(t, hash56[:], m.workingTree.LastSiblings[1]) require.Equal(t, hash1234[:], m.workingTree.LastSiblings[2]) + require.Len(t, nodes, 2) + require.Equal(t, merkletypes.Node{ + TreeIndex: 1, + Height: 0, + LocalNodeIndex: 5, + Data: 
[]byte("node6"), + }, nodes[0]) + require.Equal(t, merkletypes.Node{ + TreeIndex: 1, + Height: 1, + LocalNodeIndex: 2, + Data: hash56[:], + }, nodes[1]) } -func Test_FinalizeWorkingTree(t *testing.T) { - tempDir := t.TempDir() - db, err := db.NewDB(tempDir) - require.NoError(t, err) - +func TestFinalizeWorkingTree(t *testing.T) { hashFn := ophosttypes.GenerateNodeHash - m, err := NewMerkle(db, hashFn) + m, err := NewMerkle(hashFn) require.NoError(t, err) - require.NoError(t, m.InitializeWorkingTree(1, 1)) + require.NoError(t, m.InitializeWorkingTree(10, 1, 1)) // empty tree - kvs, root, err := m.FinalizeWorkingTree(nil) + finalizedTree, newNodes, root, err := m.FinalizeWorkingTree(nil) require.NoError(t, err) - require.Len(t, kvs, 0) require.Equal(t, merkletypes.EmptyRootHash[:], root) + require.Nil(t, finalizedTree) + require.Len(t, newNodes, 0) // insert 6 nodes - require.NoError(t, m.InsertLeaf([]byte("node1"))) - require.NoError(t, m.InsertLeaf([]byte("node2"))) - require.NoError(t, m.InsertLeaf([]byte("node3"))) - require.NoError(t, m.InsertLeaf([]byte("node4"))) - require.NoError(t, m.InsertLeaf([]byte("node5"))) - require.NoError(t, m.InsertLeaf([]byte("node6"))) + _, err = m.InsertLeaf([]byte("node1")) + require.NoError(t, err) + _, err = m.InsertLeaf([]byte("node2")) + require.NoError(t, err) + _, err = m.InsertLeaf([]byte("node3")) + require.NoError(t, err) + _, err = m.InsertLeaf([]byte("node4")) + require.NoError(t, err) + _, err = m.InsertLeaf([]byte("node5")) + require.NoError(t, err) + _, err = m.InsertLeaf([]byte("node6")) + require.NoError(t, err) hash12 := hashFn([]byte("node1"), []byte("node2")) hash34 := hashFn([]byte("node3"), []byte("node4")) @@ -113,13 +383,12 @@ func Test_FinalizeWorkingTree(t *testing.T) { hashRoot := hashFn(hash1234[:], hash5666[:]) extraData := []byte("extra data") - kvs, root, err = m.FinalizeWorkingTree(extraData) + finalizedTree, newNodes, root, err = m.FinalizeWorkingTree(extraData) require.NoError(t, err) require.Equal(t, hashRoot[:], root) - require.Len(t, kvs, 1) + // 7, 8, 78, 5678, 12345678 + require.Len(t, newNodes, 5) - var info merkletypes.FinalizedTreeInfo - require.NoError(t, json.Unmarshal(kvs[0].Value, &info)) require.Equal(t, merkletypes.FinalizedTreeInfo{ TreeIndex: 1, TreeHeight: 3, @@ -127,51 +396,5 @@ func Test_FinalizeWorkingTree(t *testing.T) { StartLeafIndex: 1, LeafCount: 6, ExtraData: extraData, - }, info) -} - -func Test_GetProofs(t *testing.T) { - tempDir := t.TempDir() - db, err := db.NewDB(tempDir) - require.NoError(t, err) - - hashFn := ophosttypes.GenerateNodeHash - m, err := NewMerkle(db, hashFn) - require.NoError(t, err) - - require.NoError(t, m.InitializeWorkingTree(1, 1)) - - // insert 6 nodes - require.NoError(t, m.InsertLeaf([]byte("node1"))) - require.NoError(t, m.InsertLeaf([]byte("node2"))) - require.NoError(t, m.InsertLeaf([]byte("node3"))) - require.NoError(t, m.InsertLeaf([]byte("node4"))) - require.NoError(t, m.InsertLeaf([]byte("node5"))) - require.NoError(t, m.InsertLeaf([]byte("node6"))) - - hash12 := hashFn([]byte("node1"), []byte("node2")) - hash34 := hashFn([]byte("node3"), []byte("node4")) - hash56 := hashFn([]byte("node5"), []byte("node6")) - hash66 := hashFn([]byte("node6"), []byte("node6")) - hash1234 := hashFn(hash12[:], hash34[:]) - hash5666 := hashFn(hash56[:], hash66[:]) - hashRoot := hashFn(hash1234[:], hash5666[:]) - - extraData := []byte("extra data") - kvs, root, err := m.FinalizeWorkingTree(extraData) - require.NoError(t, err) - require.Equal(t, hashRoot[:], root) - - // store batch 
kvs to db - require.NoError(t, db.RawBatchSet(kvs...)) - - proofs, treeIndex, root_, extraData, err := m.GetProofs(1) - require.NoError(t, err) - require.Equal(t, uint64(1), treeIndex) - require.Equal(t, root, root_) - require.Equal(t, []byte("extra data"), extraData) - require.Len(t, proofs, 3) - require.Equal(t, []byte("node2"), proofs[0]) - require.Equal(t, hash34[:], proofs[1]) - require.Equal(t, hash5666[:], proofs[2]) + }, *finalizedTree) } diff --git a/merkle/types/db.go b/merkle/types/db.go index 6d6e14d..c259924 100644 --- a/merkle/types/db.go +++ b/merkle/types/db.go @@ -1,6 +1,29 @@ +// DB Structure +// +// * WorkingTree (TreeInfo) +// - Key: `working_tree/${version}` +// - Description: Stores the tree information at the given chain height ('version'). +// +// * FinalizedTree +// - Key: `finalized_tree/${start_leaf_index}` +// - Description: Stores the finalized tree starting from the given `start_leaf_index`, +// which corresponds to the first L2 sequence number of the first withdrawal. +// +// * Node +// - Key: `node/${tree_index}${height}${local_node_index}` +// - Description: Stores node information at a specific position in the tree. +// - `tree_index`: Incremental index identifying the tree. +// - `height`: Vertical position of the node in the tree. +// - `local_node_index`: Index of the node within the tree, representing the order in which the node was added. package types -type TreeInfo struct { +import ( + "encoding/json" + + "github.com/pkg/errors" +) + +type LegacyTreeInfo struct { // Index of the tree used as prefix for the keys Index uint64 `json:"index"` @@ -17,6 +40,73 @@ type TreeInfo struct { Done bool `json:"done"` } +func (t LegacyTreeInfo) Migrate(version uint64) TreeInfo { + return TreeInfo{ + Version: version, + Index: t.Index, + LeafCount: t.LeafCount, + StartLeafIndex: t.StartLeafIndex, + LastSiblings: t.LastSiblings, + Done: t.Done, + } +} + +type TreeInfo struct { + // Version represents the chain height. We store the tree information for each height to + // maintain a record of the state of the tree at that specific height. 
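+ // For example, the snapshot of the working tree taken at chain height 100 is stored + // under the key `working_tree/100` (see PrefixedWorkingTreeKey in merkle/types/key.go).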
+ Version uint64 `json:"version"` + + // Index of the tree used as prefix for the keys + Index uint64 `json:"index"` + + // Number of leaves in the tree + LeafCount uint64 `json:"leaf_count"` + + // Cumulative number of leaves all the way up to the current tree + StartLeafIndex uint64 `json:"start_leaf_index"` + + // Last sibling of the height(level) of the tree + LastSiblings map[uint8][]byte `json:"last_siblings"` + + // Flag to indicate if the tree is finalized + Done bool `json:"done"` +} + +func NewTreeInfo(version uint64, index uint64, leafCount uint64, startLeafIndex uint64, lastSiblings map[uint8][]byte, done bool) TreeInfo { + return TreeInfo{ + Version: version, + Index: index, + LeafCount: leafCount, + StartLeafIndex: startLeafIndex, + LastSiblings: lastSiblings, + Done: done, + } +} + +func (t TreeInfo) Key() []byte { + return PrefixedWorkingTreeKey(t.Version) +} + +func (t TreeInfo) Value() ([]byte, error) { + return t.Marshal() +} + +func (t TreeInfo) Marshal() ([]byte, error) { + bz, err := json.Marshal(&t) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal tree info") + } + return bz, nil +} + +func (t *TreeInfo) Unmarshal(data []byte) error { + err := json.Unmarshal(data, t) + if err != nil { + return errors.Wrap(err, "failed to unmarshal tree info") + } + return nil +} + type FinalizedTreeInfo struct { // TreeIndex is the index of the tree used as prefix for the keys, // which is incremented by 1 for each new tree. @@ -30,6 +120,67 @@ type FinalizedTreeInfo struct { ExtraData []byte `json:"extra_data,omitempty"` } +func NewFinalizedTreeInfo(treeIndex uint64, treeHeight uint8, root []byte, startLeafIndex uint64, leafCount uint64, extraData []byte) FinalizedTreeInfo { + return FinalizedTreeInfo{ + TreeIndex: treeIndex, + TreeHeight: treeHeight, + Root: root, + StartLeafIndex: startLeafIndex, + LeafCount: leafCount, + ExtraData: extraData, + } +} + func (f FinalizedTreeInfo) Key() []byte { + // Store the finalized tree information with the start leaf index as its prefix. + // This makes it easier to retrieve proofs using the L2 sequence number of the withdrawal request. + // For more details, see the `GetProofs()` function. return PrefixedFinalizedTreeKey(f.StartLeafIndex) } + +func (f FinalizedTreeInfo) Value() ([]byte, error) { + return f.Marshal() +} + +func (f FinalizedTreeInfo) Marshal() ([]byte, error) { + bz, err := json.Marshal(&f) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal finalized tree info") + } + return bz, nil +} + +func (f *FinalizedTreeInfo) Unmarshal(data []byte) error { + err := json.Unmarshal(data, f) + if err != nil { + return errors.Wrap(err, "failed to unmarshal finalized tree info") + } + return nil +} + +type Node struct { + // TreeIndex is the index of the tree used as prefix for the keys. 
+ TreeIndex uint64 `json:"tree_index"` + // Height of the node in the tree + Height uint8 `json:"height"` + // LocalNodeIndex is the index of the node at the given height + LocalNodeIndex uint64 `json:"local_node_index"` + Data []byte `json:"data"` +} + +func NewNode(treeIndex uint64, height uint8, localNodeIndex uint64, data []byte) Node { + return Node{ + TreeIndex: treeIndex, + Height: height, + LocalNodeIndex: localNodeIndex, + Data: data, + } +} + +func (n Node) Key() []byte { + return PrefixedNodeKey(n.TreeIndex, n.Height, n.LocalNodeIndex) +} + +func (n Node) Value() []byte { + return n.Data +} diff --git a/merkle/types/db_test.go b/merkle/types/db_test.go new file mode 100644 index 0000000..0143f3d --- /dev/null +++ b/merkle/types/db_test.go @@ -0,0 +1,121 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewTreeInfo(t *testing.T) { + tree := NewTreeInfo(10, 1, 2, 3, map[uint8][]byte{1: {0x1}, 2: {0x2}}, true) + require.Equal(t, uint64(1), tree.Index) + require.Equal(t, uint64(2), tree.LeafCount) + require.Equal(t, uint64(3), tree.StartLeafIndex) + require.Equal(t, map[uint8][]byte{1: {0x1}, 2: {0x2}}, tree.LastSiblings) + require.True(t, tree.Done) +} + +func TestTreeKey(t *testing.T) { + tree := NewTreeInfo(10, 1, 2, 3, map[uint8][]byte{1: {0x1}, 2: {0x2}}, true) + require.Equal(t, append(WorkingTreePrefix, []byte{byte('/'), 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xA}...), tree.Key()) +} + +func TestTreeValue(t *testing.T) { + tree := NewTreeInfo(10, 1, 2, 3, map[uint8][]byte{1: {0x1}, 2: {0x2}}, true) + bz, err := tree.Value() + require.NoError(t, err) + require.Equal(t, []byte(`{"version":10,"index":1,"leaf_count":2,"start_leaf_index":3,"last_siblings":{"1":"AQ==","2":"Ag=="},"done":true}`), bz) +} + +func TestTreeMarshal(t *testing.T) { + tree := NewTreeInfo(10, 1, 2, 3, map[uint8][]byte{1: {0x1}, 2: {0x2}}, true) + bz, err := tree.Marshal() + require.NoError(t, err) + require.Equal(t, []byte(`{"version":10,"index":1,"leaf_count":2,"start_leaf_index":3,"last_siblings":{"1":"AQ==","2":"Ag=="},"done":true}`), bz) +} + +func TestTreeUnmarshal(t *testing.T) { + bz := []byte(`{"version":10,"index":1,"leaf_count":2,"start_leaf_index":3,"last_siblings":{"1":"AQ==","2":"Ag=="},"done":true}`) + tree := &TreeInfo{} + err := tree.Unmarshal(bz) + require.NoError(t, err) + + require.Equal(t, uint64(10), tree.Version) + require.Equal(t, uint64(1), tree.Index) + require.Equal(t, uint64(2), tree.LeafCount) + require.Equal(t, uint64(3), tree.StartLeafIndex) + require.Equal(t, map[uint8][]byte{1: {0x1}, 2: {0x2}}, tree.LastSiblings) + require.True(t, tree.Done) + + bz = []byte("") + tree = &TreeInfo{} + err = tree.Unmarshal(bz) + require.Error(t, err) +} + +func TestNewFinalizedTreeInfo(t *testing.T) { + tree := NewFinalizedTreeInfo(1, 2, []byte{0x1}, 3, 4, []byte{0x2}) + require.Equal(t, uint64(1), tree.TreeIndex) + require.Equal(t, uint8(2), tree.TreeHeight) + require.Equal(t, []byte{0x1}, tree.Root) + require.Equal(t, uint64(3), tree.StartLeafIndex) + require.Equal(t, uint64(4), tree.LeafCount) + require.Equal(t, []byte{0x2}, tree.ExtraData) +} + +func TestFinalizedTreeKey(t *testing.T) { + tree := NewFinalizedTreeInfo(1, 2, []byte{0x1}, 3, 4, []byte{0x2}) + require.Equal(t, append(FinalizedTreePrefix, []byte{byte('/'), 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}...), tree.Key()) +} + +func TestFinalizedTreeValue(t *testing.T) { + tree := NewFinalizedTreeInfo(1, 2, []byte{0x1}, 3, 4, []byte{0x2}) + bz, err := tree.Value() + require.NoError(t, err) + 
require.Equal(t, []byte(`{"tree_index":1,"tree_height":2,"root":"AQ==","start_leaf_index":3,"leaf_count":4,"extra_data":"Ag=="}`), bz) +} + +func TestFinalizedTreeMarshal(t *testing.T) { + tree := NewFinalizedTreeInfo(1, 2, []byte{0x1}, 3, 4, []byte{0x2}) + bz, err := tree.Marshal() + require.NoError(t, err) + require.Equal(t, []byte(`{"tree_index":1,"tree_height":2,"root":"AQ==","start_leaf_index":3,"leaf_count":4,"extra_data":"Ag=="}`), bz) +} + +func TestFinalizedTreeUnmarshal(t *testing.T) { + bz := []byte(`{"tree_index":1,"tree_height":2,"root":"AQ==","start_leaf_index":3,"leaf_count":4,"extra_data":"Ag=="}`) + tree := &FinalizedTreeInfo{} + err := tree.Unmarshal(bz) + require.NoError(t, err) + + require.Equal(t, uint64(1), tree.TreeIndex) + require.Equal(t, uint8(2), tree.TreeHeight) + require.Equal(t, []byte{0x1}, tree.Root) + require.Equal(t, uint64(3), tree.StartLeafIndex) + require.Equal(t, uint64(4), tree.LeafCount) + require.Equal(t, []byte{0x2}, tree.ExtraData) + + bz = []byte("") + tree = &FinalizedTreeInfo{} + err = tree.Unmarshal(bz) + require.Error(t, err) +} + +func TestNewNode(t *testing.T) { + node := NewNode(1, 2, 3, []byte{0x1}) + require.Equal(t, uint64(1), node.TreeIndex) + require.Equal(t, uint8(2), node.Height) + require.Equal(t, uint64(3), node.LocalNodeIndex) + require.Equal(t, []byte{0x1}, node.Data) +} + +func TestNodeKey(t *testing.T) { + node := NewNode(1, 2, 3, []byte{0x1}) + require.Equal(t, append(NodePrefix, []byte{byte('/'), 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}...), node.Key()) +} + +func TestNodeValue(t *testing.T) { + node := NewNode(1, 2, 3, []byte{0x1}) + bz := node.Value() + require.Equal(t, []byte{0x1}, bz) +} diff --git a/merkle/types/key.go b/merkle/types/key.go index 1865a24..449a8bc 100644 --- a/merkle/types/key.go +++ b/merkle/types/key.go @@ -2,16 +2,35 @@ package types import ( "encoding/binary" + "fmt" dbtypes "github.com/initia-labs/opinit-bots/db/types" ) var ( - FinalizedTreeKey = []byte("finalized_tree") - WorkingTreeKey = []byte("working_tree") - NodeKey = []byte("node") + FinalizedTreePrefix = []byte("finalized_tree") + WorkingTreePrefix = []byte("working_tree") + NodePrefix = []byte("node") + + FinalizedTreeKeyLength = len(FinalizedTreePrefix) + 1 + 8 + WorkingTreeKeyLength = len(WorkingTreePrefix) + 1 + 8 + NodeKeyLength = len(NodePrefix) + 1 + 8 + 1 + 8 ) +func PrefixedFinalizedTreeKey(startLeafIndex uint64) []byte { + return dbtypes.GenerateKey([][]byte{ + FinalizedTreePrefix, + dbtypes.FromUint64Key(startLeafIndex), + }) +} + +func PrefixedWorkingTreeKey(version uint64) []byte { + return dbtypes.GenerateKey([][]byte{ + WorkingTreePrefix, + dbtypes.FromUint64Key(version), + }) +} + func GetNodeKey(treeIndex uint64, height uint8, nodeIndex uint64) []byte { data := make([]byte, 17) binary.BigEndian.PutUint64(data, treeIndex) @@ -21,13 +40,44 @@ func GetNodeKey(treeIndex uint64, height uint8, nodeIndex uint64) []byte { } func PrefixedNodeKey(treeIndex uint64, height uint8, nodeIndex uint64) []byte { - return append(append(NodeKey, dbtypes.Splitter), GetNodeKey(treeIndex, height, nodeIndex)...) + return dbtypes.GenerateKey([][]byte{ + NodePrefix, + GetNodeKey(treeIndex, height, nodeIndex), + }) } -func PrefixedFinalizedTreeKey(startLeafIndex uint64) []byte { - return append(append(FinalizedTreeKey, dbtypes.Splitter), dbtypes.FromUint64Key(startLeafIndex)...) 
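+// ParseFinalizedTreeKey parses the start leaf index back out of a key built by +// PrefixedFinalizedTreeKey.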
+func ParseFinalizedTreeKey(key []byte) (startLeafIndex uint64, err error) { + if len(key) != FinalizedTreeKeyLength { + return 0, fmt.Errorf("invalid finalized tree key bytes: expected %d, got %d", FinalizedTreeKeyLength, len(key)) + } + cursor := len(FinalizedTreePrefix) + 1 + + startLeafIndex = dbtypes.ToUint64Key(key[cursor : cursor+8]) + return } -func PrefixedWorkingTreeKey(version uint64) []byte { - return append(append(WorkingTreeKey, dbtypes.Splitter), dbtypes.FromUint64Key(version)...) +func ParseWorkingTreeKey(key []byte) (version uint64, err error) { + if len(key) != WorkingTreeKeyLength { + return 0, fmt.Errorf("invalid working tree key bytes: expected %d, got %d", WorkingTreeKeyLength, len(key)) + } + cursor := len(WorkingTreePrefix) + 1 + + version = dbtypes.ToUint64Key(key[cursor : cursor+8]) + return +} + +func ParseNodeKey(key []byte) (treeIndex uint64, height uint8, nodeIndex uint64, err error) { + if len(key) != NodeKeyLength { + return 0, 0, 0, fmt.Errorf("invalid node key bytes: expected %d, got %d", NodeKeyLength, len(key)) + } + cursor := len(NodePrefix) + 1 + + treeIndex = binary.BigEndian.Uint64(key[cursor : cursor+8]) + cursor += 8 + + height = key[cursor] + cursor += 1 + + nodeIndex = binary.BigEndian.Uint64(key[cursor : cursor+8]) + return } diff --git a/merkle/types/key_test.go b/merkle/types/key_test.go new file mode 100644 index 0000000..e231a00 --- /dev/null +++ b/merkle/types/key_test.go @@ -0,0 +1,66 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPrefixedFinalizedTreeKey(t *testing.T) { + startLeafIndex := uint64(256) + key := PrefixedFinalizedTreeKey(startLeafIndex) + require.Equal(t, key, append(FinalizedTreePrefix, []byte{byte('/'), 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0}...)) +} + +func TestPrefixedWorkingTreeKey(t *testing.T) { + treeIndex := uint64(256) + key := PrefixedWorkingTreeKey(treeIndex) + require.Equal(t, key, append(WorkingTreePrefix, []byte{byte('/'), 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0}...)) +} + +func TestGetNodeKey(t *testing.T) { + treeIndex := uint64(256) + height := uint8(3) + nodeIndex := uint64(16) + + key := GetNodeKey(treeIndex, height, nodeIndex) + require.Equal(t, key, []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10}) +} + +func TestPrefixedNodeKey(t *testing.T) { + treeIndex := uint64(256) + height := uint8(3) + nodeIndex := uint64(16) + + key := PrefixedNodeKey(treeIndex, height, nodeIndex) + require.Equal(t, key, append(NodePrefix, []byte{byte('/'), 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10}...)) +} + +func TestParseFinalizedTreeKey(t *testing.T) { + startLeafIndex := uint64(256) + key := PrefixedFinalizedTreeKey(startLeafIndex) + parsedStartLeafIndex, err := ParseFinalizedTreeKey(key) + require.NoError(t, err) + require.Equal(t, startLeafIndex, parsedStartLeafIndex) +} + +func TestParseWorkingTreeKey(t *testing.T) { + treeIndex := uint64(256) + key := PrefixedWorkingTreeKey(treeIndex) + parsedTreeIndex, err := ParseWorkingTreeKey(key) + require.NoError(t, err) + require.Equal(t, treeIndex, parsedTreeIndex) +} + +func TestParseNodeKey(t *testing.T) { + treeIndex := uint64(256) + height := uint8(3) + nodeIndex := uint64(16) + + key := PrefixedNodeKey(treeIndex, height, nodeIndex) + parsedTreeIndex, parsedHeight, parsedNodeIndex, err := ParseNodeKey(key) + require.NoError(t, err) + require.Equal(t, treeIndex, parsedTreeIndex) + require.Equal(t, height, parsedHeight) + require.Equal(t, 
nodeIndex, parsedNodeIndex) +} diff --git a/node/block_handler.go b/node/block_handler.go new file mode 100644 index 0000000..914474f --- /dev/null +++ b/node/block_handler.go @@ -0,0 +1,96 @@ +package node + +import ( + "fmt" + "time" + + abcitypes "github.com/cometbft/cometbft/abci/types" + prototypes "github.com/cometbft/cometbft/proto/tendermint/types" + rpccoretypes "github.com/cometbft/cometbft/rpc/core/types" + comettypes "github.com/cometbft/cometbft/types" + nodetypes "github.com/initia-labs/opinit-bots/node/types" + "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" +) + +// handleBeginBlock handles the begin block. +func (n *Node) handleBeginBlock(ctx types.Context, blockID []byte, protoBlock *prototypes.Block, latestHeight int64) error { + if n.beginBlockHandler != nil { + return n.beginBlockHandler(ctx, nodetypes.BeginBlockArgs{ + BlockID: blockID, + Block: *protoBlock, + LatestHeight: latestHeight, + }) + } + return nil +} + +// handleBlockTxs handles the block transactions. +func (n *Node) handleBlockTxs(ctx types.Context, block *rpccoretypes.ResultBlock, blockResult *rpccoretypes.ResultBlockResults, latestHeight int64) error { + if len(block.Block.Txs) != len(blockResult.TxsResults) { + return fmt.Errorf("mismatch in transactions and results count: %d vs %d", len(block.Block.Txs), len(blockResult.TxsResults)) + } + for txIndex, tx := range block.Block.Txs { + if n.txHandler != nil { + err := n.txHandler(ctx, nodetypes.TxHandlerArgs{ + BlockHeight: block.Block.Height, + BlockTime: block.Block.Time.UTC(), + LatestHeight: latestHeight, + TxIndex: int64(txIndex), + Tx: tx, + Success: blockResult.TxsResults[txIndex].Code == abcitypes.CodeTypeOK, + }) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to handle tx: tx_index: %d", txIndex)) + } + } + + err := n.handleEvents(ctx, block.Block.Height, block.Block.Time.UTC(), blockResult.TxsResults[txIndex].GetEvents(), latestHeight, tx, int64(txIndex)) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to handle events: tx_index: %d", txIndex)) + } + } + return nil +} + +// handleFinalizeBlock handles the finalize block. +func (n *Node) handleFinalizeBlock(ctx types.Context, blockHeight int64, blockTime time.Time, blockResult *rpccoretypes.ResultBlockResults, latestHeight int64) error { + return n.handleEvents(ctx, blockHeight, blockTime, blockResult.FinalizeBlockEvents, latestHeight, nil, 0) +} + +// handleEvents handles the events, which come either from a transaction or from block finalization. +func (n *Node) handleEvents(ctx types.Context, blockHeight int64, blockTime time.Time, events []abcitypes.Event, latestHeight int64, tx comettypes.Tx, txIndex int64) error { + if len(n.eventHandlers) != 0 { + for eventIndex, event := range events { + err := n.handleEvent(ctx, blockHeight, blockTime, latestHeight, tx, txIndex, event) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to handle event: event_index: %d", eventIndex)) + } + } + } + return nil +} + +// handleEndBlock handles the end block. +func (n *Node) handleEndBlock(ctx types.Context, blockID []byte, protoBlock *prototypes.Block, latestHeight int64) error { + if n.endBlockHandler != nil { + return n.endBlockHandler(ctx, nodetypes.EndBlockArgs{ + BlockID: blockID, + Block: *protoBlock, + LatestHeight: latestHeight, + }) + } + return nil +} + +// handleRawBlock handles the raw block bytes. 
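+// Like the other handlers, it is a no-op unless a rawBlockHandler has been registered.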
+func (n *Node) handleRawBlock(ctx types.Context, blockHeight int64, latestHeight int64, blockBytes []byte) error { + if n.rawBlockHandler != nil { + return n.rawBlockHandler(ctx, nodetypes.RawBlockArgs{ + BlockHeight: blockHeight, + LatestHeight: latestHeight, + BlockBytes: blockBytes, + }) + } + return nil +} diff --git a/node/broadcaster/account.go b/node/broadcaster/account.go index 38f356f..52b44a0 100644 --- a/node/broadcaster/account.go +++ b/node/broadcaster/account.go @@ -10,6 +10,7 @@ import ( btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" "github.com/initia-labs/opinit-bots/node/rpcclient" "github.com/initia-labs/opinit-bots/txutils" + "github.com/initia-labs/opinit-bots/types" ctypes "github.com/cometbft/cometbft/rpc/core/types" @@ -23,6 +24,7 @@ import ( authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing" ) +// BroadcasterAccount is an account that can be used to sign and broadcast transactions. type BroadcasterAccount struct { cfg btypes.BroadcasterConfig txf tx.Factory @@ -36,18 +38,20 @@ type BroadcasterAccount struct { address sdk.AccAddress addressString string - BuildTxWithMessages btypes.BuildTxWithMessagesFn - PendingTxToProcessedMsgs btypes.PendingTxToProcessedMsgsFn + // Custom tx building functions, if not provided, the default functions will be used. + BuildTxWithMsgs btypes.BuildTxWithMsgsFn + // Custom tx message extraction function, if not provided, the default function will be used. + MsgsFromTx btypes.MsgsFromTxFn } -func NewBroadcasterAccount(cfg btypes.BroadcasterConfig, cdc codec.Codec, txConfig client.TxConfig, rpcClient *rpcclient.RPCClient, keyringConfig btypes.KeyringConfig) (*BroadcasterAccount, error) { +func NewBroadcasterAccount(ctx types.Context, cfg btypes.BroadcasterConfig, cdc codec.Codec, txConfig client.TxConfig, rpcClient *rpcclient.RPCClient, keyringConfig btypes.KeyringConfig) (*BroadcasterAccount, error) { err := keyringConfig.Validate() if err != nil { return nil, err } // setup keyring - keyBase, keyringRecord, err := cfg.GetKeyringRecord(cdc, &keyringConfig) + keyBase, keyringRecord, err := cfg.GetKeyringRecord(cdc, &keyringConfig, ctx.HomePath()) if err != nil { return nil, err } @@ -74,16 +78,16 @@ func NewBroadcasterAccount(cfg btypes.BroadcasterConfig, cdc codec.Codec, txConf address: addr, addressString: addrStr, - BuildTxWithMessages: keyringConfig.BuildTxWithMessages, - PendingTxToProcessedMsgs: keyringConfig.PendingTxToProcessedMsgs, + BuildTxWithMsgs: keyringConfig.BuildTxWithMsgs, + MsgsFromTx: keyringConfig.MsgsFromTx, } - if b.BuildTxWithMessages == nil { - b.BuildTxWithMessages = b.DefaultBuildTxWithMessages + if b.BuildTxWithMsgs == nil { + b.BuildTxWithMsgs = b.DefaultBuildTxWithMsgs } - if b.PendingTxToProcessedMsgs == nil { - b.PendingTxToProcessedMsgs = b.DefaultPendingTxToProcessedMsgs + if b.MsgsFromTx == nil { + b.MsgsFromTx = b.DefaultMsgsFromTx } b.txf = tx.Factory{}. @@ -97,7 +101,7 @@ func NewBroadcasterAccount(cfg btypes.BroadcasterConfig, cdc codec.Codec, txConf if keyringConfig.FeeGranter != nil { // setup keyring - _, feeGranterKeyringRecord, err := cfg.GetKeyringRecord(cdc, keyringConfig.FeeGranter) + _, feeGranterKeyringRecord, err := cfg.GetKeyringRecord(cdc, keyringConfig.FeeGranter, ctx.HomePath()) if err != nil { return nil, err } @@ -123,6 +127,7 @@ func (b BroadcasterAccount) Bech32Prefix() string { return b.cfg.Bech32Prefix } +// Load function loads the account sequence number and account number. 
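+// Both values are queried from the chain and are needed to sign transactions with the correct sequence.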
func (b *BroadcasterAccount) Load(ctx context.Context) error { account, err := b.GetAccount(b.getClientCtx(ctx), b.address) if err != nil { @@ -242,6 +247,7 @@ func (b BroadcasterAccount) adjustEstimatedGas(gasUsed uint64) (uint64, error) { return uint64(gas), nil } +// SimulateAndSignTx simulates the transaction, adjusts the gas, and signs the transaction. func (b BroadcasterAccount) SimulateAndSignTx(ctx context.Context, msgs ...sdk.Msg) (authsigning.Tx, error) { _, adjusted, err := b.CalculateGas(ctx, msgs...) if err != nil { @@ -260,8 +266,8 @@ func (b BroadcasterAccount) SimulateAndSignTx(ctx context.Context, msgs ...sdk.M return txb.GetTx(), nil } -// buildTxWithMessages creates a transaction from the given messages. -func (b *BroadcasterAccount) DefaultBuildTxWithMessages( +// DefaultBuildTxWithMsgs creates a transaction with the provided messages and returns the encoded transaction. +func (b *BroadcasterAccount) DefaultBuildTxWithMsgs( ctx context.Context, msgs []sdk.Msg, ) ( @@ -278,10 +284,11 @@ func (b *BroadcasterAccount) DefaultBuildTxWithMessages( if err != nil { return nil, "", err } - return txBytes, btypes.TxHash(txBytes), nil + return txBytes, txutils.TxHash(txBytes), nil } -func (b *BroadcasterAccount) DefaultPendingTxToProcessedMsgs( +// DefaultMsgsFromTx extracts the messages from the transaction bytes. +func (b *BroadcasterAccount) DefaultMsgsFromTx( txBytes []byte, ) ([]sdk.Msg, error) { tx, err := txutils.DecodeTx(b.txConfig, txBytes) diff --git a/node/broadcaster/broadcaster.go b/node/broadcaster/broadcaster.go index e80b84d..97391c6 100644 --- a/node/broadcaster/broadcaster.go +++ b/node/broadcaster/broadcaster.go @@ -1,7 +1,7 @@ package broadcaster import ( - "context" + "encoding/hex" "fmt" "slices" "sync" @@ -15,6 +15,7 @@ import ( "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" "github.com/initia-labs/opinit-bots/node/rpcclient" @@ -26,37 +27,36 @@ type Broadcaster struct { db types.DB cdc codec.Codec - logger *zap.Logger rpcClient *rpcclient.RPCClient - txConfig client.TxConfig - accounts []*BroadcasterAccount + txConfig client.TxConfig + accounts []*BroadcasterAccount + // address -> account index addressAccountMap map[string]int accountMu *sync.Mutex + // tx channel to receive processed msgs txChannel chan btypes.ProcessedMsgs txChannelStopped chan struct{} - // local pending txs, which is following Queue data structure pendingTxMu *sync.Mutex - pendingTxs []btypes.PendingTxInfo + // local pending txs, which is following Queue data structure + pendingTxs []btypes.PendingTxInfo - pendingProcessedMsgs []btypes.ProcessedMsgs + pendingProcessedMsgsBatch []btypes.ProcessedMsgs - lastProcessedBlockHeight int64 + syncedHeight int64 } func NewBroadcaster( cfg btypes.BroadcasterConfig, db types.DB, - logger *zap.Logger, cdc codec.Codec, txConfig client.TxConfig, rpcClient *rpcclient.RPCClient, ) (*Broadcaster, error) { b := &Broadcaster{ cdc: cdc, - logger: logger, db: db, rpcClient: rpcClient, @@ -68,9 +68,9 @@ func NewBroadcaster( txChannel: make(chan btypes.ProcessedMsgs), txChannelStopped: make(chan struct{}), - pendingTxMu: &sync.Mutex{}, - pendingTxs: make([]btypes.PendingTxInfo, 0), - pendingProcessedMsgs: make([]btypes.ProcessedMsgs, 0), + pendingTxMu: &sync.Mutex{}, + pendingTxs: make([]btypes.PendingTxInfo, 0), + pendingProcessedMsgsBatch: make([]btypes.ProcessedMsgs, 0), } // validate broadcaster config @@ -88,9 +88,11 
@@ func NewBroadcaster( return b, nil } -func (b *Broadcaster) Initialize(ctx context.Context, status *rpccoretypes.ResultStatus, keyringConfigs []btypes.KeyringConfig) error { +// Initialize initializes the broadcaster with the given keyring configs. +// It loads pending txs and processed msgs batch from the db and prepares the broadcaster. +func (b *Broadcaster) Initialize(ctx types.Context, status *rpccoretypes.ResultStatus, keyringConfigs []btypes.KeyringConfig) error { for _, keyringConfig := range keyringConfigs { - account, err := NewBroadcasterAccount(b.cfg, b.cdc, b.txConfig, b.rpcClient, keyringConfig) + account, err := NewBroadcasterAccount(ctx, b.cfg, b.cdc, b.txConfig, b.rpcClient, keyringConfig) if err != nil { return err } @@ -102,125 +104,199 @@ func (b *Broadcaster) Initialize(ctx context.Context, status *rpccoretypes.Resul b.addressAccountMap[account.GetAddressString()] = len(b.accounts) - 1 } - // prepare broadcaster err := b.prepareBroadcaster(ctx, status.SyncInfo.LatestBlockTime) return errors.Wrap(err, "failed to prepare broadcaster") } -func (b Broadcaster) GetHeight() int64 { - return b.lastProcessedBlockHeight + 1 -} - +// SetSyncInfo sets the synced height of the broadcaster. func (b *Broadcaster) SetSyncInfo(height int64) { - b.lastProcessedBlockHeight = height + b.syncedHeight = height } -func (b *Broadcaster) prepareBroadcaster(ctx context.Context, lastBlockTime time.Time) error { - dbBatchKVs := make([]types.RawKV, 0) +// prepareBroadcaster prepares the broadcaster by loading pending txs and processed msgs batch from the db. +func (b *Broadcaster) prepareBroadcaster(ctx types.Context, lastBlockTime time.Time) error { + stage := b.db.NewStage() - loadedPendingTxs, err := b.loadPendingTxs() + err := b.loadPendingTxs(ctx, stage, lastBlockTime) if err != nil { return err } - if len(loadedPendingTxs) > 0 { - pendingTxTime := time.Unix(0, loadedPendingTxs[0].Timestamp) - // if we have pending txs, wait until timeout - if timeoutTime := pendingTxTime.Add(b.cfg.TxTimeout); lastBlockTime.Before(timeoutTime) { - waitingTime := timeoutTime.Sub(lastBlockTime) - timer := time.NewTimer(waitingTime) - b.logger.Info("waiting for pending txs to be processed", zap.Duration("waiting_time", waitingTime)) + err = b.loadProcessedMsgsBatch(ctx, stage) + if err != nil { + return err + } + + err = SaveProcessedMsgsBatch(stage, b.cdc, b.pendingProcessedMsgsBatch) + if err != nil { + return err + } + + return stage.Commit() +} + +// loadPendingTxs loads pending txs from db and waits until timeout if there are pending txs. 
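+// While waiting, each pending tx is polled on chain: txs found to be included are deleted from +// the db right away, and whatever is still pending at timeout is converted back into processed +// msgs for rebroadcast. A rough sketch of the timeout check (using the first pending tx's timestamp): +// +// timeout := time.Unix(0, pendingTxs[0].Timestamp).UTC().Add(b.cfg.TxTimeout) +// if lastBlockTime.Before(timeout) { /* poll until the timer fires */ }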
+func (b *Broadcaster) loadPendingTxs(ctx types.Context, stage types.BasicDB, lastBlockTime time.Time) error { + pendingTxs, err := LoadPendingTxs(b.db) + if err != nil { + return err + } + ctx.Logger().Debug("load pending txs", zap.Int("count", len(pendingTxs))) + + if len(pendingTxs) == 0 { + return nil + } + + pendingTxTime := time.Unix(0, pendingTxs[0].Timestamp).UTC() + + // if we have pending txs, wait until timeout + if timeoutTime := pendingTxTime.Add(b.cfg.TxTimeout); lastBlockTime.Before(timeoutTime) { + waitingTime := timeoutTime.Sub(lastBlockTime) + timer := time.NewTimer(waitingTime) + defer timer.Stop() + + ctx.Logger().Info("waiting for pending txs to be processed", zap.Duration("waiting_time", waitingTime)) + + pollingTimer := time.NewTicker(ctx.PollingInterval()) + defer pollingTimer.Stop() + +WAIT: + for { select { case <-ctx.Done(): return ctx.Err() case <-timer.C: + break WAIT + case <-pollingTimer.C: } - } - // convert pending txs to raw kv pairs for deletion - pendingKVs, err := b.PendingTxsToRawKV(loadedPendingTxs, true) - if err != nil { - return err - } - - // add pending txs delegation to db batch - dbBatchKVs = append(dbBatchKVs, pendingKVs...) - - // convert pending txs to pending msgs - for i, txInfo := range loadedPendingTxs { - account, err := b.AccountByAddress(txInfo.Sender) - if err != nil { - return err + if len(pendingTxs) == 0 { + return nil } - msgs, err := account.PendingTxToProcessedMsgs(txInfo.Tx) + + txHash, err := hex.DecodeString(pendingTxs[0].TxHash) if err != nil { return err } - if txInfo.Save { - for i := 0; i < len(msgs); i += 5 { - end := i + 5 - if end > len(msgs) { - end = len(msgs) - } - - b.pendingProcessedMsgs = append(b.pendingProcessedMsgs, btypes.ProcessedMsgs{ - Sender: txInfo.Sender, - Msgs: slices.Clone(msgs[i:end]), - Timestamp: time.Now().UnixNano(), - Save: true, - }) + res, err := b.rpcClient.QueryTx(ctx, txHash) + if err == nil && res != nil && res.TxResult.Code == 0 { + ctx.Logger().Debug("transaction successfully included", + zap.String("hash", pendingTxs[0].TxHash), + zap.Int64("height", res.Height)) + err = DeletePendingTx(b.db, pendingTxs[0]) + if err != nil { + return err } + pendingTxs = pendingTxs[1:] + } else if err == nil && res != nil { + ctx.Logger().Warn("transaction failed", + zap.String("hash", pendingTxs[0].TxHash), + zap.Uint32("code", res.TxResult.Code), + zap.String("log", res.TxResult.Log)) } - - b.logger.Debug("pending tx", zap.Int("index", i), zap.String("tx", txInfo.String())) } } - loadedProcessedMsgs, err := b.loadProcessedMsgs() + err = DeletePendingTxs(stage, pendingTxs) if err != nil { return err } - // need to remove processed msgs from db before updating the timestamp - // because the timestamp is used as a key. - kvProcessedMsgs, err := b.ProcessedMsgsToRawKV(loadedProcessedMsgs, true) + processedMsgsBatch, err := b.pendingTxsToProcessedMsgsBatch(ctx, pendingTxs) if err != nil { return err } - dbBatchKVs = append(dbBatchKVs, kvProcessedMsgs...) - - // update timestamp of loaded processed msgs - for i, pendingMsgs := range loadedProcessedMsgs { - loadedProcessedMsgs[i].Timestamp = time.Now().UnixNano() - b.logger.Debug("pending msgs", zap.Int("index", i), zap.String("msgs", pendingMsgs.String())) - } + b.pendingProcessedMsgsBatch = append(b.pendingProcessedMsgsBatch, processedMsgsBatch...) + return nil +} - // save all pending msgs with updated timestamp to db - b.pendingProcessedMsgs = append(b.pendingProcessedMsgs, loadedProcessedMsgs...)
- kvProcessedMsgs, err = b.ProcessedMsgsToRawKV(b.pendingProcessedMsgs, false) +// loadProcessedMsgsBatch loads processed msgs batch from db and updates the timestamp. +func (b *Broadcaster) loadProcessedMsgsBatch(ctx types.Context, stage types.BasicDB) error { + processedMsgsBatch, err := LoadProcessedMsgsBatch(b.db, b.cdc) if err != nil { return err } - dbBatchKVs = append(dbBatchKVs, kvProcessedMsgs...) + ctx.Logger().Debug("load pending processed msgs", zap.Int("count", len(processedMsgsBatch))) - // save all pending msgs first, then broadcast them - err = b.db.RawBatchSet(dbBatchKVs...) + // need to remove processed msgs from db before updating the timestamp + // because the timestamp is used as a key. + err = DeleteProcessedMsgsBatch(stage, processedMsgsBatch) if err != nil { return err } + + // update timestamp of loaded processed msgs + for i := range processedMsgsBatch { + processedMsgsBatch[i].Timestamp = types.CurrentNanoTimestamp() + ctx.Logger().Debug("pending msgs", zap.Int("index", i), zap.String("msgs", processedMsgsBatch[i].String())) + } + + // save all pending msgs with updated timestamp to db + b.pendingProcessedMsgsBatch = append(b.pendingProcessedMsgsBatch, processedMsgsBatch...) return nil } -func (b Broadcaster) AccountByIndex(index int) (*BroadcasterAccount, error) { - b.accountMu.Lock() - defer b.accountMu.Unlock() - if len(b.accounts) <= index { - return nil, fmt.Errorf("broadcaster account not found; length: %d, index: %d", len(b.accounts), index) + return nil + } - return b.accounts[index], nil +// pendingTxsToProcessedMsgsBatch converts pending txs to processed msgs batch. +func (b *Broadcaster) pendingTxsToProcessedMsgsBatch(ctx types.Context, pendingTxs []btypes.PendingTxInfo) ([]btypes.ProcessedMsgs, error) { + pendingProcessedMsgsBatch := make([]btypes.ProcessedMsgs, 0) + queues := make(map[string][]sdk.Msg) + + // convert pending txs to pending msgs + for i, pendingTx := range pendingTxs { + if !pendingTx.Save { + continue + } + + account, err := b.AccountByAddress(pendingTx.Sender) + if err != nil { + return nil, err + } + msgs, err := account.MsgsFromTx(pendingTx.Tx) + if err != nil { + return nil, err + } + queues[pendingTx.Sender] = append(queues[pendingTx.Sender], msgs...) + ctx.Logger().Debug("pending tx", zap.Int("index", i), zap.String("tx", pendingTx.String())) } - return b.accounts[index], nil + + pendingProcessedMsgsBatch = append(pendingProcessedMsgsBatch, MsgsToProcessedMsgs(queues)...) + return pendingProcessedMsgsBatch, nil +} + +// GetHeight returns the current height of the broadcaster. +func (b Broadcaster) GetHeight() int64 { + return b.syncedHeight + 1 +} + +// UpdateSyncedHeight updates the synced height of the broadcaster. +func (b *Broadcaster) UpdateSyncedHeight(height int64) { + b.syncedHeight = height } +// MsgsToProcessedMsgs converts msgs to processed msgs. +// It splits msgs into chunks of 5 msgs and creates processed msgs for each chunk. +func MsgsToProcessedMsgs(queues map[string][]sdk.Msg) []btypes.ProcessedMsgs { + res := make([]btypes.ProcessedMsgs, 0) + for sender := range queues { + msgs := queues[sender] + for i := 0; i < len(msgs); i += 5 { + end := i + 5 + if end > len(msgs) { + end = len(msgs) + } + + res = append(res, btypes.ProcessedMsgs{ + Sender: sender, + Msgs: slices.Clone(msgs[i:end]), + Timestamp: types.CurrentNanoTimestamp(), + Save: true, + }) + } + } + return res +} + +// AccountByAddress returns the broadcaster account by the given address. 
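+// Accounts are registered during Initialize; an unknown address returns an error.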
func (b Broadcaster) AccountByAddress(address string) (*BroadcasterAccount, error) { b.accountMu.Lock() defer b.accountMu.Unlock() @@ -229,3 +305,13 @@ func (b Broadcaster) AccountByAddress(address string) (*BroadcasterAccount, erro } return b.accounts[b.addressAccountMap[address]], nil } + +// AccountByIndex returns the broadcaster account by the given index. +func (b Broadcaster) AccountByIndex(index int) (*BroadcasterAccount, error) { + b.accountMu.Lock() + defer b.accountMu.Unlock() + if len(b.accounts) <= index { + return nil, fmt.Errorf("broadcaster account not found; length: %d, index: %d", len(b.accounts), index) + } + return b.accounts[index], nil +} diff --git a/node/broadcaster/db.go b/node/broadcaster/db.go index 9781b31..bc7a82d 100644 --- a/node/broadcaster/db.go +++ b/node/broadcaster/db.go @@ -1,30 +1,34 @@ package broadcaster import ( - "go.uber.org/zap" - + dbtypes "github.com/initia-labs/opinit-bots/db/types" btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" "github.com/initia-labs/opinit-bots/types" + + "github.com/cosmos/cosmos-sdk/codec" ) /////////////// // PendingTx // /////////////// -func (b Broadcaster) savePendingTx(pendingTx btypes.PendingTxInfo) error { - data, err := pendingTx.Marshal() +// SavePendingTx saves pending tx +func SavePendingTx(db types.BasicDB, pendingTx btypes.PendingTxInfo) error { + data, err := pendingTx.Value() if err != nil { return err } - return b.db.Set(btypes.PrefixedPendingTx(types.MustInt64ToUint64(pendingTx.Timestamp)), data) + return db.Set(pendingTx.Key(), data) } -func (b Broadcaster) deletePendingTx(pendingTx btypes.PendingTxInfo) error { - return b.db.Delete(btypes.PrefixedPendingTx(types.MustInt64ToUint64(pendingTx.Timestamp))) +// DeletePendingTx deletes pending tx +func DeletePendingTx(db types.BasicDB, pendingTx btypes.PendingTxInfo) error { + return db.Delete(pendingTx.Key()) } -func (b Broadcaster) loadPendingTxs() (txs []btypes.PendingTxInfo, err error) { - iterErr := b.db.PrefixedIterate(btypes.PendingTxsKey, nil, func(_, value []byte) (stop bool, err error) { +// LoadPendingTxs loads all pending txs +func LoadPendingTxs(db types.DB) (txs []btypes.PendingTxInfo, err error) { + iterErr := db.Iterate(dbtypes.AppendSplitter(btypes.PendingTxsPrefix), nil, func(_, value []byte) (stop bool, err error) { txInfo := btypes.PendingTxInfo{} err = txInfo.Unmarshal(value) if err != nil { @@ -36,93 +40,101 @@ func (b Broadcaster) loadPendingTxs() (txs []btypes.PendingTxInfo, err error) { if iterErr != nil { return nil, iterErr } - - b.logger.Debug("load pending txs", zap.Int("count", len(txs))) return txs, err } -// PendingTxsToRawKV converts pending txs to raw kv pairs. -// If delete is true, it will return kv pairs for deletion (empty value). 
-func (b Broadcaster) PendingTxsToRawKV(txInfos []btypes.PendingTxInfo, delete bool) ([]types.RawKV, error) { - kvs := make([]types.RawKV, 0, len(txInfos)) +// SavePendingTxs saves all pending txs +func SavePendingTxs(db types.BasicDB, txInfos []btypes.PendingTxInfo) error { for _, txInfo := range txInfos { - var data []byte - var err error - - if !delete { - if !txInfo.Save { - continue - } - data, err = txInfo.Marshal() - if err != nil { - return nil, err - } + if !txInfo.Save { + continue + } + err := SavePendingTx(db, txInfo) + if err != nil { + return err } - kvs = append(kvs, types.RawKV{ - Key: b.db.PrefixedKey(btypes.PrefixedPendingTx(types.MustInt64ToUint64(txInfo.Timestamp))), - Value: data, - }) } - return kvs, nil + return nil +} + +// DeletePendingTxs deletes all pending txs +func DeletePendingTxs(db types.BasicDB, txInfos []btypes.PendingTxInfo) error { + for _, txInfo := range txInfos { + if err := DeletePendingTx(db, txInfo); err != nil { + return err + } + } + return nil } /////////////////// // ProcessedMsgs // /////////////////// -// ProcessedMsgsToRawKV converts processed data to raw kv pairs. -// If delete is true, it will return kv pairs for deletion (empty value). -func (b Broadcaster) ProcessedMsgsToRawKV(ProcessedMsgs []btypes.ProcessedMsgs, delete bool) ([]types.RawKV, error) { - kvs := make([]types.RawKV, 0, len(ProcessedMsgs)) - for _, processedMsgs := range ProcessedMsgs { - var data []byte - var err error - - if !delete { - if !processedMsgs.Save { - continue - } - - data, err = processedMsgs.MarshalInterfaceJSON(b.cdc) - if err != nil { - return nil, err - } +// SaveProcessedMsgs saves processed messages +func SaveProcessedMsgs(db types.BasicDB, cdc codec.Codec, processedMsgs btypes.ProcessedMsgs) error { + data, err := processedMsgs.Value(cdc) + if err != nil { + return err + } + + err = db.Set(processedMsgs.Key(), data) + if err != nil { + return err + } + return nil +} + +// DeleteProcessedMsgs deletes processed messages +func DeleteProcessedMsgs(db types.BasicDB, processedMsgs btypes.ProcessedMsgs) error { + return db.Delete(processedMsgs.Key()) +} + +// SaveProcessedMsgsBatch saves all processed messages in the batch +func SaveProcessedMsgsBatch(db types.BasicDB, cdc codec.Codec, processedMsgsBatch []btypes.ProcessedMsgs) error { + for _, processedMsgs := range processedMsgsBatch { + if !processedMsgs.Save { + continue + } + + data, err := processedMsgs.Value(cdc) + if err != nil { + return err + } + + err = db.Set(processedMsgs.Key(), data) + if err != nil { + return err } - kvs = append(kvs, types.RawKV{ - Key: b.db.PrefixedKey(btypes.PrefixedProcessedMsgs(types.MustInt64ToUint64(processedMsgs.Timestamp))), - Value: data, - }) } - return kvs, nil + return nil } -// currently no use case, but keep it for future use -// func (n *Broadcaster) saveProcessedMsgs(processedMsgs btypes.ProcessedMsgs) error { -// data, err := processedMsgs.Marshal() -// if err != nil { -// return err -// } -// return b.db.Set(btypes.PrefixedProcessedMsgs(uint64(processedMsgs.Timestamp)), data) -// } - -func (b Broadcaster) loadProcessedMsgs() (ProcessedMsgs []btypes.ProcessedMsgs, err error) { - iterErr := b.db.PrefixedIterate(btypes.ProcessedMsgsKey, nil, func(_, value []byte) (stop bool, err error) { +// LoadProcessedMsgsBatch loads all processed messages in the batch +func LoadProcessedMsgsBatch(db types.DB, cdc codec.Codec) (processedMsgsBatch []btypes.ProcessedMsgs, err error) { + iterErr := db.Iterate(dbtypes.AppendSplitter(btypes.ProcessedMsgsPrefix), nil, func(_, value 
[]byte) (stop bool, err error) { var processedMsgs btypes.ProcessedMsgs - err = processedMsgs.UnmarshalInterfaceJSON(b.cdc, value) + err = processedMsgs.UnmarshalInterfaceJSON(cdc, value) if err != nil { return true, err } - ProcessedMsgs = append(ProcessedMsgs, processedMsgs) + processedMsgsBatch = append(processedMsgsBatch, processedMsgs) return false, nil }) if iterErr != nil { return nil, iterErr } - b.logger.Debug("load pending processed msgs", zap.Int("count", len(ProcessedMsgs))) - return ProcessedMsgs, nil + return processedMsgsBatch, nil } -func (b Broadcaster) deleteProcessedMsgs(timestamp int64) error { - return b.db.Delete(btypes.PrefixedProcessedMsgs(types.MustInt64ToUint64(timestamp))) +// DeleteProcessedMsgsBatch deletes all processed messages in the batch +func DeleteProcessedMsgsBatch(db types.BasicDB, processedMsgsBatch []btypes.ProcessedMsgs) error { + for _, processedMsgs := range processedMsgsBatch { + err := DeleteProcessedMsgs(db, processedMsgs) + if err != nil { + return err + } + } + return nil } diff --git a/node/broadcaster/process.go b/node/broadcaster/process.go index 2bf5803..da569c0 100644 --- a/node/broadcaster/process.go +++ b/node/broadcaster/process.go @@ -1,7 +1,6 @@ package broadcaster import ( - "context" "encoding/hex" "fmt" "math" @@ -22,7 +21,7 @@ func IsTxNotFoundErr(err error, txHash string) bool { } // CheckPendingTx query tx info to check if pending tx is processed. -func (b *Broadcaster) CheckPendingTx(ctx context.Context, pendingTx btypes.PendingTxInfo) (*rpccoretypes.ResultTx, time.Time, error) { +func (b *Broadcaster) CheckPendingTx(ctx types.Context, pendingTx btypes.PendingTxInfo) (*rpccoretypes.ResultTx, time.Time, error) { txHash, err := hex.DecodeString(pendingTx.TxHash) if err != nil { return nil, time.Time{}, err @@ -32,15 +31,16 @@ func (b *Broadcaster) CheckPendingTx(ctx context.Context, pendingTx btypes.Pendi if txerr != nil && IsTxNotFoundErr(txerr, pendingTx.TxHash) { // if the tx is not found, it means the tx is not processed yet // or the tx is not indexed by the node in rare cases. + pendingTxTime := time.Unix(0, pendingTx.Timestamp).UTC() + lastHeader, err := b.rpcClient.Header(ctx, nil) if err != nil { return nil, time.Time{}, err } - pendingTxTime := time.Unix(0, pendingTx.Timestamp) // before timeout if lastHeader.Header.Time.Before(pendingTxTime.Add(b.cfg.TxTimeout)) { - b.logger.Debug("failed to query tx", zap.String("tx_hash", pendingTx.TxHash), zap.String("error", txerr.Error())) + ctx.Logger().Debug("failed to query tx", zap.String("tx_hash", pendingTx.TxHash), zap.String("error", txerr.Error())) return nil, time.Time{}, types.ErrTxNotFound } else { // timeout case @@ -75,8 +75,8 @@ func (b *Broadcaster) CheckPendingTx(ctx context.Context, pendingTx btypes.Pendi // RemovePendingTx remove pending tx from local pending txs. // It is called when the pending tx is included in the block. 
-func (b *Broadcaster) RemovePendingTx(pendingTx btypes.PendingTxInfo) error {
-	err := b.deletePendingTx(pendingTx)
+func (b *Broadcaster) RemovePendingTx(ctx types.Context, pendingTx btypes.PendingTxInfo) error {
+	err := DeletePendingTx(b.db, pendingTx)
 	if err != nil {
 		return err
 	}
@@ -86,37 +86,37 @@ func (b *Broadcaster) RemovePendingTx(pendingTx btypes.PendingTxInfo) error {
 }
 
 // Start broadcaster loop
-func (b *Broadcaster) Start(ctx context.Context) error {
+func (b *Broadcaster) Start(ctx types.Context) error {
 	defer close(b.txChannelStopped)
 
 	for {
 		select {
 		case <-ctx.Done():
 			return nil
-		case data := <-b.txChannel:
+		case msgs := <-b.txChannel:
 			var err error
-			broadcasterAccount, err := b.AccountByAddress(data.Sender)
+			broadcasterAccount, err := b.AccountByAddress(msgs.Sender)
 			if err != nil {
 				return err
 			}
 			for retry := 1; retry <= types.MaxRetryCount; retry++ {
-				err = b.handleProcessedMsgs(ctx, data, broadcasterAccount)
+				err = b.handleProcessedMsgs(ctx, msgs, broadcasterAccount)
 				if err == nil {
 					break
-				} else if err = b.handleMsgError(err, broadcasterAccount); err == nil {
+				} else if err = b.handleMsgError(ctx, err, broadcasterAccount); err == nil {
 					// if the error is handled, we can delete the processed msgs
-					err = b.deleteProcessedMsgs(data.Timestamp)
+					err = DeleteProcessedMsgs(b.db, msgs)
 					if err != nil {
 						return err
 					}
 					break
-				} else if !data.Save {
-					b.logger.Warn("discard msgs: failed to handle processed msgs", zap.String("error", err.Error()))
+				} else if !msgs.Save {
+					ctx.Logger().Warn("discard msgs: failed to handle processed msgs", zap.String("error", err.Error()))
 					// if the message does not need to be saved, we can skip retry
 					err = nil
 					break
 				}
-				b.logger.Warn(fmt.Sprintf("retry to handle processed msgs after %d seconds", int(2*math.Exp2(float64(retry)))), zap.Int("count", retry), zap.String("error", err.Error()))
+				ctx.Logger().Warn("retry to handle processed msgs", zap.Int("seconds", int(2*math.Exp2(float64(retry)))), zap.Int("count", retry), zap.String("error", err.Error()))
 				if types.SleepWithRetry(ctx, retry) {
 					return nil
 				}
@@ -128,15 +128,16 @@ func (b *Broadcaster) Start(ctx context.Context) error {
 	}
 }
 
-// @dev: these pending processed data is filled at initialization(`NewBroadcaster`).
+// BroadcastPendingProcessedMsgs broadcasts pending processed messages to the Broadcaster.
+// It is called before the node processes the block.
 func (b Broadcaster) BroadcastPendingProcessedMsgs() {
-	for _, processedMsg := range b.pendingProcessedMsgs {
-		b.BroadcastMsgs(processedMsg)
+	for _, processedMsg := range b.pendingProcessedMsgsBatch {
+		b.BroadcastProcessedMsgs(processedMsg)
 	}
 }
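The retry branch above logs a backoff of 2 * 2^retry seconds and then calls types.SleepWithRetry to do the actual waiting. Below is a minimal, self-contained sketch of that backoff shape; sleepWithRetry is a hypothetical stand-in for types.SleepWithRetry, whose real implementation is not shown in this diff.

package main

import (
	"context"
	"fmt"
	"math"
	"time"
)

// sleepWithRetry waits 2 * 2^retry seconds or until the context is
// cancelled; it reports true when the caller should stop retrying.
// This is a stand-in inferred from how the broadcast loop uses
// types.SleepWithRetry; the actual implementation may differ.
func sleepWithRetry(ctx context.Context, retry int) bool {
	backoff := time.Duration(2*math.Exp2(float64(retry))) * time.Second
	select {
	case <-ctx.Done():
		return true
	case <-time.After(backoff):
		return false
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	for retry := 1; retry <= 3; retry++ {
		fmt.Printf("attempt %d failed, backing off %v\n", retry, time.Duration(2*math.Exp2(float64(retry)))*time.Second)
		if sleepWithRetry(ctx, retry) {
			fmt.Println("context done, giving up")
			return
		}
	}
}

With retry = 1, 2, 3 this waits 4s, 8s, and 16s, matching the seconds field logged above.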
-// BroadcastTxSync broadcasts transaction bytes to txBroadcastLooper.
-func (b Broadcaster) BroadcastMsgs(msgs btypes.ProcessedMsgs) {
+// BroadcastProcessedMsgs broadcasts processed messages to the Broadcaster.
+func (b Broadcaster) BroadcastProcessedMsgs(msgs btypes.ProcessedMsgs) {
 	if b.txChannel == nil {
 		return
 	}
diff --git a/node/broadcaster/testutil.go b/node/broadcaster/testutil.go
new file mode 100644
index 0000000..e4fda25
--- /dev/null
+++ b/node/broadcaster/testutil.go
@@ -0,0 +1,74 @@
+package broadcaster
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/initia-labs/opinit-bots/keys"
+	btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types"
+	"github.com/initia-labs/opinit-bots/node/rpcclient"
+	"github.com/initia-labs/opinit-bots/types"
+
+	"github.com/cosmos/cosmos-sdk/client"
+	"github.com/cosmos/cosmos-sdk/codec"
+	"github.com/cosmos/cosmos-sdk/crypto/hd"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+func NewTestBroadcaster(cdc codec.Codec, db types.DB, rpcClient *rpcclient.RPCClient, txConfig client.TxConfig, prefix string, numAccounts int) (*Broadcaster, error) {
+	b := &Broadcaster{
+		cdc:       cdc,
+		db:        db,
+		rpcClient: rpcClient,
+
+		txConfig:          txConfig,
+		accounts:          make([]*BroadcasterAccount, 0),
+		addressAccountMap: make(map[string]int),
+		accountMu:         &sync.Mutex{},
+
+		txChannel:        make(chan btypes.ProcessedMsgs),
+		txChannelStopped: make(chan struct{}),
+
+		pendingTxMu:               &sync.Mutex{},
+		pendingTxs:                make([]btypes.PendingTxInfo, 0),
+		pendingProcessedMsgsBatch: make([]btypes.ProcessedMsgs, 0),
+	}
+
+	for i := 0; i < numAccounts; i++ {
+		keybase, err := keys.GetKeyBase("", "", cdc, nil)
+		if err != nil {
+			return nil, err
+		}
+
+		mnemonic, err := keys.CreateMnemonic()
+		if err != nil {
+			return nil, err
+		}
+		keyName := fmt.Sprintf("%d", i)
+		account, err := keybase.NewAccount(keyName, mnemonic, "", hd.CreateHDPath(sdk.CoinType, 0, 0).String(), hd.Secp256k1)
+		if err != nil {
+			return nil, err
+		}
+		addr, err := account.GetAddress()
+		if err != nil {
+			return nil, err
+		}
+		addrString, err := keys.EncodeBech32AccAddr(addr, prefix)
+		if err != nil {
+			return nil, err
+		}
+
+		broadcasterAccount := &BroadcasterAccount{
+			cdc:           cdc,
+			txConfig:      txConfig,
+			rpcClient:     rpcClient,
+			keyName:       keyName,
+			keyBase:       keybase,
+			keyringRecord: account,
+			address:       addr,
+			addressString: addrString,
+		}
+		b.accounts = append(b.accounts, broadcasterAccount)
+	}
+	return b, nil
+}
diff --git a/node/broadcaster/tx.go b/node/broadcaster/tx.go
index 1b761c3..d4b755e 100644
--- a/node/broadcaster/tx.go
+++ b/node/broadcaster/tx.go
@@ -1,7 +1,6 @@
 package broadcaster
 
 import (
-	"context"
 	"fmt"
 	"regexp"
 	"strconv"
@@ -14,6 +13,7 @@ import (
 	btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types"
 
 	opchildtypes "github.com/initia-labs/OPinit/x/opchild/types"
+	"github.com/initia-labs/opinit-bots/types"
 )
 
 var ignoringErrors = []error{
@@ -25,7 +25,9 @@ var ignoringErrors = []error{
 var accountSeqRegex = regexp.MustCompile("account sequence mismatch, expected ([0-9]+), got ([0-9]+)")
 var outputIndexRegex = regexp.MustCompile("expected ([0-9]+), got ([0-9]+): invalid output index")
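The accountSeqRegex above is what handleMsgError (next hunk) uses to recover the expected and got sequence numbers from a node error string. A small runnable illustration of that parse; the error message literal is a representative CometBFT-style string, not output captured from this repo.

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var accountSeqRegex = regexp.MustCompile("account sequence mismatch, expected ([0-9]+), got ([0-9]+)")

func main() {
	// a typical CheckTx error string returned by a node (illustrative)
	errMsg := "account sequence mismatch, expected 7, got 5: incorrect account sequence"

	strs := accountSeqRegex.FindStringSubmatch(errMsg)
	if strs == nil {
		fmt.Println("not a sequence mismatch error")
		return
	}
	expected, _ := strconv.ParseUint(strs[1], 10, 64) // errors skipped for brevity
	got, _ := strconv.ParseUint(strs[2], 10, 64)

	// the hunk below shows the visible branch: when expected > got the
	// error is logged and ignored so the caller can retry the tx
	if expected > got {
		fmt.Println("ignoring error and retrying")
	} else {
		fmt.Printf("expected %d, got %d: needs sequence adjustment\n", expected, got)
	}
}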
-func (b *Broadcaster) handleMsgError(err error, broadcasterAccount *BroadcasterAccount) error {
+// handleMsgError handles errors that occur while processing messages.
+// Errors that are known to be safe to ignore are logged and dropped.
+func (b *Broadcaster) handleMsgError(ctx types.Context, err error, broadcasterAccount *BroadcasterAccount) error {
 	if strs := accountSeqRegex.FindStringSubmatch(err.Error()); strs != nil {
 		expected, parseErr := strconv.ParseUint(strs[1], 10, 64)
 		if parseErr != nil {
@@ -53,7 +55,7 @@ func (b *Broadcaster) handleMsgError(err error, broadcasterAccount *BroadcasterA
 		}
 
 		if expected > got {
-			b.logger.Warn("ignoring error", zap.String("error", err.Error()))
+			ctx.Logger().Warn("ignoring error", zap.String("error", err.Error()))
 			return nil
 		}
 
@@ -62,7 +64,7 @@ func (b *Broadcaster) handleMsgError(err error, broadcasterAccount *BroadcasterA
 
 	for _, e := range ignoringErrors {
 		if strings.Contains(err.Error(), e.Error()) {
-			b.logger.Warn("ignoring error", zap.String("error", e.Error()))
+			ctx.Logger().Warn("ignoring error", zap.String("error", e.Error()))
 			return nil
 		}
 	}
@@ -73,10 +75,10 @@ func (b *Broadcaster) handleMsgError(err error, broadcasterAccount *BroadcasterA
 
 // HandleProcessedMsgs handles processed messages by broadcasting them to the network.
 // It stores the transaction in the database and local memory and keep track of the successful broadcast.
-func (b *Broadcaster) handleProcessedMsgs(ctx context.Context, data btypes.ProcessedMsgs, broadcasterAccount *BroadcasterAccount) error {
+func (b *Broadcaster) handleProcessedMsgs(ctx types.Context, data btypes.ProcessedMsgs, broadcasterAccount *BroadcasterAccount) error {
 	sequence := broadcasterAccount.Sequence()
 
-	txBytes, txHash, err := broadcasterAccount.BuildTxWithMessages(ctx, data.Msgs)
+	txBytes, txHash, err := broadcasterAccount.BuildTxWithMsgs(ctx, data.Msgs)
 	if err != nil {
 		return errors.Wrapf(err, "simulation failed")
 	}
@@ -90,9 +92,9 @@ func (b *Broadcaster) handleProcessedMsgs(ctx context.Context, data btypes.Proce
 		return fmt.Errorf("broadcast txs: %s", res.Log)
 	}
 
-	b.logger.Debug("broadcast tx", zap.String("tx_hash", txHash), zap.Uint64("sequence", sequence))
+	ctx.Logger().Debug("broadcast tx", zap.String("tx_hash", txHash), zap.Uint64("sequence", sequence))
 
-	err = b.deleteProcessedMsgs(data.Timestamp)
+	err = DeleteProcessedMsgs(b.db, data)
 	if err != nil {
 		return err
 	}
@@ -111,7 +113,7 @@ func (b *Broadcaster) handleProcessedMsgs(ctx context.Context, data btypes.Proce
 
 	if pendingTx.Save {
 		// save pending transaction to the database for handling after restart
-		err = b.savePendingTx(pendingTx)
+		err = SavePendingTx(b.db, pendingTx)
 		if err != nil {
 			return err
 		}
diff --git a/node/broadcaster/types/config.go b/node/broadcaster/types/config.go
index ff8fe3e..9360e5a 100644
--- a/node/broadcaster/types/config.go
+++ b/node/broadcaster/types/config.go
@@ -12,8 +12,8 @@ import (
 	sdk "github.com/cosmos/cosmos-sdk/types"
 )
 
-type BuildTxWithMessagesFn func(context.Context, []sdk.Msg) ([]byte, string, error)
-type PendingTxToProcessedMsgsFn func([]byte) ([]sdk.Msg, error)
+type BuildTxWithMsgsFn func(context.Context, []sdk.Msg) ([]byte, string, error)
+type MsgsFromTxFn func([]byte) ([]sdk.Msg, error)
 
 type BroadcasterConfig struct {
 	// ChainID is the chain ID.
@@ -30,9 +30,6 @@ type BroadcasterConfig struct {
 
 	// Bech32Prefix is the Bech32 prefix.
 	Bech32Prefix string
-
-	// HomePath is the path to the keyring.
- HomePath string } func (bc BroadcasterConfig) Validate() error { @@ -60,12 +57,12 @@ func (bc BroadcasterConfig) Validate() error { return nil } -func (bc BroadcasterConfig) GetKeyringRecord(cdc codec.Codec, keyringConfig *KeyringConfig) (keyring.Keyring, *keyring.Record, error) { +func (bc BroadcasterConfig) GetKeyringRecord(cdc codec.Codec, keyringConfig *KeyringConfig, homePath string) (keyring.Keyring, *keyring.Record, error) { if keyringConfig == nil { return nil, nil, fmt.Errorf("keyring config cannot be nil") } - keyBase, err := keys.GetKeyBase(bc.ChainID, bc.HomePath, cdc, nil) + keyBase, err := keys.GetKeyBase(bc.ChainID, homePath, cdc, nil) if err != nil { return nil, nil, err } else if keyBase == nil { @@ -92,11 +89,11 @@ type KeyringConfig struct { // FeeGranter is the fee granter. FeeGranter *KeyringConfig - // BuildTxWithMessages is the function to build a transaction with messages. - BuildTxWithMessages BuildTxWithMessagesFn + // BuildTxWithMsgs is the function to build a transaction with messages. + BuildTxWithMsgs BuildTxWithMsgsFn - // PendingTxToProcessedMsgs is the function to convert pending tx to processed messages. - PendingTxToProcessedMsgs PendingTxToProcessedMsgsFn + // MsgsFromTx is the function to convert pending tx to processed messages. + MsgsFromTx MsgsFromTxFn } func (kc KeyringConfig) GetKeyRecord(keyBase keyring.Keyring, bech32Prefix string) (*keyring.Record, error) { @@ -123,12 +120,12 @@ func (kc KeyringConfig) GetKeyRecord(keyBase keyring.Keyring, bech32Prefix strin return nil, fmt.Errorf("keyring config is invalid") } -func (kc *KeyringConfig) WithPendingTxToProcessedMsgsFn(fn PendingTxToProcessedMsgsFn) { - kc.PendingTxToProcessedMsgs = fn +func (kc *KeyringConfig) WithPendingTxToProcessedMsgsFn(fn MsgsFromTxFn) { + kc.MsgsFromTx = fn } -func (kc *KeyringConfig) WithBuildTxWithMessagesFn(fn BuildTxWithMessagesFn) { - kc.BuildTxWithMessages = fn +func (kc *KeyringConfig) WithBuildTxWithMessagesFn(fn BuildTxWithMsgsFn) { + kc.BuildTxWithMsgs = fn } func (kc KeyringConfig) Validate() error { diff --git a/node/broadcaster/types/db.go b/node/broadcaster/types/db.go index ddeac8b..ea94f5e 100644 --- a/node/broadcaster/types/db.go +++ b/node/broadcaster/types/db.go @@ -6,6 +6,8 @@ import ( "strings" "time" + "github.com/initia-labs/opinit-bots/types" + "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -25,6 +27,14 @@ type PendingTxInfo struct { Save bool `json:"save"` } +func (p PendingTxInfo) Key() []byte { + return prefixedPendingTx(types.MustInt64ToUint64(p.Timestamp)) +} + +func (p PendingTxInfo) Value() ([]byte, error) { + return p.Marshal() +} + func (p PendingTxInfo) Marshal() ([]byte, error) { return json.Marshal(&p) } @@ -57,6 +67,14 @@ type processedMsgsJSON struct { Save bool `json:"save"` } +func (p ProcessedMsgs) Key() []byte { + return prefixedProcessedMsgs(types.MustInt64ToUint64(p.Timestamp)) +} + +func (p ProcessedMsgs) Value(cdc codec.Codec) ([]byte, error) { + return p.MarshalInterfaceJSON(cdc) +} + func (p ProcessedMsgs) MarshalInterfaceJSON(cdc codec.Codec) ([]byte, error) { pms := processedMsgsJSON{ Sender: p.Sender, diff --git a/node/broadcaster/types/keys.go b/node/broadcaster/types/keys.go index 7f77ba3..cc8c7d1 100644 --- a/node/broadcaster/types/keys.go +++ b/node/broadcaster/types/keys.go @@ -6,14 +6,20 @@ import ( var ( // Keys - PendingTxsKey = []byte("pending_txs") - ProcessedMsgsKey = []byte("processed_msgs") + PendingTxsPrefix = []byte("pending_txs") + ProcessedMsgsPrefix = 
[]byte("processed_msgs") ) -func PrefixedPendingTx(timestamp uint64) []byte { - return append(append(PendingTxsKey, dbtypes.Splitter), dbtypes.FromUint64Key(timestamp)...) +func prefixedPendingTx(timestamp uint64) []byte { + return dbtypes.GenerateKey([][]byte{ + PendingTxsPrefix, + dbtypes.FromUint64Key(timestamp), + }) } -func PrefixedProcessedMsgs(timestamp uint64) []byte { - return append(append(ProcessedMsgsKey, dbtypes.Splitter), dbtypes.FromUint64Key(timestamp)...) +func prefixedProcessedMsgs(timestamp uint64) []byte { + return dbtypes.GenerateKey([][]byte{ + ProcessedMsgsPrefix, + dbtypes.FromUint64Key(timestamp), + }) } diff --git a/node/broadcaster/types/tx.go b/node/broadcaster/types/tx.go deleted file mode 100644 index 06b59be..0000000 --- a/node/broadcaster/types/tx.go +++ /dev/null @@ -1,11 +0,0 @@ -package types - -import ( - "fmt" - - comettypes "github.com/cometbft/cometbft/types" -) - -func TxHash(txBytes []byte) string { - return fmt.Sprintf("%X", comettypes.Tx(txBytes).Hash()) -} diff --git a/node/db.go b/node/db.go index c7fcaff..af2f82a 100644 --- a/node/db.go +++ b/node/db.go @@ -5,59 +5,36 @@ import ( btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" nodetypes "github.com/initia-labs/opinit-bots/node/types" "github.com/initia-labs/opinit-bots/types" - "go.uber.org/zap" + "github.com/pkg/errors" ) -func (n *Node) SetSyncInfo(height int64) { - n.lastProcessedBlockHeight = height - if n.broadcaster != nil { - n.broadcaster.SetSyncInfo(n.lastProcessedBlockHeight) - } -} - -func (n *Node) loadSyncInfo(processedHeight int64) error { - data, err := n.db.Get(nodetypes.LastProcessedBlockHeightKey) - if err == dbtypes.ErrNotFound { - n.SetSyncInfo(processedHeight) - n.startHeightInitialized = true - n.logger.Info("initialize sync info", zap.Int64("start_height", processedHeight+1)) - return nil - } else if err != nil { - return err - } - - lastSyncedHeight, err := dbtypes.ToInt64(data) +// GetSyncInfo gets the synced height +func GetSyncInfo(db types.BasicDB) (int64, error) { + loadedHeightBytes, err := db.Get(nodetypes.SyncedHeightKey) if err != nil { - return err + return 0, errors.Wrap(err, "failed to load sync info") } - n.SetSyncInfo(lastSyncedHeight) - n.logger.Debug("load sync info", zap.Int64("last_processed_height", n.lastProcessedBlockHeight)) - - return nil -} - -func (n Node) SaveSyncInfo(height int64) error { - return n.db.Set(nodetypes.LastProcessedBlockHeightKey, dbtypes.FromUint64(types.MustInt64ToUint64(height))) -} - -func (n Node) SyncInfoToRawKV(height int64) types.RawKV { - return types.RawKV{ - Key: n.db.PrefixedKey(nodetypes.LastProcessedBlockHeightKey), - Value: dbtypes.FromUint64(types.MustInt64ToUint64(height)), + syncedHeight, err := nodetypes.UnmarshalHeight(loadedHeightBytes) + if err != nil { + return 0, errors.Wrap(err, "failed to deserialize height") } + return syncedHeight, nil } -func (n Node) DeleteSyncInfo() error { - return n.db.Delete(nodetypes.LastProcessedBlockHeightKey) +// SetSyncInfo sets the synced height +func SetSyncedHeight(db types.BasicDB, height int64) error { + return db.Set(nodetypes.SyncedHeightKey, nodetypes.MarshalHeight(height)) } -func DeleteSyncInfo(db types.DB) error { - return db.Delete(nodetypes.LastProcessedBlockHeightKey) +// DeleteSyncInfo deletes the synced height +func DeleteSyncedHeight(db types.BasicDB) error { + return db.Delete(nodetypes.SyncedHeightKey) } +// DeleteProcessedMsgs deletes all processed messages func DeleteProcessedMsgs(db types.DB) error { - return 
+// DeleteProcessedMsgs deletes all processed messages
 func DeleteProcessedMsgs(db types.DB) error {
-	return db.PrefixedIterate(btypes.ProcessedMsgsKey, nil, func(key, _ []byte) (stop bool, err error) {
+	return db.Iterate(dbtypes.AppendSplitter(btypes.ProcessedMsgsPrefix), nil, func(key, _ []byte) (stop bool, err error) {
 		err = db.Delete(key)
 		if err != nil {
 			return stop, err
@@ -66,8 +43,9 @@ func DeleteProcessedMsgs(db types.DB) error {
 	})
 }
 
+// DeletePendingTxs deletes all pending txs
 func DeletePendingTxs(db types.DB) error {
-	return db.PrefixedIterate(btypes.PendingTxsKey, nil, func(key, _ []byte) (stop bool, err error) {
+	return db.Iterate(dbtypes.AppendSplitter(btypes.PendingTxsPrefix), nil, func(key, _ []byte) (stop bool, err error) {
 		err = db.Delete(key)
 		if err != nil {
 			return stop, err
diff --git a/node/node.go b/node/node.go
index 9d5a67c..2517925 100644
--- a/node/node.go
+++ b/node/node.go
@@ -1,28 +1,27 @@
 package node
 
 import (
-	"context"
 	"fmt"
+	"sync"
 
 	"github.com/pkg/errors"
 
 	"cosmossdk.io/core/address"
+	dbtypes "github.com/initia-labs/opinit-bots/db/types"
 	"github.com/initia-labs/opinit-bots/node/broadcaster"
 	btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types"
 	"github.com/initia-labs/opinit-bots/node/rpcclient"
 	nodetypes "github.com/initia-labs/opinit-bots/node/types"
 	"github.com/initia-labs/opinit-bots/types"
 	"go.uber.org/zap"
-	"golang.org/x/sync/errgroup"
 
 	"github.com/cosmos/cosmos-sdk/client"
 	"github.com/cosmos/cosmos-sdk/codec"
 )
 
 type Node struct {
-	cfg    nodetypes.NodeConfig
-	db     types.DB
-	logger *zap.Logger
+	cfg nodetypes.NodeConfig
+	db  types.DB
 
 	cdc      codec.Codec
 	txConfig client.TxConfig
@@ -38,12 +37,13 @@ type Node struct {
 	rawBlockHandler nodetypes.RawBlockHandlerFn
 
 	// status info
-	startHeightInitialized   bool
-	lastProcessedBlockHeight int64
-	running                  bool
+	startHeightInitialized bool
+	syncedHeight           int64
+
+	startOnce *sync.Once
 }
 
-func NewNode(cfg nodetypes.NodeConfig, db types.DB, logger *zap.Logger, cdc codec.Codec, txConfig client.TxConfig) (*Node, error) {
+func NewNode(cfg nodetypes.NodeConfig, db types.DB, cdc codec.Codec, txConfig client.TxConfig) (*Node, error) {
 	if err := cfg.Validate(); err != nil {
 		return nil, err
 	}
@@ -56,37 +56,44 @@ func NewNode(cfg nodetypes.NodeConfig, db types.DB, logger *zap.Logger, cdc code
 
 	n := &Node{
 		rpcClient: rpcClient,
 
-		cfg:    cfg,
-		db:     db,
-		logger: logger,
+		cfg: cfg,
+		db:  db,
 
 		eventHandlers: make(map[string]nodetypes.EventHandlerFn),
 
 		cdc:      cdc,
 		txConfig: txConfig,
+
+		startOnce: &sync.Once{},
 	}
 
 	// create broadcaster
 	if n.cfg.BroadcasterConfig != nil {
 		n.broadcaster, err = broadcaster.NewBroadcaster(
 			*n.cfg.BroadcasterConfig,
 			n.db,
-			n.logger,
 			n.cdc,
 			n.txConfig,
-			n.rpcClient,
+			rpcClient,
 		)
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to create broadcaster")
 		}
 	}
 
+	syncedHeight, err := GetSyncInfo(n.db)
+	if errors.Is(err, dbtypes.ErrNotFound) {
+		syncedHeight = 0
+	} else if err != nil {
+		return nil, errors.Wrap(err, "failed to load sync info")
+	}
+	n.UpdateSyncedHeight(syncedHeight)
 	return n, nil
 }
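From this point on, handlers and loopers take types.Context instead of context.Context and call ctx.Logger(), ctx.ErrGrp(), and ctx.PollingInterval() on it. The type itself is defined outside this diff, so the following is only a rough sketch of the shape those call sites imply; every name and field below is inferred, not taken from the repo.

package main

import (
	"context"
	"fmt"
	"time"

	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"
)

// botContext sketches the shape of types.Context implied by the call
// sites in this diff: a context.Context that also carries a logger, an
// errgroup, and a polling interval. Names and fields are guesses.
type botContext struct {
	context.Context

	logger          *zap.Logger
	errGrp          *errgroup.Group
	pollingInterval time.Duration
}

func (c botContext) Logger() *zap.Logger            { return c.logger }
func (c botContext) ErrGrp() *errgroup.Group        { return c.errGrp }
func (c botContext) PollingInterval() time.Duration { return c.pollingInterval }

func main() {
	grp, baseCtx := errgroup.WithContext(context.Background())
	ctx := botContext{
		Context:         baseCtx,
		logger:          zap.NewExample(),
		errGrp:          grp,
		pollingInterval: 100 * time.Millisecond,
	}

	ctx.ErrGrp().Go(func() error {
		ctx.Logger().Info("looper started")
		return nil
	})
	fmt.Println(grp.Wait())
}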
 // StartHeight is the height to start processing.
 // If it is 0, the latest height is used.
 // If the latest height exists in the database, this is ignored.
-func (n *Node) Initialize(ctx context.Context, processedHeight int64, keyringConfig []btypes.KeyringConfig) (err error) {
+func (n *Node) Initialize(ctx types.Context, processedHeight int64, keyringConfig []btypes.KeyringConfig) (err error) {
 	// check if node is catching up
 	status, err := n.rpcClient.Status(ctx)
 	if err != nil {
@@ -95,34 +102,43 @@ func (n *Node) Initialize(ctx context.Context, processedHeight int64, keyringCon
 	if status.SyncInfo.CatchingUp {
 		return errors.New("node is catching up")
 	}
-	if n.broadcaster != nil {
+	if n.HasBroadcaster() {
 		err = n.broadcaster.Initialize(ctx, status, keyringConfig)
 		if err != nil {
 			return err
 		}
 	}
 
-	// load sync info
-	return n.loadSyncInfo(processedHeight)
+	// if the height was not found in the database, initialize it
+	if n.GetSyncedHeight() == 0 {
+		n.UpdateSyncedHeight(processedHeight)
+		n.startHeightInitialized = true
+		ctx.Logger().Info("initialize height", zap.Int64("start_height", processedHeight+1))
+	}
+	ctx.Logger().Debug("load sync info", zap.Int64("synced_height", n.syncedHeight))
+	return nil
 }
 
+// HeightInitialized returns true if the start height is initialized.
 func (n *Node) HeightInitialized() bool {
 	return n.startHeightInitialized
 }
 
-func (n *Node) Start(ctx context.Context) {
-	if n.running {
-		return
-	}
-	n.running = true
+func (n *Node) Start(ctx types.Context) {
+	n.startOnce.Do(func() {
+		n.start(ctx)
+	})
+}
 
-	errGrp := ctx.Value(types.ContextKeyErrGrp).(*errgroup.Group)
-	if n.broadcaster != nil {
-		errGrp.Go(func() (err error) {
+func (n *Node) start(ctx types.Context) {
+	if n.HasBroadcaster() {
+		ctx.ErrGrp().Go(func() (err error) {
 			defer func() {
-				n.logger.Info("tx broadcast looper stopped")
+				ctx.Logger().Info("tx broadcast looper stopped")
 				if r := recover(); r != nil {
-					n.logger.Error("tx broadcast looper panic", zap.Any("recover", r))
+					ctx.Logger().Error("tx broadcast looper panic", zap.Any("recover", r))
 					err = fmt.Errorf("tx broadcast looper panic: %v", r)
 				}
 			}()
@@ -137,11 +153,11 @@ func (n *Node) Start(ctx context.Context) {
 	enableEventHandler := true
 	if n.cfg.ProcessType != nodetypes.PROCESS_TYPE_ONLY_BROADCAST {
 		enableEventHandler = false
-		errGrp.Go(func() (err error) {
+		ctx.ErrGrp().Go(func() (err error) {
 			defer func() {
-				n.logger.Info("block process looper stopped")
+				ctx.Logger().Info("block process looper stopped")
 				if r := recover(); r != nil {
-					n.logger.Error("block process looper panic", zap.Any("recover", r))
+					ctx.Logger().Error("block process looper panic", zap.Any("recover", r))
 					err = fmt.Errorf("block process looper panic: %v", r)
 				}
 			}()
@@ -150,11 +166,11 @@ func (n *Node) Start(ctx context.Context) {
 		})
 	}
 
-	errGrp.Go(func() (err error) {
+	ctx.ErrGrp().Go(func() (err error) {
 		defer func() {
-			n.logger.Info("tx checker looper stopped")
+			ctx.Logger().Info("tx checker looper stopped")
 			if r := recover(); r != nil {
-				n.logger.Error("tx checker panic", zap.Any("recover", r))
+				ctx.Logger().Error("tx checker panic", zap.Any("recover", r))
 				err = fmt.Errorf("tx checker panic: %v", r)
 			}
 		}()
@@ -167,8 +183,31 @@ func (n Node) AccountCodec() address.Codec {
 	return n.cdc.InterfaceRegistry().SigningContext().AddressCodec()
 }
 
+func (n Node) Codec() codec.Codec {
+	return n.cdc
+}
+
+func (n Node) DB() types.DB {
+	return n.db
+}
+
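Each looper started above runs inside ctx.ErrGrp() with a deferred recover that converts a panic into an ordinary error, so a panicking looper shuts the whole group down instead of crashing the process. The same pattern in isolation; runLooper is an illustrative helper, not a function from this repo.

package main

import (
	"fmt"
)

// runLooper mirrors the recover pattern used by Node.start above: a
// panic inside the looper is captured and returned as an error so the
// errgroup can propagate it and cancel the sibling loopers.
func runLooper(name string, loop func() error) (err error) {
	defer func() {
		fmt.Printf("%s stopped\n", name)
		if r := recover(); r != nil {
			err = fmt.Errorf("%s panic: %v", name, r)
		}
	}()
	return loop()
}

func main() {
	err := runLooper("block process looper", func() error {
		panic("boom")
	})
	fmt.Println("returned:", err)
}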
+// GetHeight returns the processing height.
+// It returns the synced height + 1.
 func (n Node) GetHeight() int64 {
-	return n.lastProcessedBlockHeight + 1
+	return n.syncedHeight + 1
+}
+
+func (n *Node) UpdateSyncedHeight(height int64) {
+	n.syncedHeight = height
+	if n.HasBroadcaster() {
+		n.broadcaster.UpdateSyncedHeight(height)
+	}
+}
+
+// GetSyncedHeight returns the synced height.
+// It returns the last processed height.
+func (n Node) GetSyncedHeight() int64 {
+	return n.syncedHeight
 }
 
 func (n Node) GetTxConfig() client.TxConfig {
@@ -180,7 +219,7 @@ func (n Node) HasBroadcaster() bool {
 }
 
 func (n Node) GetBroadcaster() (*broadcaster.Broadcaster, error) {
-	if n.broadcaster == nil {
+	if !n.HasBroadcaster() {
 		return nil, types.ErrKeyNotSet
 	}
 
@@ -188,10 +227,9 @@ func (n Node) GetBroadcaster() (*broadcaster.Broadcaster, error) {
 }
 
 func (n Node) MustGetBroadcaster() *broadcaster.Broadcaster {
-	if n.broadcaster == nil {
+	if !n.HasBroadcaster() {
 		panic("cannot get broadcaster without broadcaster")
 	}
-
 	return n.broadcaster
 }
diff --git a/node/process.go b/node/process.go
index 1d33af2..d0b9307 100644
--- a/node/process.go
+++ b/node/process.go
@@ -1,12 +1,12 @@
 package node
 
 import (
-	"context"
 	"fmt"
 	"time"
 
 	abcitypes "github.com/cometbft/cometbft/abci/types"
 	rpccoretypes "github.com/cometbft/cometbft/rpc/core/types"
+	comettypes "github.com/cometbft/cometbft/types"
 	nodetypes "github.com/initia-labs/opinit-bots/node/types"
 	"github.com/initia-labs/opinit-bots/types"
 	"github.com/pkg/errors"
@@ -14,16 +14,13 @@ import (
 )
 
 // blockProcessLooper fetches new blocks and processes them
-func (n *Node) blockProcessLooper(ctx context.Context, processType nodetypes.BlockProcessType) error {
-	timer := time.NewTicker(types.PollingInterval(ctx))
-	defer timer.Stop()
-
+func (n *Node) blockProcessLooper(ctx types.Context, processType nodetypes.BlockProcessType) error {
 	consecutiveErrors := 0
 	for {
 		select {
 		case <-ctx.Done():
 			return nil
-		case <-timer.C:
+		default:
 			if types.SleepWithRetry(ctx, consecutiveErrors) {
 				return nil
 			}
@@ -32,254 +29,158 @@ func (n *Node) blockProcessLooper(ctx context.Context, processType nodetypes.Blo
 
 		status, err := n.rpcClient.Status(ctx)
 		if err != nil {
-			n.logger.Error("failed to get node status ", zap.String("error", err.Error()))
+			ctx.Logger().Error("failed to get node status", zap.String("error", err.Error()))
 			continue
 		}
 
-		latestChainHeight := status.SyncInfo.LatestBlockHeight
-		if n.lastProcessedBlockHeight >= latestChainHeight {
+		latestHeight := status.SyncInfo.LatestBlockHeight
+		if n.syncedHeight >= latestHeight {
+			ctx.Logger().Warn("already synced", zap.Int64("synced_height", n.syncedHeight), zap.Int64("latest_height", latestHeight))
 			continue
 		}
 
-		switch processType {
-		case nodetypes.PROCESS_TYPE_DEFAULT:
-			for queryHeight := n.lastProcessedBlockHeight + 1; queryHeight <= latestChainHeight; {
-				select {
-				case <-ctx.Done():
-					return nil
-				case <-timer.C:
-				}
-				// TODO: may fetch blocks in batch
-				block, blockResult, err := n.fetchNewBlock(ctx, queryHeight)
-				if err != nil {
-					// TODO: handle error
-					n.logger.Error("failed to fetch new block", zap.String("error", err.Error()))
-					break
-				}
+		err = n.processBlocks(ctx, processType, latestHeight)
+		if nodetypes.HandleErrIgnoreAndTryLater(ctx, err) {
+			ctx.Logger().Warn("ignore and try later", zap.String("error", err.Error()))
+		} else if err != nil {
+			ctx.Logger().Error("failed to process block", zap.String("error", err.Error()))
+		} else {
+			consecutiveErrors = 0
+		}
+	}
 }
 
-				err = n.handleNewBlock(ctx, block, blockResult, latestChainHeight)
-				if err != nil {
-					n.logger.Error("failed to 
handle new block", zap.String("error", err.Error())) - if errors.Is(err, nodetypes.ErrIgnoreAndTryLater) { - sleep := time.NewTimer(time.Minute) - select { - case <-ctx.Done(): - return nil - case <-sleep.C: - } - } - break - } - n.lastProcessedBlockHeight = queryHeight - queryHeight++ - } +// processBlocks fetches new blocks and processes them +// if the process type is default, it will fetch blocks one by one and handle txs and events +// if the process type is raw, it will fetch blocks in bulk and send them to the raw block handler +func (n *Node) processBlocks(ctx types.Context, processType nodetypes.BlockProcessType, latestHeight int64) error { + switch processType { + case nodetypes.PROCESS_TYPE_DEFAULT: + return n.processBlocksTypeDefault(ctx, latestHeight) + case nodetypes.PROCESS_TYPE_RAW: + return n.processBlocksTypeRaw(ctx, latestHeight) + default: + return errors.New("unknown block process type") + } +} - case nodetypes.PROCESS_TYPE_RAW: - start := n.lastProcessedBlockHeight + 1 - end := n.lastProcessedBlockHeight + 100 - if end > latestChainHeight { - end = latestChainHeight - } +// processBlocksTypeDefault fetches new blocks one by one and processes them +func (n *Node) processBlocksTypeDefault(ctx types.Context, latestHeight int64) error { + timer := time.NewTicker(ctx.PollingInterval()) + defer timer.Stop() - blockBulk, err := n.rpcClient.QueryBlockBulk(ctx, start, end) - if err != nil { - n.logger.Error("failed to fetch block bulk", zap.String("error", err.Error())) - continue - } + for height := n.syncedHeight + 1; height <= latestHeight; { + select { + case <-ctx.Done(): + return nil + case <-timer.C: + } - for i := start; i <= end; i++ { - select { - case <-ctx.Done(): - return nil - default: - } - err := n.rawBlockHandler(ctx, nodetypes.RawBlockArgs{ - BlockHeight: i, - LatestHeight: latestChainHeight, - BlockBytes: blockBulk[i-start], - }) - if err != nil { - n.logger.Error("failed to handle raw block", zap.String("error", err.Error())) - break - } - n.lastProcessedBlockHeight = i - } + block, blockResult, err := n.fetchNewBlock(ctx, height) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to fetch new block; height: %d", height)) + } + + err = n.handleNewBlock(ctx, block, blockResult, latestHeight) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed to handle new block; height: %d", height)) } - consecutiveErrors = 0 + + n.UpdateSyncedHeight(height) + height++ } + return nil } -// fetch new block from the chain -func (n *Node) fetchNewBlock(ctx context.Context, height int64) (block *rpccoretypes.ResultBlock, blockResult *rpccoretypes.ResultBlockResults, err error) { - n.logger.Debug("fetch new block", zap.Int64("height", height)) - block, err = n.rpcClient.Block(ctx, &height) +// processBlocksTypeRaw fetches new blocks in bulk and sends them to the raw block handler +func (n *Node) processBlocksTypeRaw(ctx types.Context, latestHeight int64) error { + start := n.syncedHeight + 1 + end := n.syncedHeight + types.RAW_BLOCK_QUERY_MAX_SIZE + if end > latestHeight { + end = latestHeight + } + + blockBulk, err := n.rpcClient.QueryBlockBulk(ctx, start, end) if err != nil { - return nil, nil, err + return errors.Wrap(err, fmt.Sprintf("failed to fetch block bulk: [%d, %d]", start, end)) } - if len(n.eventHandlers) != 0 { - blockResult, err = n.rpcClient.BlockResults(ctx, &height) + for height := start; height <= end; height++ { + select { + case <-ctx.Done(): + return nil + default: + } + + err = n.handleRawBlock(ctx, height, latestHeight, 
blockBulk[height-start]) if err != nil { - return nil, nil, err + return errors.Wrap(err, fmt.Sprintf("failed to handle raw block: height: %d", height)) } + n.UpdateSyncedHeight(height) + } + return nil +} + +// fetchNewBlock fetches a new block and block results given the height +func (n *Node) fetchNewBlock(ctx types.Context, height int64) (*rpccoretypes.ResultBlock, *rpccoretypes.ResultBlockResults, error) { + ctx.Logger().Debug("fetch new block", zap.Int64("height", height)) + + block, err := n.rpcClient.Block(ctx, &height) + if err != nil { + return nil, nil, err + } + + blockResult, err := n.rpcClient.BlockResults(ctx, &height) + if err != nil { + return nil, nil, err } return block, blockResult, nil } -func (n *Node) handleNewBlock(ctx context.Context, block *rpccoretypes.ResultBlock, blockResult *rpccoretypes.ResultBlockResults, latestChainHeight int64) error { +// handleNewBlock handles a new block and block results given the height +// it sends txs and events to the respective registered handlers +func (n *Node) handleNewBlock(ctx types.Context, block *rpccoretypes.ResultBlock, blockResult *rpccoretypes.ResultBlockResults, latestChainHeight int64) error { protoBlock, err := block.Block.ToProto() if err != nil { - return err + return errors.Wrap(err, "failed to convert block to proto block") } - if n.beginBlockHandler != nil { - err := n.beginBlockHandler(ctx, nodetypes.BeginBlockArgs{ - BlockID: block.BlockID.Hash, - Block: *protoBlock, - LatestHeight: latestChainHeight, - }) - if err != nil { - return err - } + err = n.handleBeginBlock(ctx, block.BlockID.Hash, protoBlock, latestChainHeight) + if err != nil { + return errors.Wrap(err, "failed to handle begin block") } - for txIndex, tx := range block.Block.Txs { - if n.txHandler != nil { - err := n.txHandler(ctx, nodetypes.TxHandlerArgs{ - BlockHeight: block.Block.Height, - BlockTime: block.Block.Time, - LatestHeight: latestChainHeight, - TxIndex: int64(txIndex), - Tx: tx, - Success: blockResult.TxsResults[txIndex].Code == abcitypes.CodeTypeOK, - }) - if err != nil { - return fmt.Errorf("failed to handle tx: tx_index: %d; %w", txIndex, err) - } - } - - if len(n.eventHandlers) != 0 { - events := blockResult.TxsResults[txIndex].GetEvents() - for eventIndex, event := range events { - err := n.handleEvent(ctx, block.Block.Height, block.Block.Time, latestChainHeight, event) - if err != nil { - return fmt.Errorf("failed to handle event: tx_index: %d, event_index: %d; %w", txIndex, eventIndex, err) - } - } - } + err = n.handleBlockTxs(ctx, block, blockResult, latestChainHeight) + if err != nil { + return errors.Wrap(err, "failed to handle block txs") } - if len(n.eventHandlers) != 0 { - for eventIndex, event := range blockResult.FinalizeBlockEvents { - err := n.handleEvent(ctx, block.Block.Height, block.Block.Time, latestChainHeight, event) - if err != nil { - return fmt.Errorf("failed to handle event: finalize block, event_index: %d; %w", eventIndex, err) - } - } + err = n.handleFinalizeBlock(ctx, block.Block.Height, block.Block.Time.UTC(), blockResult, latestChainHeight) + if err != nil { + return errors.Wrap(err, "failed to handle finalize block") } - if n.endBlockHandler != nil { - err := n.endBlockHandler(ctx, nodetypes.EndBlockArgs{ - BlockID: block.BlockID.Hash, - Block: *protoBlock, - LatestHeight: latestChainHeight, - }) - if err != nil { - return fmt.Errorf("failed to handle end block; %w", err) - } + err = n.handleEndBlock(ctx, block.BlockID.Hash, protoBlock, latestChainHeight) + if err != nil { + return errors.Wrap(err, 
"failed to handle end block") } return nil } -func (n *Node) handleEvent(ctx context.Context, blockHeight int64, blockTime time.Time, latestHeight int64, event abcitypes.Event) error { +// handleEvent handles the event for the given transaction +func (n *Node) handleEvent(ctx types.Context, blockHeight int64, blockTime time.Time, latestHeight int64, tx comettypes.Tx, txIndex int64, event abcitypes.Event) error { + // ignore if no event handlers if n.eventHandlers[event.GetType()] == nil { return nil } - n.logger.Debug("handle event", zap.Int64("height", blockHeight), zap.String("type", event.GetType())) + ctx.Logger().Debug("handle event", zap.Int64("height", blockHeight), zap.String("type", event.GetType())) return n.eventHandlers[event.Type](ctx, nodetypes.EventHandlerArgs{ BlockHeight: blockHeight, BlockTime: blockTime, LatestHeight: latestHeight, + TxIndex: txIndex, + Tx: tx, EventAttributes: event.GetAttributes(), }) } - -// txChecker checks pending txs and handle events if the tx is included in the block -// in the case that the tx hash is not indexed by the node even if the tx is processed, -// event handler will not be called. -// so, it is recommended to use the event handler only for the check event (e.g. logs) -func (n *Node) txChecker(ctx context.Context, enableEventHandler bool) error { - if n.broadcaster == nil { - return nil - } - - timer := time.NewTicker(types.PollingInterval(ctx)) - defer timer.Stop() - consecutiveErrors := 0 - for { - select { - case <-ctx.Done(): - return nil - case <-timer.C: - if n.broadcaster.LenLocalPendingTx() == 0 { - continue - } - - n.logger.Debug("remaining pending txs", zap.Int("count", n.broadcaster.LenLocalPendingTx())) - - if types.SleepWithRetry(ctx, consecutiveErrors) { - return nil - } - consecutiveErrors++ - } - - pendingTx, err := n.broadcaster.PeekLocalPendingTx() - if err != nil { - return err - } - - height := int64(0) - - res, blockTime, err := n.broadcaster.CheckPendingTx(ctx, pendingTx) - if errors.Is(err, types.ErrTxNotFound) { - // tx not found - continue - } else if err != nil { - return errors.Wrap(err, "failed to check pending tx") - } else if res != nil { - // tx found - height = res.Height - // it only handles the tx if node is only broadcasting txs, not processing blocks - if enableEventHandler && len(n.eventHandlers) != 0 { - events := res.TxResult.GetEvents() - for eventIndex, event := range events { - select { - case <-ctx.Done(): - return nil - default: - } - - err := n.handleEvent(ctx, res.Height, blockTime, 0, event) - if err != nil { - n.logger.Error("failed to handle event", zap.String("tx_hash", pendingTx.TxHash), zap.Int("event_index", eventIndex), zap.String("error", err.Error())) - break - } - } - } - } - - err = n.broadcaster.RemovePendingTx(pendingTx) - if err != nil { - return errors.Wrap(err, "failed to remove pending tx") - } - n.logger.Info("tx inserted", - zap.Int64("height", height), - zap.Uint64("sequence", pendingTx.Sequence), - zap.String("tx_hash", pendingTx.TxHash), - zap.Strings("msg_types", pendingTx.MsgTypes), - zap.Int("pending_txs", n.broadcaster.LenLocalPendingTx()), - ) - consecutiveErrors = 0 - } -} diff --git a/node/query.go b/node/query.go index 2ae9575..00d83fc 100644 --- a/node/query.go +++ b/node/query.go @@ -5,10 +5,11 @@ import ( "time" ) +// QueryBlockTime queries the block time of the block at the given height. 
diff --git a/node/query.go b/node/query.go
index 2ae9575..00d83fc 100644
--- a/node/query.go
+++ b/node/query.go
@@ -5,10 +5,11 @@ import (
 	"time"
 )
 
+// QueryBlockTime queries the block time of the block at the given height.
 func (n Node) QueryBlockTime(ctx context.Context, height int64) (time.Time, error) {
-	block, err := n.rpcClient.Block(ctx, &height)
+	blockHeader, err := n.rpcClient.Header(ctx, &height)
 	if err != nil {
 		return time.Time{}, err
 	}
-	return block.Block.Header.Time, nil
+	return blockHeader.Header.Time.UTC(), nil
 }
diff --git a/node/rpcclient/client.go b/node/rpcclient/client.go
index be82a68..4e6f5b8 100644
--- a/node/rpcclient/client.go
+++ b/node/rpcclient/client.go
@@ -50,6 +50,13 @@ func NewRPCClient(cdc codec.Codec, rpcAddr string) (*RPCClient, error) {
 	}, nil
 }
 
+func NewRPCClientWithClient(cdc codec.Codec, client *clienthttp.HTTP) *RPCClient {
+	return &RPCClient{
+		HTTP: client,
+		cdc:  cdc,
+	}
+}
+
 // Invoke implements the grpc ClientConn.Invoke method
 func (q RPCClient) Invoke(ctx context.Context, method string, req, reply interface{}, opts ...grpc.CallOption) (err error) {
 	// In both cases, we don't allow empty request req (it will panic unexpectedly).
diff --git a/node/testutil.go b/node/testutil.go
new file mode 100644
index 0000000..5a24fa2
--- /dev/null
+++ b/node/testutil.go
@@ -0,0 +1,39 @@
+package node
+
+import (
+	"sync"
+
+	"github.com/initia-labs/opinit-bots/node/broadcaster"
+	"github.com/initia-labs/opinit-bots/node/rpcclient"
+	nodetypes "github.com/initia-labs/opinit-bots/node/types"
+	"github.com/initia-labs/opinit-bots/types"
+
+	"github.com/cosmos/cosmos-sdk/client"
+	"github.com/cosmos/cosmos-sdk/codec"
+)
+
+func NewTestNode(
+	cfg nodetypes.NodeConfig,
+	db types.DB,
+
+	cdc codec.Codec,
+	txConfig client.TxConfig,
+
+	rpcClient *rpcclient.RPCClient,
+	broadcaster *broadcaster.Broadcaster,
+) *Node {
+	return &Node{
+		rpcClient:   rpcClient,
+		broadcaster: broadcaster,
+
+		cfg: cfg,
+		db:  db,
+
+		eventHandlers: make(map[string]nodetypes.EventHandlerFn),
+
+		cdc:      cdc,
+		txConfig: txConfig,
+
+		startOnce: &sync.Once{},
+	}
+}
diff --git a/node/tx_checker.go b/node/tx_checker.go
new file mode 100644
index 0000000..e67077f
--- /dev/null
+++ b/node/tx_checker.go
@@ -0,0 +1,95 @@
+package node
+
+import (
+	"time"
+
+	"github.com/initia-labs/opinit-bots/types"
+	"github.com/pkg/errors"
+	"go.uber.org/zap"
+)
+
+// txChecker continuously checks for pending transactions and handles events if the transaction is included in a block.
+// If the transaction hash is not indexed by the node, even if the transaction is processed, the event handler will not be called.
+// It is recommended to use the event handler only for logging or monitoring purposes.
+//
+// Parameters:
+// - ctx: The context for managing the lifecycle of the txChecker.
+// - enableEventHandler: A boolean flag to enable or disable event handling.
+//
+// Returns:
+// - error: An error if the txChecker encounters an issue.
+func (n *Node) txChecker(ctx types.Context, enableEventHandler bool) error {
+	if !n.HasBroadcaster() {
+		return nil
+	}
+
+	timer := time.NewTicker(ctx.PollingInterval())
+	defer timer.Stop()
+
+	consecutiveErrors := 0
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		case <-timer.C:
+			if n.broadcaster.LenLocalPendingTx() == 0 {
+				continue
+			}
+
+			ctx.Logger().Debug("remaining pending txs", zap.Int("count", n.broadcaster.LenLocalPendingTx()))
+
+			if types.SleepWithRetry(ctx, consecutiveErrors) {
+				return nil
+			}
+			consecutiveErrors++
+		}
+
+		pendingTx, err := n.broadcaster.PeekLocalPendingTx()
+		if err != nil {
+			return err
+		}
+
+		height := int64(0)
+
+		res, blockTime, err := n.broadcaster.CheckPendingTx(ctx, pendingTx)
+		if errors.Is(err, types.ErrTxNotFound) {
+			// tx not found
+			continue
+		} else if err != nil {
+			return errors.Wrap(err, "failed to check pending tx")
+		} else if res != nil {
+			// tx found
+			height = res.Height
+			// handle the transaction only if the node is broadcasting transactions and not processing blocks.
+			if enableEventHandler && len(n.eventHandlers) != 0 {
+				events := res.TxResult.GetEvents()
+				for eventIndex, event := range events {
+					select {
+					case <-ctx.Done():
+						return nil
+					default:
+					}
+
+					err := n.handleEvent(ctx, res.Height, blockTime, 0, res.Tx, types.MustUint64ToInt64(uint64(res.Index)), event)
+					if err != nil {
+						ctx.Logger().Error("failed to handle event", zap.String("tx_hash", pendingTx.TxHash), zap.Int("event_index", eventIndex), zap.String("error", err.Error()))
+						break
+					}
+				}
+			}
+		}
+
+		err = n.broadcaster.RemovePendingTx(ctx, pendingTx)
+		if err != nil {
+			return errors.Wrap(err, "failed to remove pending tx")
+		}
+		ctx.Logger().Info("tx inserted",
+			zap.Int64("height", height),
+			zap.Uint64("sequence", pendingTx.Sequence),
+			zap.String("tx_hash", pendingTx.TxHash),
+			zap.Strings("msg_types", pendingTx.MsgTypes),
+			zap.Int("pending_txs", n.broadcaster.LenLocalPendingTx()),
+		)
+		consecutiveErrors = 0
+	}
+}
diff --git a/node/types/config.go b/node/types/config.go
index 0af26fe..eac17c7 100644
--- a/node/types/config.go
+++ b/node/types/config.go
@@ -39,14 +39,5 @@ func (nc NodeConfig) Validate() error {
 	if nc.Bech32Prefix == "" {
 		return fmt.Errorf("bech32 prefix is empty")
 	}
-
-	// Validated in broadcaster
-	//
-	// if nc.BroadcasterConfig != nil {
-	// 	if err := nc.BroadcasterConfig.Validate(); err != nil {
-	// 		return err
-	// 	}
-	// }
-
 	return nil
 }
diff --git a/node/types/db.go b/node/types/db.go
new file mode 100644
index 0000000..04b0968
--- /dev/null
+++ b/node/types/db.go
@@ -0,0 +1,13 @@
+package types
+
+import (
+	dbtypes "github.com/initia-labs/opinit-bots/db/types"
+)
+
+func MarshalHeight(height int64) []byte {
+	return dbtypes.FromInt64(height)
+}
+
+func UnmarshalHeight(heightBytes []byte) (int64, error) {
+	return dbtypes.ToInt64(heightBytes)
+}
diff --git a/node/types/errors.go b/node/types/errors.go
index 23c2713..8a48d8a 100644
--- a/node/types/errors.go
+++ b/node/types/errors.go
@@ -1,5 +1,24 @@
 package types
 
-import "github.com/pkg/errors"
+import (
+	"context"
+	"time"
+
+	"github.com/pkg/errors"
+)
 
 var ErrIgnoreAndTryLater = errors.New("try later")
+
+// HandleErrIgnoreAndTryLater returns true if the error is ErrIgnoreAndTryLater,
+// sleeping for up to a minute (or until the context is done) before returning.
+func HandleErrIgnoreAndTryLater(ctx context.Context, err error) bool { + if errors.Is(err, ErrIgnoreAndTryLater) { + sleep := time.NewTimer(time.Minute) + select { + case <-ctx.Done(): + case <-sleep.C: + } + return true + } + return false +} diff --git a/node/types/handler.go b/node/types/handler.go index 4973b7d..2e9c682 100644 --- a/node/types/handler.go +++ b/node/types/handler.go @@ -1,24 +1,29 @@ package types import ( - "context" "time" abcitypes "github.com/cometbft/cometbft/abci/types" cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" comettypes "github.com/cometbft/cometbft/types" + "github.com/initia-labs/opinit-bots/types" ) +// EventHandlerArgs is the argument for the event handler +// if the event is from FinalizeBlock, Tx is nil type EventHandlerArgs struct { BlockHeight int64 BlockTime time.Time LatestHeight int64 TxIndex int64 + Tx comettypes.Tx EventAttributes []abcitypes.EventAttribute } -type EventHandlerFn func(context.Context, EventHandlerArgs) error +// EventHandlerFn is the event handler function +type EventHandlerFn func(types.Context, EventHandlerArgs) error +// TxHandlerArgs is the argument for the tx handler type TxHandlerArgs struct { BlockHeight int64 BlockTime time.Time @@ -28,28 +33,35 @@ type TxHandlerArgs struct { Success bool } -type TxHandlerFn func(context.Context, TxHandlerArgs) error +// TxHandlerFn is the tx handler function +type TxHandlerFn func(types.Context, TxHandlerArgs) error +// BeginBlockArgs is the argument for the begin block handler type BeginBlockArgs struct { BlockID []byte Block cmtproto.Block LatestHeight int64 } -type BeginBlockHandlerFn func(context.Context, BeginBlockArgs) error +// BeginBlockHandlerFn is the begin block handler function +type BeginBlockHandlerFn func(types.Context, BeginBlockArgs) error +// EndBlockArgs is the argument for the end block handler type EndBlockArgs struct { BlockID []byte Block cmtproto.Block LatestHeight int64 } -type EndBlockHandlerFn func(context.Context, EndBlockArgs) error +// EndBlockHandlerFn is the end block handler function +type EndBlockHandlerFn func(types.Context, EndBlockArgs) error +// RawBlockArgs is the argument for the raw block handler type RawBlockArgs struct { BlockHeight int64 LatestHeight int64 BlockBytes []byte } -type RawBlockHandlerFn func(context.Context, RawBlockArgs) error +// RawBlockHandlerFn is the raw block handler function +type RawBlockHandlerFn func(types.Context, RawBlockArgs) error diff --git a/node/types/key.go b/node/types/key.go index bd2c9a1..89678a3 100644 --- a/node/types/key.go +++ b/node/types/key.go @@ -2,5 +2,5 @@ package types var ( // Keys - LastProcessedBlockHeightKey = []byte("last_processed_block_height") + SyncedHeightKey = []byte("synced_height") ) diff --git a/provider/child/child.go b/provider/child/child.go index 8715516..2dd7326 100644 --- a/provider/child/child.go +++ b/provider/child/child.go @@ -2,8 +2,6 @@ package child import ( "bytes" - "context" - "errors" "go.uber.org/zap" @@ -19,10 +17,13 @@ import ( "github.com/initia-labs/opinit-bots/keys" "github.com/initia-labs/opinit-bots/merkle" + merkletypes "github.com/initia-labs/opinit-bots/merkle/types" "github.com/initia-labs/opinit-bots/node" btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" nodetypes "github.com/initia-labs/opinit-bots/node/types" "github.com/initia-labs/opinit-bots/types" + + "github.com/pkg/errors" ) type BaseChild struct { @@ -35,9 +36,7 @@ type BaseChild struct { initializeTreeFn func(int64) (bool, error) - cfg nodetypes.NodeConfig - db 
types.DB
-	logger *zap.Logger
+	cfg nodetypes.NodeConfig
 
 	opchildQueryClient opchildtypes.QueryClient
@@ -51,21 +50,21 @@ type BaseChild struct {
 
 func NewBaseChildV1(
 	cfg nodetypes.NodeConfig,
-	db types.DB, logger *zap.Logger,
+	db types.DB,
 ) *BaseChild {
 	appCodec, txConfig, err := GetCodec(cfg.Bech32Prefix)
 	if err != nil {
-		panic(err)
+		panic(errors.Wrap(err, "failed to get codec"))
 	}
 
-	node, err := node.NewNode(cfg, db, logger, appCodec, txConfig)
+	node, err := node.NewNode(cfg, db, appCodec, txConfig)
 	if err != nil {
-		panic(err)
+		panic(errors.Wrap(err, "failed to create node"))
 	}
 
-	mk, err := merkle.NewMerkle(db.WithPrefix([]byte(types.MerkleName)), ophosttypes.GenerateNodeHash)
+	mk, err := merkle.NewMerkle(ophosttypes.GenerateNodeHash)
 	if err != nil {
-		panic(err)
+		panic(errors.Wrap(err, "failed to create merkle"))
 	}
 
 	ch := &BaseChild{
@@ -74,9 +73,7 @@ func NewBaseChildV1(
 		node: node,
 		mk:   mk,
 
-		cfg:    cfg,
-		db:     db,
-		logger: logger,
+		cfg: cfg,
 
 		opchildQueryClient: opchildtypes.NewQueryClient(node.GetRPCClient()),
 
@@ -100,8 +97,10 @@ func GetCodec(bech32Prefix string) (codec.Codec, client.TxConfig, error) {
 	})
 }
 
+// Initialize initializes the child node.
+// If the synced height of the node was newly initialized, it deletes future working trees and sets initializeTreeFn.
 func (b *BaseChild) Initialize(
-	ctx context.Context,
+	ctx types.Context,
 	processedHeight int64,
 	startOutputIndex uint64,
 	bridgeInfo ophosttypes.QueryBridgeResponse,
@@ -126,17 +125,17 @@ func (b *BaseChild) Initialize(
 			return 0, err
 		}
 
-		err = b.mk.DeleteFutureFinalizedTrees(l2Sequence)
+		err = merkle.DeleteFutureFinalizedTrees(b.DB(), l2Sequence)
 		if err != nil {
 			return 0, err
 		}
 	}
 	b.initializeTreeFn = func(blockHeight int64) (bool, error) {
 		if processedHeight+1 == blockHeight {
-			b.logger.Info("initialize tree", zap.Uint64("index", startOutputIndex))
-			err := b.mk.InitializeWorkingTree(startOutputIndex, l2Sequence)
+			ctx.Logger().Info("initialize tree", zap.Uint64("index", startOutputIndex))
+			err := b.mk.InitializeWorkingTree(types.MustInt64ToUint64(blockHeight), startOutputIndex, l2Sequence)
 			if err != nil {
-				return false, err
+				return false, errors.Wrap(err, "failed to initialize working tree")
 			}
 			return true, nil
 		}
@@ -145,12 +144,13 @@ func (b *BaseChild) Initialize(
 	}
 
 	version := types.MustInt64ToUint64(processedHeight)
-	err = b.mk.DeleteFutureWorkingTrees(version + 1)
+	err = merkle.DeleteFutureWorkingTrees(b.DB(), version+1)
 	if err != nil {
 		return 0, err
 	}
 	}
 
+	// if oracle config is set in the bridge config, check if the oracle account has the grant from one of the executors
 	if b.OracleEnabled() && oracleKeyringConfig != nil {
 		executors, err := b.QueryExecutors(ctx)
 		if err != nil {
@@ -188,25 +188,30 @@ func (b *BaseChild) Initialize(
 	return l2Sequence, nil
 }
 
-func (b *BaseChild) Start(ctx context.Context) {
-	b.logger.Info("child start", zap.Int64("height", b.Height()))
+func (b *BaseChild) Start(ctx types.Context) {
+	ctx.Logger().Info("child start", zap.Int64("height", b.Height()))
 	b.node.Start(ctx)
 }
-
-func (b BaseChild) BroadcastMsgs(msgs btypes.ProcessedMsgs) {
-	if len(msgs.Msgs) == 0 {
+func (b BaseChild) BroadcastProcessedMsgs(batch ...btypes.ProcessedMsgs) {
+	if len(batch) == 0 {
 		return
 	}
+	broadcaster := b.node.MustGetBroadcaster()
 
-	b.node.MustGetBroadcaster().BroadcastMsgs(msgs)
+	for _, processedMsgs := range batch {
+		if len(processedMsgs.Msgs) == 0 {
+			continue
+		}
+		broadcaster.BroadcastProcessedMsgs(processedMsgs)
+	}
 }
 
-func (b BaseChild) ProcessedMsgsToRawKV(msgs 
[]btypes.ProcessedMsgs, delete bool) ([]types.RawKV, error) { - if len(msgs) == 0 { - return nil, nil - } +func (b BaseChild) DB() types.DB { + return b.node.DB() +} - return b.node.MustGetBroadcaster().ProcessedMsgsToRawKV(msgs, delete) +func (b BaseChild) Codec() codec.Codec { + return b.node.Codec() } func (b BaseChild) BridgeId() uint64 { @@ -217,7 +222,7 @@ func (b BaseChild) OracleEnabled() bool { return b.bridgeInfo.BridgeConfig.OracleEnabled } -func (b BaseChild) HasKey() bool { +func (b BaseChild) HasBroadcaster() bool { return b.node.HasBroadcaster() } @@ -241,14 +246,6 @@ func (b BaseChild) Node() *node.Node { return b.node } -func (b BaseChild) Logger() *zap.Logger { - return b.logger -} - -func (b BaseChild) DB() types.DB { - return b.db -} - /// MsgQueue func (b BaseChild) GetMsgQueue() map[string][]sdk.Msg { @@ -271,41 +268,35 @@ func (b BaseChild) GetProcessedMsgs() []btypes.ProcessedMsgs { return b.processedMsgs } -func (b *BaseChild) AppendProcessedMsgs(msgs btypes.ProcessedMsgs) { - b.processedMsgs = append(b.processedMsgs, msgs) +func (b *BaseChild) AppendProcessedMsgs(msgs ...btypes.ProcessedMsgs) { + b.processedMsgs = append(b.processedMsgs, msgs...) } func (b *BaseChild) EmptyProcessedMsgs() { b.processedMsgs = b.processedMsgs[:0] } -/// Merkle - +// / Merkle func (b BaseChild) Merkle() *merkle.Merkle { return b.mk } -func (b BaseChild) GetWorkingTreeIndex() (uint64, error) { +func (b BaseChild) WorkingTree() (merkletypes.TreeInfo, error) { if b.mk == nil { - return 0, errors.New("merkle is not initialized") + return merkletypes.TreeInfo{}, errors.New("merkle is not initialized") } - return b.mk.GetWorkingTreeIndex() + return b.mk.WorkingTree() } -func (b BaseChild) GetStartLeafIndex() (uint64, error) { - if b.mk == nil { - return 0, errors.New("merkle is not initialized") - } - return b.mk.GetStartLeafIndex() -} - -func (b BaseChild) GetWorkingTreeLeafCount() (uint64, error) { - if b.mk == nil { - return 0, errors.New("merkle is not initialized") +func (b BaseChild) MustGetWorkingTree() merkletypes.TreeInfo { + tree, err := b.WorkingTree() + if err != nil { + panic(err) } - return b.mk.GetWorkingTreeLeafCount() + return tree } +// keyringConfigs returns the keyring configs for the base and oracle accounts. 
 func (b *BaseChild) keyringConfigs(baseConfig *btypes.KeyringConfig, oracleConfig *btypes.KeyringConfig) []btypes.KeyringConfig {
 	var configs []btypes.KeyringConfig
 	if baseConfig != nil {
diff --git a/provider/child/msgs.go b/provider/child/msgs.go
index 54201de..f4cd57b 100644
--- a/provider/child/msgs.go
+++ b/provider/child/msgs.go
@@ -1,13 +1,14 @@
 package child
 
 import (
-	"errors"
-
 	opchildtypes "github.com/initia-labs/OPinit/x/opchild/types"
 	"github.com/initia-labs/opinit-bots/types"
 
+	cdctypes "github.com/cosmos/cosmos-sdk/codec/types"
 	sdk "github.com/cosmos/cosmos-sdk/types"
 	"github.com/cosmos/cosmos-sdk/x/authz"
+
+	"github.com/pkg/errors"
 )
 
 func (b BaseChild) GetMsgFinalizeTokenDeposit(
@@ -24,7 +25,7 @@ func (b BaseChild) GetMsgFinalizeTokenDeposit(
 		if errors.Is(err, types.ErrKeyNotSet) {
 			return nil, "", nil
 		}
-		return nil, "", err
+		return nil, "", errors.Wrap(err, "failed to get address")
 	}
 
 	msg := opchildtypes.NewMsgFinalizeTokenDeposit(
@@ -39,7 +40,7 @@ func (b BaseChild) GetMsgFinalizeTokenDeposit(
 	)
 	err = msg.Validate(b.node.AccountCodec())
 	if err != nil {
-		return nil, "", err
+		return nil, "", errors.Wrap(err, "failed to validate msg")
 	}
 	return msg, sender, nil
 }
@@ -48,16 +49,12 @@ func (b BaseChild) GetMsgUpdateOracle(
 	height int64,
 	data []byte,
 ) (sdk.Msg, string, error) {
-	oracleAddress, err := b.OracleAccountAddress()
+	oracleAddressString, err := b.OracleAccountAddressString()
 	if err != nil {
 		if errors.Is(err, types.ErrKeyNotSet) {
 			return nil, "", nil
 		}
-		return nil, "", err
-	}
-	oracleAddressString, err := b.OracleAccountAddressString()
-	if err != nil {
-		return nil, "", err
+		return nil, "", errors.Wrap(err, "failed to get address")
 	}
 
 	if b.oracleAccountGranter == "" {
@@ -71,9 +68,26 @@ func (b BaseChild) GetMsgUpdateOracle(
 	)
 	err = msg.Validate(b.node.AccountCodec())
 	if err != nil {
-		return nil, "", err
+		return nil, "", errors.Wrap(err, "failed to validate msg")
+	}
+
+	authzMsg, err := CreateAuthzMsg(oracleAddressString, msg)
+	if err != nil {
+		return nil, "", errors.Wrap(err, "failed to create authz msg")
+	}
+	return authzMsg, oracleAddressString, nil
+}
+
+func CreateAuthzMsg(grantee string, msg sdk.Msg) (sdk.Msg, error) {
+	msgsAny := make([]*cdctypes.Any, 1)
+	any, err := cdctypes.NewAnyWithValue(msg)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create any")
 	}
+	msgsAny[0] = any
 
-	authzMsgExec := authz.NewMsgExec(oracleAddress, []sdk.Msg{msg})
-	return &authzMsgExec, oracleAddressString, nil
+	return &authz.MsgExec{
+		Grantee: grantee,
+		Msgs:    msgsAny,
+	}, nil
 }
diff --git a/provider/child/parse.go b/provider/child/parse.go
index 1c96a1d..f295d31 100644
--- a/provider/child/parse.go
+++ b/provider/child/parse.go
@@ -7,6 +7,7 @@ import (
 	"cosmossdk.io/math"
 	abcitypes "github.com/cometbft/cometbft/abci/types"
 	opchildtypes "github.com/initia-labs/OPinit/x/opchild/types"
+	"github.com/pkg/errors"
 
 	sdk "github.com/cosmos/cosmos-sdk/types"
 )
@@ -43,6 +44,7 @@ func ParseFinalizeDeposit(eventAttrs []abcitypes.EventAttribute) (
 		case opchildtypes.AttributeKeyL1Sequence:
 			l1Sequence, err = strconv.ParseUint(attr.Value, 10, 64)
 			if err != nil {
+				err = errors.Wrap(err, "failed to parse l1 sequence")
 				return
 			}
 		case opchildtypes.AttributeKeySender:
@@ -63,6 +65,7 @@ func ParseFinalizeDeposit(eventAttrs []abcitypes.EventAttribute) (
 		case opchildtypes.AttributeKeyFinalizeHeight:
 			l1BlockHeight, err = strconv.ParseInt(attr.Value, 10, 64)
 			if err != nil {
+				err = errors.Wrap(err, "failed to parse l1 block height")
 				return
 			}
 		default:
diff --git 
a/provider/child/query.go b/provider/child/query.go index cdeea9e..bcd9dec 100644 --- a/provider/child/query.go +++ b/provider/child/query.go @@ -78,19 +78,19 @@ func (b BaseChild) QueryGrantsRequest(ctx context.Context, granter, grantee, msg return res, nil } -func (b BaseChild) QueryGranteeGrants(ctx context.Context, grantee string) ([]*authz.GrantAuthorization, error) { +func (b BaseChild) QueryGranteeGrants(botCtx types.Context, grantee string) ([]*authz.GrantAuthorization, error) { req := &authz.QueryGranteeGrantsRequest{ Grantee: grantee, Pagination: &query.PageRequest{ Limit: 100, }, } - ctx, cancel := rpcclient.GetQueryContext(ctx, 0) + ctx, cancel := rpcclient.GetQueryContext(botCtx, 0) defer cancel() authzClient := authz.NewQueryClient(b.node.GetRPCClient()) - ticker := time.NewTicker(types.PollingInterval(ctx)) + ticker := time.NewTicker(botCtx.PollingInterval()) defer ticker.Stop() result := make([]*authz.GrantAuthorization, 0) diff --git a/provider/child/testutil.go b/provider/child/testutil.go new file mode 100644 index 0000000..c0a4382 --- /dev/null +++ b/provider/child/testutil.go @@ -0,0 +1,41 @@ +package child + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + + "github.com/initia-labs/opinit-bots/merkle" + "github.com/initia-labs/opinit-bots/node" + btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" + nodetypes "github.com/initia-labs/opinit-bots/node/types" +) + +func NewTestBaseChild( + version uint8, + + node *node.Node, + mk *merkle.Merkle, + + bridgeInfo ophosttypes.QueryBridgeResponse, + + initializeTreeFn func(int64) (bool, error), + + cfg nodetypes.NodeConfig, +) *BaseChild { + return &BaseChild{ + version: version, + + node: node, + mk: mk, + + bridgeInfo: bridgeInfo, + + initializeTreeFn: initializeTreeFn, + + cfg: cfg, + + processedMsgs: make([]btypes.ProcessedMsgs, 0), + msgQueue: make(map[string][]sdk.Msg), + } +} diff --git a/provider/child/tree.go b/provider/child/tree.go index 5e976fd..cee3ef3 100644 --- a/provider/child/tree.go +++ b/provider/child/tree.go @@ -1,10 +1,12 @@ package child +import "github.com/pkg/errors" + func (b *BaseChild) InitializeTree(blockHeight int64) bool { if b.initializeTreeFn != nil { ok, err := b.initializeTreeFn(blockHeight) if err != nil { - panic("failed to initialize working tree: " + err.Error()) + panic(errors.Wrap(err, "failed to initialize working tree")) } return ok } diff --git a/provider/host/host.go b/provider/host/host.go index e06614f..b29e3eb 100644 --- a/provider/host/host.go +++ b/provider/host/host.go @@ -1,9 +1,6 @@ package host import ( - "context" - "errors" - "go.uber.org/zap" "github.com/cosmos/cosmos-sdk/client" @@ -19,6 +16,8 @@ import ( btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" nodetypes "github.com/initia-labs/opinit-bots/node/types" "github.com/initia-labs/opinit-bots/types" + + "github.com/pkg/errors" ) type BaseHost struct { @@ -28,9 +27,7 @@ type BaseHost struct { bridgeInfo ophosttypes.QueryBridgeResponse - cfg nodetypes.NodeConfig - db types.DB - logger *zap.Logger + cfg nodetypes.NodeConfig ophostQueryClient ophosttypes.QueryClient @@ -38,17 +35,15 @@ type BaseHost struct { msgQueue map[string][]sdk.Msg } -func NewBaseHostV1(cfg nodetypes.NodeConfig, - db types.DB, logger *zap.Logger, -) *BaseHost { +func NewBaseHostV1(cfg nodetypes.NodeConfig, db types.DB) *BaseHost { appCodec, txConfig, err := GetCodec(cfg.Bech32Prefix) if err != nil { - panic(err) + panic(errors.Wrap(err, "failed to 
get codec")) } - node, err := node.NewNode(cfg, db, logger, appCodec, txConfig) + node, err := node.NewNode(cfg, db, appCodec, txConfig) if err != nil { - panic(err) + panic(errors.Wrap(err, "failed to create node")) } h := &BaseHost{ @@ -56,9 +51,7 @@ func NewBaseHostV1(cfg nodetypes.NodeConfig, node: node, - cfg: cfg, - db: db, - logger: logger, + cfg: cfg, ophostQueryClient: ophosttypes.NewQueryClient(node.GetRPCClient()), @@ -79,38 +72,44 @@ func GetCodec(bech32Prefix string) (codec.Codec, client.TxConfig, error) { }) } -func (b *BaseHost) Initialize(ctx context.Context, processedHeight int64, bridgeInfo ophosttypes.QueryBridgeResponse, keyringConfig *btypes.KeyringConfig) error { +func (b *BaseHost) Initialize(ctx types.Context, processedHeight int64, bridgeInfo ophosttypes.QueryBridgeResponse, keyringConfig *btypes.KeyringConfig) error { err := b.node.Initialize(ctx, processedHeight, b.keyringConfigs(keyringConfig)) if err != nil { - return err + return errors.Wrap(err, "failed to initialize node") } b.SetBridgeInfo(bridgeInfo) return nil } -func (b *BaseHost) Start(ctx context.Context) { +func (b *BaseHost) Start(ctx types.Context) { if b.cfg.ProcessType == nodetypes.PROCESS_TYPE_ONLY_BROADCAST { - b.logger.Info("host start") + ctx.Logger().Info("host start") } else { - b.logger.Info("host start", zap.Int64("height", b.node.GetHeight())) + ctx.Logger().Info("host start", zap.Int64("height", b.node.GetHeight())) } b.node.Start(ctx) } -func (b BaseHost) BroadcastMsgs(msgs btypes.ProcessedMsgs) { - if len(msgs.Msgs) == 0 { +func (b BaseHost) BroadcastProcessedMsgs(batch ...btypes.ProcessedMsgs) { + if len(batch) == 0 { return } + broadcaster := b.node.MustGetBroadcaster() - b.node.MustGetBroadcaster().BroadcastMsgs(msgs) + for _, processedMsgs := range batch { + if len(processedMsgs.Msgs) == 0 { + continue + } + broadcaster.BroadcastProcessedMsgs(processedMsgs) + } } -func (b BaseHost) ProcessedMsgsToRawKV(msgs []btypes.ProcessedMsgs, delete bool) ([]types.RawKV, error) { - if len(msgs) == 0 { - return nil, nil - } +func (b BaseHost) DB() types.DB { + return b.node.DB() +} - return b.node.MustGetBroadcaster().ProcessedMsgsToRawKV(msgs, delete) +func (b BaseHost) Codec() codec.Codec { + return b.node.Codec() } func (b BaseHost) BridgeId() uint64 { @@ -129,7 +128,7 @@ func (b BaseHost) BridgeInfo() ophosttypes.QueryBridgeResponse { return b.bridgeInfo } -func (b BaseHost) HasKey() bool { +func (b BaseHost) HasBroadcaster() bool { return b.node.HasBroadcaster() } @@ -145,14 +144,6 @@ func (b BaseHost) Node() *node.Node { return b.node } -func (b BaseHost) Logger() *zap.Logger { - return b.logger -} - -func (b BaseHost) DB() types.DB { - return b.db -} - /// MsgQueue func (b BaseHost) GetMsgQueue() map[string][]sdk.Msg { @@ -175,8 +166,8 @@ func (b BaseHost) GetProcessedMsgs() []btypes.ProcessedMsgs { return b.processedMsgs } -func (b *BaseHost) AppendProcessedMsgs(msgs btypes.ProcessedMsgs) { - b.processedMsgs = append(b.processedMsgs, msgs) +func (b *BaseHost) AppendProcessedMsgs(msgs ...btypes.ProcessedMsgs) { + b.processedMsgs = append(b.processedMsgs, msgs...) 
} func (b *BaseHost) EmptyProcessedMsgs() { diff --git a/provider/host/msgs.go b/provider/host/msgs.go index 62ff290..da057a9 100644 --- a/provider/host/msgs.go +++ b/provider/host/msgs.go @@ -1,11 +1,11 @@ package host import ( - "errors" - ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" + sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -20,7 +20,7 @@ func (b BaseHost) GetMsgProposeOutput( if errors.Is(err, types.ErrKeyNotSet) { return nil, "", nil } - return nil, "", err + return nil, "", errors.Wrap(err, "failed to get address") } msg := ophosttypes.NewMsgProposeOutput( @@ -32,7 +32,7 @@ func (b BaseHost) GetMsgProposeOutput( ) err = msg.Validate(b.node.AccountCodec()) if err != nil { - return nil, "", err + return nil, "", errors.Wrap(err, "failed to validate msg") } return msg, sender, nil } @@ -43,7 +43,7 @@ func (b BaseHost) CreateBatchMsg(batchBytes []byte) (sdk.Msg, string, error) { if errors.Is(err, types.ErrKeyNotSet) { return nil, "", nil } - return nil, "", err + return nil, "", errors.Wrap(err, "failed to get address") } msg := ophosttypes.NewMsgRecordBatch( @@ -53,7 +53,7 @@ func (b BaseHost) CreateBatchMsg(batchBytes []byte) (sdk.Msg, string, error) { ) err = msg.Validate(b.node.AccountCodec()) if err != nil { - return nil, "", err + return nil, "", errors.Wrap(err, "failed to validate msg") } return msg, submitter, nil } diff --git a/provider/host/parse.go b/provider/host/parse.go index 4415da8..a736207 100644 --- a/provider/host/parse.go +++ b/provider/host/parse.go @@ -7,6 +7,7 @@ import ( abcitypes "github.com/cometbft/cometbft/abci/types" ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/pkg/errors" ) func missingAttrsError(missingAttrs map[string]struct{}) error { @@ -58,20 +59,27 @@ func ParseMsgUpdateBatchInfo(eventAttrs []abcitypes.EventAttribute) ( case ophosttypes.AttributeKeyBridgeId: bridgeId, err = strconv.ParseUint(attr.Value, 10, 64) if err != nil { + err = errors.Wrap(err, "failed to parse bridge id") return } case ophosttypes.AttributeKeyBatchChainType: + if attr.Value != ophosttypes.BatchInfo_CHAIN_TYPE_INITIA.StringWithoutPrefix() && attr.Value != ophosttypes.BatchInfo_CHAIN_TYPE_CELESTIA.StringWithoutPrefix() { + err = errors.New("unknown chain type") + return + } chain = attr.Value case ophosttypes.AttributeKeyBatchSubmitter: submitter = attr.Value case ophosttypes.AttributeKeyFinalizedOutputIndex: outputIndex, err = strconv.ParseUint(attr.Value, 10, 64) if err != nil { + err = errors.Wrap(err, "failed to parse output index") return } case ophosttypes.AttributeKeyFinalizedL2BlockNumber: l2BlockNumber, err = strconv.ParseInt(attr.Value, 10, 64) if err != nil { + err = errors.Wrap(err, "failed to parse l2 block number") return } default: @@ -103,11 +111,13 @@ func ParseMsgInitiateDeposit(eventAttrs []abcitypes.EventAttribute) ( case ophosttypes.AttributeKeyBridgeId: bridgeId, err = strconv.ParseUint(attr.Value, 10, 64) if err != nil { + err = errors.Wrap(err, "failed to parse bridge id") return } case ophosttypes.AttributeKeyL1Sequence: l1Sequence, err = strconv.ParseUint(attr.Value, 10, 64) if err != nil { + err = errors.Wrap(err, "failed to parse l1 sequence") return } case ophosttypes.AttributeKeyFrom: @@ -156,21 +166,25 @@ func ParseMsgProposeOutput(eventAttrs []abcitypes.EventAttribute) ( case ophosttypes.AttributeKeyBridgeId: bridgeId, err = strconv.ParseUint(attr.Value, 10, 64) if err != nil { + err = errors.Wrap(err, "failed to parse bridge id") 
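The wrapped parse errors in these hunks depend on Go's named results: err must be reassigned before the bare return, otherwise the unwrapped error would escape. A schematic reduction of the pattern (function name and message are illustrative):

    // assumes "strconv" and "github.com/pkg/errors"
    func parseBridgeID(v string) (bridgeId uint64, err error) {
        bridgeId, err = strconv.ParseUint(v, 10, 64)
        if err != nil {
            err = errors.Wrap(err, "failed to parse bridge id") // reassign the named result
            return // the bare return now carries the wrapped error
        }
        return
    }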
return } case ophosttypes.AttributeKeyOutputIndex: outputIndex, err = strconv.ParseUint(attr.Value, 10, 64) if err != nil { + err = errors.Wrap(err, "failed to parse output index") return } case ophosttypes.AttributeKeyL2BlockNumber: l2BlockNumber, err = strconv.ParseInt(attr.Value, 10, 64) if err != nil { + err = errors.Wrap(err, "failed to parse l2 block number") return } case ophosttypes.AttributeKeyOutputRoot: outputRoot, err = hex.DecodeString(attr.Value) if err != nil { + err = errors.Wrap(err, "failed to decode output root") return } default: @@ -202,16 +216,19 @@ func ParseMsgFinalizeWithdrawal(eventAttrs []abcitypes.EventAttribute) ( case ophosttypes.AttributeKeyBridgeId: bridgeId, err = strconv.ParseUint(attr.Value, 10, 64) if err != nil { + err = errors.Wrap(err, "failed to parse bridge id") return } case ophosttypes.AttributeKeyOutputIndex: outputIndex, err = strconv.ParseUint(attr.Value, 10, 64) if err != nil { + err = errors.Wrap(err, "failed to parse output index") return } case ophosttypes.AttributeKeyL2Sequence: l2Sequence, err = strconv.ParseUint(attr.Value, 10, 64) if err != nil { + err = errors.Wrap(err, "failed to parse l2 sequence") return } case ophosttypes.AttributeKeyFrom: diff --git a/provider/host/query.go b/provider/host/query.go index 1391eae..2fb3bbc 100644 --- a/provider/host/query.go +++ b/provider/host/query.go @@ -2,7 +2,6 @@ package host import ( "context" - "errors" "fmt" "strconv" "strings" @@ -14,6 +13,7 @@ import ( "github.com/initia-labs/opinit-bots/node/rpcclient" "github.com/initia-labs/opinit-bots/types" + "github.com/pkg/errors" ) func (b BaseHost) QueryBridgeConfig(ctx context.Context, bridgeId uint64) (*ophosttypes.QueryBridgeResponse, error) { @@ -148,17 +148,17 @@ func (b BaseHost) QueryBatchInfos(ctx context.Context, bridgeId uint64) (*ophost return b.ophostQueryClient.BatchInfos(ctx, req) } -func (b BaseHost) QueryDepositTxHeight(ctx context.Context, bridgeId uint64, l1Sequence uint64) (int64, error) { +func (b BaseHost) QueryDepositTxHeight(botCtx types.Context, bridgeId uint64, l1Sequence uint64) (int64, error) { if l1Sequence == 0 { return 0, nil } - ctx, cancel := rpcclient.GetQueryContext(ctx, 0) - defer cancel() - - ticker := time.NewTicker(types.PollingInterval(ctx)) + ticker := time.NewTicker(botCtx.PollingInterval()) defer ticker.Stop() + ctx, cancel := rpcclient.GetQueryContext(botCtx, 0) + defer cancel() + query := fmt.Sprintf("%s.%s = %d", ophosttypes.EventTypeInitiateTokenDeposit, ophosttypes.AttributeKeyL1Sequence, diff --git a/provider/host/testutil.go b/provider/host/testutil.go new file mode 100644 index 0000000..a53d4ed --- /dev/null +++ b/provider/host/testutil.go @@ -0,0 +1,23 @@ +package host + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + + "github.com/initia-labs/opinit-bots/node" + btypes "github.com/initia-labs/opinit-bots/node/broadcaster/types" + nodetypes "github.com/initia-labs/opinit-bots/node/types" +) + +func NewTestBaseHost(version uint8, node *node.Node, bridgeInfo ophosttypes.QueryBridgeResponse, cfg nodetypes.NodeConfig, ophostQueryClient ophosttypes.QueryClient) *BaseHost { + return &BaseHost{ + version: version, + node: node, + bridgeInfo: bridgeInfo, + cfg: cfg, + ophostQueryClient: ophostQueryClient, + processedMsgs: make([]btypes.ProcessedMsgs, 0), + msgQueue: make(map[string][]sdk.Msg), + } +} diff --git a/server/server.go b/server/server.go index a4e7812..7a7fd85 100644 --- a/server/server.go +++ b/server/server.go @@ -14,9 
+14,10 @@ type Server struct { func NewServer(cfg types.ServerConfig) *Server { app := fiber.New() app.Use(cors.New(cors.Config{ - AllowOrigins: cfg.AllowOrigins, - AllowHeaders: cfg.AllowHeaders, - AllowMethods: cfg.AllowMethods, + AllowOrigins: cfg.AllowOrigins, + AllowHeaders: cfg.AllowHeaders, + AllowMethods: cfg.AllowMethods, + AllowCredentials: true, })) return &Server{ diff --git a/txutils/utils.go b/txutils/utils.go index a96e5e7..7af4f5d 100644 --- a/txutils/utils.go +++ b/txutils/utils.go @@ -1,6 +1,10 @@ package txutils import ( + "fmt" + + comettypes "github.com/cometbft/cometbft/types" + "github.com/cosmos/cosmos-sdk/client" sdk "github.com/cosmos/cosmos-sdk/types" authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing" @@ -37,3 +41,7 @@ func ChangeMsgsFromTx(txConfig client.TxConfig, tx authsigning.Tx, msgs []sdk.Ms return builder.GetTx(), nil } + +func TxHash(txBytes []byte) string { + return fmt.Sprintf("%X", comettypes.Tx(txBytes).Hash()) +} diff --git a/txutils/utils_test.go b/txutils/utils_test.go new file mode 100644 index 0000000..d70c096 --- /dev/null +++ b/txutils/utils_test.go @@ -0,0 +1,36 @@ +package txutils + +import ( + "testing" + + "github.com/initia-labs/OPinit/x/opchild" + opchildtypes "github.com/initia-labs/OPinit/x/opchild/types" + "github.com/initia-labs/opinit-bots/keys" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/client/tx" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/auth" +) + +func TestChangeMsgsFromTx(t *testing.T) { + unlock := keys.SetSDKConfigContext("init") + _, txConfig, err := keys.CreateCodec([]keys.RegisterInterfaces{ + auth.AppModuleBasic{}.RegisterInterfaces, + opchild.AppModuleBasic{}.RegisterInterfaces, + }) + require.NoError(t, err) + unlock() + + txf := tx.Factory{}.WithChainID("test_chain").WithTxConfig(txConfig) + txb, err := txf.BuildUnsignedTx(&opchildtypes.MsgUpdateOracle{Sender: "init1hrasklz3tr6s9rls4r8fjuf0k4zuha6w9rude5"}) + require.NoError(t, err) + tx := txb.GetTx() + require.Len(t, tx.GetMsgs(), 1) + require.Equal(t, sdk.MsgTypeURL(tx.GetMsgs()[0]), "/opinit.opchild.v1.MsgUpdateOracle") + tx, err = ChangeMsgsFromTx(txConfig, tx, []sdk.Msg{&opchildtypes.MsgFinalizeTokenDeposit{}, &opchildtypes.MsgExecuteMessages{}}) + require.NoError(t, err) + require.Len(t, tx.GetMsgs(), 2) + require.Equal(t, sdk.MsgTypeURL(tx.GetMsgs()[0]), "/opinit.opchild.v1.MsgFinalizeTokenDeposit") + require.Equal(t, sdk.MsgTypeURL(tx.GetMsgs()[1]), "/opinit.opchild.v1.MsgExecuteMessages") +} diff --git a/types/const.go b/types/const.go index 96a8ea5..2871fe6 100644 --- a/types/const.go +++ b/types/const.go @@ -6,8 +6,7 @@ const ( BatchName = "batch" MerkleName = "merkle" - DAHostName = "da_host" - DACelestiaName = "da_celestia" + DAName = "da" MsgUpdateOracleTypeUrl = "/opinit.opchild.v1.MsgUpdateOracle" MsgAuthzExecTypeUrl = "/cosmos.authz.v1beta1.MsgExec" diff --git a/types/context.go b/types/context.go index 3f3aa55..ed9b61b 100644 --- a/types/context.go +++ b/types/context.go @@ -4,33 +4,98 @@ import ( "context" "time" + "go.uber.org/zap" "golang.org/x/sync/errgroup" ) -type contextKey string +type Context struct { + baseCtx context.Context -var ( - ContextKeyErrGrp = contextKey("ErrGrp") - ContextKeyPollingInterval = contextKey("PollingInterval") - ContextKeyTxTimeout = contextKey("TxTimeout") -) + logger *zap.Logger -func WithErrGrp(ctx context.Context, errGrp *errgroup.Group) context.Context { - return context.WithValue(ctx, ContextKeyErrGrp, errGrp) + errGrp *errgroup.Group + 
pollingInterval time.Duration + txTimeout time.Duration + homePath string } -func ErrGrp(ctx context.Context) *errgroup.Group { - return ctx.Value(ContextKeyErrGrp).(*errgroup.Group) +func NewContext(baseCtx context.Context, logger *zap.Logger, homePath string) Context { + return Context{ + baseCtx: baseCtx, + + logger: logger, + homePath: homePath, + } } -func WithPollingInterval(ctx context.Context, interval time.Duration) context.Context { - return context.WithValue(ctx, ContextKeyPollingInterval, interval) +var _ context.Context = (*Context)(nil) + +func (c Context) Value(key any) any { + return c.baseCtx.Value(key) } -func PollingInterval(ctx context.Context) time.Duration { - interval := ctx.Value(ContextKeyPollingInterval) - if interval == nil { - return 100 * time.Millisecond - } - return ctx.Value(ContextKeyPollingInterval).(time.Duration) +func (c Context) Deadline() (deadline time.Time, ok bool) { + return c.baseCtx.Deadline() +} + +func (c Context) Done() <-chan struct{} { + return c.baseCtx.Done() +} + +func (c Context) Err() error { + return c.baseCtx.Err() +} + +func (c Context) WithContext(ctx context.Context) Context { + c.baseCtx = ctx + return c +} + +func (c Context) WithLogger(logger *zap.Logger) Context { + c.logger = logger + return c +} + +func (c Context) WithErrGrp(errGrp *errgroup.Group) Context { + c.errGrp = errGrp + return c +} + +func (c Context) WithPollingInterval(interval time.Duration) Context { + c.pollingInterval = interval + return c +} + +func (c Context) WithTxTimeout(timeout time.Duration) Context { + c.txTimeout = timeout + return c +} + +func (c Context) WithHomePath(homePath string) Context { + c.homePath = homePath + return c +} + +func (c Context) Context() context.Context { + return c.baseCtx +} + +func (c Context) Logger() *zap.Logger { + return c.logger +} + +func (c Context) ErrGrp() *errgroup.Group { + return c.errGrp +} + +func (c Context) PollingInterval() time.Duration { + return c.pollingInterval +} + +func (c Context) TxTimeout() time.Duration { + return c.txTimeout +} + +func (c Context) HomePath() string { + return c.homePath } diff --git a/types/db.go b/types/db.go index 39d3537..92674e7 100644 --- a/types/db.go +++ b/types/db.go @@ -6,25 +6,32 @@ type KV struct { Value []byte } -// RawKV is a key-value pair without prefixing the key. 
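The context-key plumbing above gives way to a concrete types.Context that wraps a base context.Context and carries the logger, errgroup, polling interval, tx timeout, and home path as fields. One behavioral difference worth noting: PollingInterval now returns whatever was set (zero by default), where the old accessor fell back to 100ms. A wiring sketch using only what this file defines (logger and path are placeholders):

    // assumes "context", "time", and "go.uber.org/zap"
    ctx := types.NewContext(context.Background(), zap.NewNop(), "/path/to/home").
        WithPollingInterval(100 * time.Millisecond). // set explicitly; no implicit default
        WithTxTimeout(60 * time.Second)
    ctx.Logger().Info("configured")
    var _ context.Context = ctx // usable anywhere a context.Context is expected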
-type RawKV struct { - Key []byte - Value []byte +type BasicDB interface { + Get([]byte) ([]byte, error) + Set([]byte, []byte) error + Delete([]byte) error } type DB interface { - Get([]byte) ([]byte, error) - Set([]byte, []byte) error - RawBatchSet(...RawKV) error + BasicDB + + NewStage() CommitDB + BatchSet(...KV) error - Delete([]byte) error - Close() error - PrefixedIterate([]byte, []byte, func([]byte, []byte) (bool, error)) error - PrefixedReverseIterate([]byte, []byte, func([]byte, []byte) (bool, error)) error + Iterate([]byte, []byte, func([]byte, []byte) (bool, error)) error + ReverseIterate([]byte, []byte, func([]byte, []byte) (bool, error)) error SeekPrevInclusiveKey([]byte, []byte) ([]byte, []byte, error) WithPrefix([]byte) DB PrefixedKey([]byte) []byte UnprefixedKey([]byte) []byte - GetPath() string GetPrefix() []byte } + +type CommitDB interface { + BasicDB + + WithPrefixedKey(prefixedKey func(key []byte) []byte) CommitDB + Commit() error + Reset() + Len() int +} diff --git a/types/query.go b/types/query.go new file mode 100644 index 0000000..486e02b --- /dev/null +++ b/types/query.go @@ -0,0 +1,4 @@ +package types + +const QUERY_MAX_LIMIT_SIZE = 100 +const RAW_BLOCK_QUERY_MAX_SIZE = 100 diff --git a/types/retry.go b/types/retry.go index 1174934..9c15b8e 100644 --- a/types/retry.go +++ b/types/retry.go @@ -9,6 +9,7 @@ import ( const MaxRetryCount = 7 +// SleepWithRetry sleeps with exponential backoff. func SleepWithRetry(ctx context.Context, retry int) bool { // to avoid to sleep too long if retry > MaxRetryCount { diff --git a/types/time.go b/types/time.go new file mode 100644 index 0000000..daad436 --- /dev/null +++ b/types/time.go @@ -0,0 +1,8 @@ +package types + +import "time" + +// CurrentNanoTimestamp returns the current time in nanoseconds. +var CurrentNanoTimestamp = func() int64 { + return time.Now().UTC().UnixNano() +}
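Taken together, the db.go changes split the old interface into a BasicDB core (Get/Set/Delete), a DB that can mint staged batches via NewStage, and a CommitDB whose writes land only on Commit. A stage-and-commit sketch, assuming a types.DB obtained elsewhere (keys and values are placeholders):

    func stageWrites(db types.DB) error {
        stage := db.NewStage() // types.CommitDB: writes are buffered, not applied
        if err := stage.Set([]byte("key"), []byte("value")); err != nil {
            return err
        }
        if stage.Len() > 0 { // Len reports buffered writes; Reset would discard them
            return stage.Commit() // flush the buffer to the backing DB
        }
        return nil
    }

And because CurrentNanoTimestamp is declared as a package-level var rather than a plain func, tests can pin it for deterministic timestamps:

    types.CurrentNanoTimestamp = func() int64 { return 1700000000000000000 } // fixed clock for tests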