diff --git a/common/block/log.go b/common/block/log.go index fc954ab..c12d267 100644 --- a/common/block/log.go +++ b/common/block/log.go @@ -41,6 +41,10 @@ type Log struct { BlockNumber *uint256.Int `json:"blockNumber"` // hash of the transaction TxHash types.Hash `json:"transactionHash" gencodec:"required"` + + // Address of the transaction + TxAddress types.Address `json:"-"` + // index of the transaction in the block TxIndex uint `json:"transactionIndex" gencodec:"required"` // hash of the block in which the transaction was included diff --git a/common/block/receipt.go b/common/block/receipt.go index c263ec8..7d24dde 100644 --- a/common/block/receipt.go +++ b/common/block/receipt.go @@ -18,13 +18,18 @@ package block import ( "bytes" + "errors" "fmt" "github.com/amazechain/amc/api/protocol/types_pb" + "github.com/amazechain/amc/common/crypto" + "github.com/amazechain/amc/common/transaction" "github.com/amazechain/amc/common/types" "github.com/amazechain/amc/internal/avm/rlp" + "github.com/amazechain/amc/params" "github.com/amazechain/amc/utils" "github.com/golang/protobuf/proto" "github.com/holiman/uint256" + "math/big" ) const ( @@ -96,6 +101,56 @@ func (rs *Receipts) ToProtoMessage() proto.Message { } } +// DeriveFields fills the receipts with their computed fields based on consensus +// data and contextual infos like containing block and transactions. 
+func (rs Receipts) DeriveFields(config *params.ChainConfig, hash types.Hash, number uint64, txs []*transaction.Transaction) error { + signer := transaction.MakeSigner(config, new(big.Int).SetUint64(number)) + + logIndex := uint(0) + if len(txs) != len(rs) { + return errors.New("transaction and receipt count mismatch") + } + for i := 0; i < len(rs); i++ { + // The transaction type and hash can be retrieved from the transaction itself + rs[i].Type = txs[i].Type() + rs[i].TxHash = txs[i].Hash() + + //rs[i].EffectiveGasPrice = txs[i].inner.effectiveGasPrice(new(big.Int), baseFee) + + // block location fields + rs[i].BlockHash = hash + rs[i].BlockNumber = new(uint256.Int).SetUint64(number) + rs[i].TransactionIndex = uint(i) + + // The contract address can be derived from the transaction itself + if txs[i].To() == nil { + // Deriving the signer is expensive, only do if it's actually needed + from, _ := transaction.Sender(signer, txs[i]) + rs[i].ContractAddress = crypto.CreateAddress(from, txs[i].Nonce()) + } else { + rs[i].ContractAddress = types.Address{} + } + + // The used gas can be calculated based on previous r + if i == 0 { + rs[i].GasUsed = rs[i].CumulativeGasUsed + } else { + rs[i].GasUsed = rs[i].CumulativeGasUsed - rs[i-1].CumulativeGasUsed + } + + // The derived log fields can simply be set from the block and transaction + for j := 0; j < len(rs[i].Logs); j++ { + rs[i].Logs[j].BlockNumber = uint256.NewInt(number) + rs[i].Logs[j].BlockHash = hash + rs[i].Logs[j].TxHash = rs[i].TxHash + rs[i].Logs[j].TxIndex = uint(i) + rs[i].Logs[j].Index = logIndex + logIndex++ + } + } + return nil +} + type Receipt struct { // Consensus fields: These fields are defined by the Yellow Paper Type uint8 `json:"type,omitempty"` diff --git a/common/blockchain.go b/common/blockchain.go index 2f55bd3..a5cd3ce 100644 --- a/common/blockchain.go +++ b/common/blockchain.go @@ -68,6 +68,7 @@ type IBlockChain interface { GetDepositInfo(address types.Address) (*uint256.Int, *uint256.Int) 
GetAccountRewardUnpaid(account types.Address) (*uint256.Int, error) + RewardsOfEpoch(number *uint256.Int, lastEpoch *uint256.Int) (map[types.Address]*uint256.Int, error) } type IMiner interface { diff --git a/common/events.go b/common/events.go index bd9fd3a..1361e38 100644 --- a/common/events.go +++ b/common/events.go @@ -57,3 +57,7 @@ type ChainHighestBlock struct { type MinedEntireEvent struct { Entire state.EntireCode } + +type ChainSideEvent struct { + Block *block.Block +} diff --git a/conf/genesis_config.go b/conf/genesis_config.go index d422bd8..78d23a1 100644 --- a/conf/genesis_config.go +++ b/conf/genesis_config.go @@ -25,6 +25,7 @@ import ( type GenesisBlockConfig struct { Config *params.ChainConfig `json:"config" yaml:"config"` + // ChainID uint64 `json:"chainID" yaml:"chainID"` Nonce uint64 `json:"nonce"` Timestamp uint64 `json:"timestamp"` diff --git a/contracts/deposit/contract.go b/contracts/deposit/contract.go index b5916ab..5049f59 100644 --- a/contracts/deposit/contract.go +++ b/contracts/deposit/contract.go @@ -20,11 +20,11 @@ import ( "bytes" "context" "embed" + "errors" "github.com/amazechain/amc/common" "github.com/amazechain/amc/common/crypto" "github.com/amazechain/amc/common/crypto/bls" "github.com/amazechain/amc/common/hexutil" - "github.com/amazechain/amc/common/transaction" "github.com/amazechain/amc/common/types" "github.com/amazechain/amc/conf" "github.com/amazechain/amc/log" @@ -92,7 +92,7 @@ func GetDepositInfo(tx kv.Tx, addr types.Address) *Info { rewardPerBlock = new(uint256.Int).Add(rewardPerBlock, uint256.NewInt(params.Wei)) // maxRewardPerEpoch = new(uint256.Int).Mul(rewardPerBlock, uint256.NewInt(FiveHundredDepositMaxTaskPerEpoch)) - case 10: //todo + case 10, 0: //todo return nil default: panic("wrong deposit amount") @@ -178,15 +178,22 @@ func (d Deposit) eventLoop() { if nil != d.consensusConfig.APos && bytes.Compare(l.Address[:], depositContractByes[:]) == 0 { log.Trace("log event topic[0]= ", "hash", l.Topics[0], 
"depositEventSignature", depositEventSignature, "withdrawnSignature", withdrawnSignature) if l.Topics[0] == depositEventSignature { - d.handleDepositEvent(l.TxHash, l.Data) + d.handleDepositEvent(l.TxHash, l.TxAddress, l.Data) } else if l.Topics[0] == withdrawnSignature { - d.handleWithdrawnEvent(l.TxHash, l.Data) + d.handleWithdrawnEvent(l.TxHash, l.TxAddress, l.Data) } } } case logRemovedEvent := <-d.rmLogsCh: for _, l := range logRemovedEvent.Logs { - log.Info("logEvent", "address", l.Address, "data", l.Data, "") + if nil != d.consensusConfig.APos && bytes.Compare(l.Address[:], depositContractByes[:]) == 0 { + log.Trace("log event topic[0]= ", "hash", l.Topics[0], "depositEventSignature", depositEventSignature, "withdrawnSignature", withdrawnSignature) + if l.Topics[0] == depositEventSignature { + d.handleUndoDepositEvent(l.TxHash, l.TxAddress, l.Data) + } else if l.Topics[0] == withdrawnSignature { + d.handleUndoWithdrawnEvent(l.TxHash, l.TxAddress, l.Data) + } + } } case <-d.logsSub.Err(): return @@ -198,57 +205,95 @@ func (d Deposit) eventLoop() { } } -func (d Deposit) handleDepositEvent(txHash types.Hash, data []byte) { +func (d Deposit) verifySignature(sig []byte, pub []byte, depositAmount *uint256.Int) error { // 1 + signature, err := bls.SignatureFromBytes(sig) + if err != nil { + log.Warn("cannot unpack BLS signature", "signature", hexutil.Encode(sig), "err", err) + return err + } + // 2 + publicKey, err := bls.PublicKeyFromBytes(pub) + if err != nil { + log.Warn("cannot unpack BLS publicKey", "publicKey", hexutil.Encode(pub), "err", err) + return err + } + // 3 + log.Trace("DepositEvent verify:", "signature", hexutil.Encode(signature.Marshal()), "publicKey", hexutil.Encode(publicKey.Marshal()), "msg", hexutil.Encode(depositAmount.Bytes())) + if !signature.Verify(publicKey, depositAmount.Bytes()) { + return errors.New("cannot Verify signature") + } + + return nil +} + +func (d Deposit) handleDepositEvent(txHash types.Hash, txAddress types.Address, data 
[]byte) { + pb, amount, sig, err := UnpackDepositLogData(data) if err != nil { log.Warn("cannot unpack deposit log data") return } - // 2 - signature, err := bls.SignatureFromBytes(sig) + + if err := d.verifySignature(sig, pb, amount); err != nil { + log.Error("cannot Verify signature", "signature", hexutil.Encode(sig), "publicKey", hexutil.Encode(pb), "message", hexutil.Encode(amount.Bytes()), "err", err) + return + } + + rwTx, err := d.db.BeginRw(d.ctx) + defer rwTx.Rollback() if err != nil { - log.Warn("cannot unpack BLS signature", "signature", hexutil.Encode(sig), "err", err) + log.Error("cannot open db", "err", err) return } - // 3 - publicKey, err := bls.PublicKeyFromBytes(pb) + + var pub types.PublicKey + _ = pub.SetBytes(pb) + // + if err = rawdb.DoDeposit(rwTx, txAddress, pub, amount); err != nil { + log.Error("cannot modify database", "err", err) + } + if err = rwTx.Commit(); err != nil { + log.Error("cannot commit tx", "err", err) + } +} + +func (d Deposit) handleUndoDepositEvent(txHash types.Hash, txAddress types.Address, data []byte) { + pb, amount, sig, err := UnpackDepositLogData(data) if err != nil { - log.Warn("cannot unpack BLS publicKey", "publicKey", hexutil.Encode(pb), "err", err) + log.Warn("cannot unpack deposit log data") return } - // 4 - log.Trace("DepositEvent verify:", "signature", hexutil.Encode(signature.Marshal()), "publicKey", hexutil.Encode(publicKey.Marshal()), "msg", hexutil.Encode(amount.Bytes())) - if signature.Verify(publicKey, amount.Bytes()) { - var tx *transaction.Transaction - rwTx, err := d.db.BeginRw(d.ctx) - defer rwTx.Rollback() - if err != nil { - log.Error("cannot open db", "err", err) - return - } - tx, _, _, _, err = rawdb.ReadTransactionByHash(rwTx, txHash) - if err != nil { - log.Error("rawdb.ReadTransactionByHash", "err", err, "hash", txHash) - } + if err := d.verifySignature(sig, pb, amount); err != nil { + log.Error("cannot Verify signature", "signature", hexutil.Encode(sig), "publicKey", hexutil.Encode(pb), 
"message", hexutil.Encode(amount.Bytes()), "err", err) + return + } - if tx != nil { - log.Trace("add Deposit info", "address", tx.From(), "amount", amount.String()) + rwTx, err := d.db.BeginRw(d.ctx) + defer rwTx.Rollback() + if err != nil { + log.Error("cannot open db", "err", err) + return + } - var pub types.PublicKey - pub.SetBytes(publicKey.Marshal()) - // - rawdb.PutDeposit(rwTx, *tx.From(), pub, *amount) - rwTx.Commit() - } - } else { - log.Error("DepositEvent cannot Verify signature", "signature", hexutil.Encode(sig), "publicKey", hexutil.Encode(pb), "message", hexutil.Encode(amount.Bytes()), "err", err) + // + if err = rawdb.UndoDeposit(rwTx, txAddress, amount); err != nil { + log.Error("cannot modify database", "err", err) + } + if err = rwTx.Commit(); err != nil { + log.Error("cannot commit tx", "err", err) } + } -func (d Deposit) handleWithdrawnEvent(txHash types.Hash, data []byte) { - var tx *transaction.Transaction +func (d Deposit) handleWithdrawnEvent(txHash types.Hash, txAddress types.Address, data []byte) { + // 1 + amount, err := UnpackWithdrawnLogData(data) + if err != nil { + log.Warn("cannot unpack deposit log data") + return + } rwTx, err := d.db.BeginRw(d.ctx) defer rwTx.Rollback() @@ -256,20 +301,32 @@ func (d Deposit) handleWithdrawnEvent(txHash types.Hash, data []byte) { log.Error("cannot open db", "err", err) return } - tx, _, _, _, err = rawdb.ReadTransactionByHash(rwTx, txHash) + + err = rawdb.DoWithdrawn(rwTx, txAddress, amount) if err != nil { - log.Error("rawdb.ReadTransactionByHash", "err", err, "hash", txHash) + log.Error("cannot Withdrawn deposit when handle Withdrawn Event", "err", err) return } - if tx == nil { - log.Error("cannot find Transaction", "err", err, "hash", txHash) - return + if err = rwTx.Commit(); nil != err { + log.Error("cannot commit when handle Withdrawn Event", "err", err) } +} - err = rawdb.DeleteDeposit(rwTx, *tx.From()) +func (d Deposit) handleUndoWithdrawnEvent(txHash types.Hash, txAddress types.Address, 
data []byte) { + amount, err := UnpackWithdrawnLogData(data) if err != nil { - log.Error("cannot delete deposit", "err", err) + log.Warn("cannot unpack deposit log data") + return + } + rwTx, err := d.db.BeginRw(d.ctx) + defer rwTx.Rollback() + + if err = rawdb.UndoWithdrawn(rwTx, txAddress, amount); err != nil { + log.Error("cannot Undo Withdrawn deposit when handle remove Withdrawn log Event", "err", err) return } - rwTx.Commit() + + if err = rwTx.Commit(); nil != err { + log.Error("cannot commit when handle Withdrawn Event", "err", err) + } } diff --git a/contracts/deposit/logs.go b/contracts/deposit/logs.go index c068503..a4aa345 100644 --- a/contracts/deposit/logs.go +++ b/contracts/deposit/logs.go @@ -47,3 +47,25 @@ func UnpackDepositLogData(data []byte) (pubkey []byte, amount *uint256.Int, sign return unpackedLogs[0].([]byte), amount, unpackedLogs[2].([]byte), nil } + +// UnpackWithdrawnLogData unpacks the data from a deposit log using the ABI decoder. +func UnpackWithdrawnLogData(data []byte) (amount *uint256.Int, err error) { + reader := bytes.NewReader(depositAbiCode) + contractAbi, err := abi.JSON(reader) + if err != nil { + return nil, errors.Wrap(err, "unable to generate contract abi") + } + + unpackedLogs, err := contractAbi.Unpack("WithdrawnEvent", data) + if err != nil { + return nil, errors.Wrap(err, "unable to unpack logs") + } + amount, overflow := uint256.FromBig(unpackedLogs[0].(*big.Int)) + if overflow { + return nil, errors.New("unable to unpack amount") + } + + log.Debug("unpacked WithdrawnEvent Logs", "message", hexutil.Encode(amount.Bytes())) + + return amount, nil +} diff --git a/internal/blockchain.go b/internal/blockchain.go index 166d311..861946e 100644 --- a/internal/blockchain.go +++ b/internal/blockchain.go @@ -20,6 +20,8 @@ import ( "context" "errors" "fmt" + "github.com/amazechain/amc/common/math" + "github.com/amazechain/amc/conf" "github.com/amazechain/amc/contracts/deposit" "github.com/golang/protobuf/proto" 
"github.com/holiman/uint256" @@ -80,6 +82,7 @@ const ( type BlockChain struct { chainConfig *params.ChainConfig + engineConf *conf.ConsensusConfig ctx context.Context cancel context.CancelFunc genesisBlock block2.IBlock @@ -136,7 +139,7 @@ func (bc *BlockChain) Engine() consensus.Engine { return bc.engine } -func NewBlockChain(ctx context.Context, genesisBlock block2.IBlock, engine consensus.Engine, downloader common.IDownloader, db kv.RwDB, pubsub common.IPubSub, config *params.ChainConfig) (common.IBlockChain, error) { +func NewBlockChain(ctx context.Context, genesisBlock block2.IBlock, engine consensus.Engine, downloader common.IDownloader, db kv.RwDB, pubsub common.IPubSub, config *conf.Config) (common.IBlockChain, error) { c, cancel := context.WithCancel(ctx) var current *block2.Block _ = db.View(c, func(tx kv.Tx) error { @@ -154,7 +157,8 @@ func NewBlockChain(ctx context.Context, genesisBlock block2.IBlock, engine conse numberCache, _ := lru.New[types.Hash, uint64](numberCacheLimit) headerCache, _ := lru.New[types.Hash, *block2.Header](headerCacheLimit) bc := &BlockChain{ - chainConfig: config, // Chain & network configuration + chainConfig: config.GenesisBlockCfg.Config, // Chain & network configuration + engineConf: config.GenesisBlockCfg.Engine, genesisBlock: genesisBlock, blocks: []block2.IBlock{}, //currentBlock: current, @@ -181,8 +185,8 @@ func NewBlockChain(ctx context.Context, genesisBlock block2.IBlock, engine conse bc.currentBlock.Store(current) bc.forker = NewForkChoice(bc, nil) //bc.process = avm.NewVMProcessor(ctx, bc, engine) - bc.process = NewStateProcessor(config, bc, engine) - bc.validator = NewBlockValidator(config, bc, engine) + bc.process = NewStateProcessor(config.GenesisBlockCfg.Config, bc, engine) + bc.validator = NewBlockValidator(config.GenesisBlockCfg.Config, bc, engine) return bc, nil } @@ -956,7 +960,7 @@ func (bc *BlockChain) insertChain(chain []block2.IBlock) (int, error) { // head full block(new pivot point). 
for block != nil && bc.skipBlock(err) { log.Debug("Writing previously known block", "number", block.Number64(), "hash", block.Hash()) - if err := bc.writeKnownBlock(nil, block); err != nil { + if err := bc.writeKnownBlock(block); err != nil { return it.index, err } lastCanon = block @@ -1176,7 +1180,7 @@ func (bc *BlockChain) insertChain(chain []block2.IBlock) (int, error) { // insertSideChain func (bc *BlockChain) insertSideChain(block block2.IBlock, it *insertIterator) (int, error) { var ( - externTd uint256.Int + externTd *uint256.Int lastBlock = block current = bc.CurrentBlock() ) @@ -1196,7 +1200,7 @@ func (bc *BlockChain) insertSideChain(block block2.IBlock, it *insertIterator) ( // we can get it directly, and not (like further below) use // the parent and then add the block on top pt := bc.GetTd(block.Hash(), block.Number64()) - externTd = *pt + externTd = pt continue } if canonical != nil && canonical.StateRoot() == block.StateRoot() { @@ -1215,14 +1219,14 @@ func (bc *BlockChain) insertSideChain(block block2.IBlock, it *insertIterator) ( return it.index, errors.New("sidechain ghost-state attack") } } - if externTd.Cmp(uint256.NewInt(0)) == 0 { - externTd = *bc.GetTd(block.ParentHash(), uint256.NewInt(0).Sub(block.Number64(), uint256.NewInt(1))) + if externTd == nil { + externTd = bc.GetTd(block.ParentHash(), uint256.NewInt(0).Sub(block.Number64(), uint256.NewInt(1))) } - externTd = *externTd.Add(&externTd, block.Difficulty()) + externTd = externTd.Add(externTd, block.Difficulty()) if !bc.HasBlock(block.Hash(), block.Number64().Uint64()) { start := time.Now() - if err := bc.WriteBlockWithoutState(block); err != nil { + if err := bc.WriteBlockWithoutState(block, externTd); err != nil { return it.index, err } log.Debug("Injected sidechain block", "number", block.Number64(), "hash", block.Hash(), @@ -1248,7 +1252,7 @@ func (bc *BlockChain) insertSideChain(block block2.IBlock, it *insertIterator) ( numbers []uint64 ) parent := it.previous() - for parent != nil 
&& !bc.HasState(parent.StateRoot()) { + for parent != nil && !bc.HasState(parent.Hash()) { hashes = append(hashes, parent.Hash()) numbers = append(numbers, parent.Number64().Uint64()) @@ -1321,7 +1325,7 @@ func (bc *BlockChain) recoverAncestors(block block2.IBlock) (types.Hash, error) } // WriteBlockWithoutState without state -func (bc *BlockChain) WriteBlockWithoutState(block block2.IBlock) (err error) { +func (bc *BlockChain) WriteBlockWithoutState(block block2.IBlock, td *uint256.Int) (err error) { if bc.insertStopped() { return errInsertionInterrupted } @@ -1329,6 +1333,7 @@ func (bc *BlockChain) WriteBlockWithoutState(block block2.IBlock) (err error) { // return err //} return bc.ChainDB.Update(bc.ctx, func(tx kv.RwTx) error { + rawdb.WriteTd(tx, block.Hash(), block.Number64().Uint64(), td) if err := rawdb.WriteBlock(tx, block.(*block2.Block)); err != nil { return err } @@ -1403,7 +1408,7 @@ func (bc *BlockChain) writeBlockWithState(block block2.IBlock, receipts []*block if reorg { // Reorganise the chain if the parent is not the head block if block.ParentHash() != bc.CurrentBlock().Hash() { - if err := bc.reorg(nil, bc.CurrentBlock(), block); err != nil { + if err := bc.reorg(bc.CurrentBlock(), block); err != nil { return NonStatTy, err } } @@ -1443,7 +1448,6 @@ func (bc *BlockChain) writeHeadBlock(tx kv.RwTx, block block2.IBlock) error { // return err //} rawdb.WriteHeadBlockHash(tx, block.Hash()) - rawdb.WriteHeadBlockHash(tx, block.Hash()) rawdb.WriteTxLookupEntries(tx, block.(*block2.Block)) if err = rawdb.WriteCanonicalHash(tx, block.Hash(), block.Number64().Uint64()); nil != err { @@ -1525,33 +1529,20 @@ func (bc *BlockChain) addFutureBlock(block block2.IBlock) error { // writeKnownBlock updates the head block flag with a known block // and introduces chain reorg if necessary. 
-func (bc *BlockChain) writeKnownBlock(tx kv.RwTx, block block2.IBlock) error { - var notExternalTx bool +func (bc *BlockChain) writeKnownBlock(block block2.IBlock) error { var err error - if nil == tx { - tx, err = bc.ChainDB.BeginRw(bc.ctx) - if nil != err { - return err - } - defer tx.Rollback() - notExternalTx = true - } current := bc.CurrentBlock() if block.ParentHash() != current.Hash() { - if err := bc.reorg(tx, current, block); err != nil { + if err := bc.reorg(current, block); err != nil { return err } } - if err = bc.writeHeadBlock(tx, block); nil != err { - return err - } - if notExternalTx { - if err = tx.Commit(); nil != err { - return err - } - } - return nil + bc.DB().Update(bc.ctx, func(tx kv.RwTx) error { + err = bc.writeHeadBlock(tx, block) + return nil + }) + return err } // reorg takes two blocks, an old chain and a new chain and will reconstruct the @@ -1559,7 +1550,7 @@ func (bc *BlockChain) writeKnownBlock(tx kv.RwTx, block block2.IBlock) error { // potential missing transactions and post an event about them. // Note the new head block won't be processed here, callers need to handle it // externally. 
-func (bc *BlockChain) reorg(tx kv.RwTx, oldBlock, newBlock block2.IBlock) error { +func (bc *BlockChain) reorg(oldBlock, newBlock block2.IBlock) error { var ( newChain block2.Blocks oldChain block2.Blocks @@ -1591,17 +1582,7 @@ func (bc *BlockChain) reorg(tx kv.RwTx, oldBlock, newBlock block2.IBlock) error return fmt.Errorf("invalid new chain") } - var useExternalTx bool var err error - if tx == nil { - tx, err = bc.ChainDB.BeginRw(bc.ctx) - if nil != err { - return err - } - defer tx.Rollback() - useExternalTx = false - } - // Both sides of the reorg are at the same number, reduce both until the common // ancestor is found for { @@ -1620,12 +1601,12 @@ func (bc *BlockChain) reorg(tx kv.RwTx, oldBlock, newBlock block2.IBlock) error // Step back with both chains //oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.Number64().Uint64()-1) - oldBlock = rawdb.ReadBlock(tx, oldBlock.ParentHash(), oldBlock.Number64().Uint64()-1) + oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.Number64().Uint64()-1) if oldBlock == nil { return fmt.Errorf("invalid old chain") } //newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.Number64().Uint64()-1) - newBlock = rawdb.ReadBlock(tx, newBlock.ParentHash(), newBlock.Number64().Uint64()-1) + newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.Number64().Uint64()-1) if newBlock == nil { return fmt.Errorf("invalid new chain") } @@ -1652,45 +1633,103 @@ func (bc *BlockChain) reorg(tx kv.RwTx, oldBlock, newBlock block2.IBlock) error } // Insert the new chain(except the head block(reverse order)), // taking care of the proper incremental order. - for i := len(newChain) - 1; i >= 1; i-- { - // Insert the block in the canonical way, re-writing history - bc.writeHeadBlock(tx, newChain[i]) - // Collect the new added transactions. 
- for _, t := range newChain[i].Transactions() { - h := t.Hash() - addedTxs = append(addedTxs, h) + // collect oldblock event + // Deleted logs + blocks: + var deletedLogs []*block2.Log + recoverMap := make(map[types.Address]*uint256.Int) + for i := len(oldChain) - 1; i >= 0; i-- { + // Collect deleted logs for notification + if logs := bc.collectLogs(oldChain[i].(*block2.Block), true); len(logs) > 0 { + deletedLogs = append(deletedLogs, logs...) } - } - //return bc.ChainDB.Update(bc.ctx, func(txw kv.RwTx) error { - // Delete useless indexes right now which includes the non-canonical - // transaction indexes, canonical chain indexes which above the head. - for _, t := range types.HashDifference(deletedTxs, addedTxs) { - rawdb.DeleteTxLookupEntry(tx, t) - } + if bc.chainConfig.IsBeijing(oldChain[i].Number64().Uint64()) { + beijing, _ := uint256.FromBig(bc.chainConfig.BeijingBlock) + if new(uint256.Int).Mod(new(uint256.Int).Sub(oldChain[i].Number64(), beijing), uint256.NewInt(bc.engineConf.APos.RewardEpoch)). + Cmp(uint256.NewInt(0)) == 0 { + last := new(uint256.Int).Sub(oldChain[i].Number64(), uint256.NewInt(bc.engineConf.APos.RewardEpoch)) + rewardMap, err := bc.RewardsOfEpoch(oldChain[i].Number64(), last) + if nil != err { + return err + } - // Delete all hash markers that are not part of the new canonical chain. - // Because the reorg function does not handle new chain head, all hash - // markers greater than or equal to new chain head should be deleted. 
- number := commonBlock.Number64().Uint64() - if len(newChain) > 1 { - number = newChain[1].Number64().Uint64() - } - for i := number + 1; ; i++ { - hash, _ := rawdb.ReadCanonicalHash(tx, i) - if hash == (types.Hash{}) { - break + for _, r := range oldChain[i].Body().Reward() { + if _, ok := recoverMap[r.Address]; !ok { + recoverMap[r.Address], err = bc.GetAccountRewardUnpaid(r.Address) + if nil != err { + return err + } + } + + high := new(uint256.Int).Add(recoverMap[r.Address], r.Amount) + if _, ok := rewardMap[r.Address]; ok { + recoverMap[r.Address] = high.Sub(high, rewardMap[r.Address]) + } + } + } } - rawdb.TruncateCanonicalHash(tx, i, false) } - - if !useExternalTx { - if err = tx.Commit(); nil != err { + reInsert := make([]block2.IBlock, 0, len(newChain)) + err = bc.DB().Update(bc.ctx, func(tx kv.RwTx) error { + if err := state.UnwindState(context.Background(), tx, bc.CurrentBlock().Number64().Uint64(), newChain[len(newChain)-1].Number64().Uint64()); nil != err { + return fmt.Errorf("uwind state failed, start: %d, end: %d, %v", newChain[len(newChain)-1].Number64().Uint64(), bc.CurrentBlock().Number64().Uint64(), err) + } + if err := bc.writeHeadBlock(tx, commonBlock); nil != err { return err } + + for i := len(newChain) - 1; i >= 1; i-- { + // Collect the new added transactions. + for _, t := range newChain[i].Transactions() { + h := t.Hash() + addedTxs = append(addedTxs, h) + } + reInsert = append(reInsert, newChain[i]) + } + + //return bc.ChainDB.Update(bc.ctx, func(txw kv.RwTx) error { + // Delete useless indexes right now which includes the non-canonical + // transaction indexes, canonical chain indexes which above the head. + for _, t := range types.HashDifference(deletedTxs, addedTxs) { + rawdb.DeleteTxLookupEntry(tx, t) + } + + // Delete all hash markers that are not part of the new canonical chain. + // Because the reorg function does not handle new chain head, all hash + // markers greater than or equal to new chain head should be deleted. 
+ number := commonBlock.Number64().Uint64() + if len(newChain) > 1 { + number = newChain[1].Number64().Uint64() + } + for i := number + 1; ; i++ { + hash, _ := rawdb.ReadCanonicalHash(tx, i) + if hash == (types.Hash{}) { + break + } + rawdb.TruncateCanonicalHash(tx, i, false) + } + + // unwind reward + for k, v := range recoverMap { + rawdb.PutAccountReward(tx, k, v) + } + return nil + }) + if nil != err { + return err + } + for i := len(oldChain) - 1; i >= 0; i-- { + // Also send event for blocks removed from the canon chain. + event.GlobalEvent.Send(&common.ChainSideEvent{Block: oldChain[i].(*block2.Block)}) + } + if len(deletedLogs) > 0 { + event.GlobalEvent.Send(&common.RemovedLogsEvent{Logs: deletedLogs}) } + if _, err := bc.insertChain(reInsert); nil != err { + return err + } return nil } func (bc *BlockChain) Quit() <-chan struct{} { @@ -1727,3 +1766,73 @@ func (bc *BlockChain) GetAccountRewardUnpaid(account types.Address) (*uint256.In }) return value, err } + +func (bc *BlockChain) RewardsOfEpoch(number *uint256.Int, lastEpoch *uint256.Int) (map[types.Address]*uint256.Int, error) { + // total reword for address this epoch + rewardMap := make(map[types.Address]*uint256.Int, 0) + depositeMap := map[types.Address]struct { + reward *uint256.Int + maxReward *uint256.Int + }{} + + currentNr := number.Clone() + currentNr = currentNr.Sub(currentNr, uint256.NewInt(1)) + endNumber := lastEpoch.Clone() + for currentNr.Cmp(endNumber) >= 0 { + block, err := bc.GetBlockByNumber(currentNr) + if nil != err { + return nil, err + } + + verifiers := block.Body().Verifier() + for _, verifier := range verifiers { + _, ok := depositeMap[verifier.Address] + if !ok { + low, max := bc.GetDepositInfo(verifier.Address) + if low == nil || max == nil { + continue + } + depositeMap[verifier.Address] = struct { + reward *uint256.Int + maxReward *uint256.Int + }{reward: low, maxReward: max} + + log.Debug("account deposite infos", "addr", verifier.Address, "perblock", low, "perepoch", max) 
+ } + + addrReward, ok := rewardMap[verifier.Address] + if !ok { + addrReward = uint256.NewInt(0) + } + + rewardMap[verifier.Address] = math.Min256(addrReward.Add(addrReward, depositeMap[verifier.Address].reward), depositeMap[verifier.Address].maxReward.Clone()) + } + + currentNr.SubUint64(currentNr, 1) + } + return rewardMap, nil +} + +// collectLogs collects the logs that were generated or removed during +// the processing of a block. These logs are later announced as deleted or reborn. +func (bc *BlockChain) collectLogs(b *block2.Block, removed bool) []*block2.Log { + var receipts block2.Receipts + bc.DB().View(context.Background(), func(tx kv.Tx) error { + receipts = rawdb.ReadRawReceipts(tx, b.Number64().Uint64()) + return nil + }) + + receipts.DeriveFields(bc.chainConfig, b.Hash(), b.Number64().Uint64(), b.Transactions()) + + var logs []*block2.Log + for _, receipt := range receipts { + for _, log := range receipt.Logs { + l := *log + if removed { + l.Removed = true + } + logs = append(logs, &l) + } + } + return logs +} diff --git a/internal/consensus/apos/consensus.go b/internal/consensus/apos/consensus.go index c4035c6..48080f5 100644 --- a/internal/consensus/apos/consensus.go +++ b/internal/consensus/apos/consensus.go @@ -2,7 +2,6 @@ package apos import ( "github.com/amazechain/amc/common/block" - "github.com/amazechain/amc/common/math" "github.com/amazechain/amc/common/types" "github.com/amazechain/amc/conf" "github.com/amazechain/amc/internal/consensus" @@ -15,49 +14,53 @@ import ( func AccumulateRewards(r *Reward, number *uint256.Int, chain consensus.ChainHeaderReader) (map[types.Address]*uint256.Int, map[types.Address]*uint256.Int, error) { - rewardMap := make(map[types.Address]*uint256.Int, 0) + // total reword for address this epoch + //var rewardMap map[types.Address]*uint256.Int unpayMap := make(map[types.Address]*uint256.Int, 0) - endNumber := new(uint256.Int).Sub(number, r.rewardEpoch) - //calculate last batch but this one - currentNr := 
number.Clone() - currentNr.SubUint64(currentNr, 1) - - depositeMap := map[types.Address]struct { - reward *uint256.Int - maxReward *uint256.Int - }{} - for currentNr.Cmp(endNumber) >= 0 { - block, err := chain.GetBlockByNumber(currentNr) - if nil != err { - return nil, nil, err - } - - verifiers := block.Body().Verifier() - for _, verifier := range verifiers { - _, ok := depositeMap[verifier.Address] - if !ok { - low, max := chain.GetDepositInfo(verifier.Address) - if low == nil || max == nil { - continue - } - depositeMap[verifier.Address] = struct { - reward *uint256.Int - maxReward *uint256.Int - }{reward: low, maxReward: max} - - log.Debug("account deposite infos", "addr", verifier.Address, "perblock", low, "perepoch", max) - } - - addrReward, ok := rewardMap[verifier.Address] - if !ok { - addrReward = uint256.NewInt(0) - } - - rewardMap[verifier.Address] = math.Min256(addrReward.Add(addrReward, depositeMap[verifier.Address].reward), depositeMap[verifier.Address].maxReward.Clone()) - } - - currentNr.SubUint64(currentNr, 1) + ////calculate last batch but this one + //currentNr := number.Clone() + //currentNr.SubUint64(currentNr, 1) + // + //depositeMap := map[types.Address]struct { + // reward *uint256.Int + // maxReward *uint256.Int + //}{} + //for currentNr.Cmp(endNumber) >= 0 { + // block, err := chain.GetBlockByNumber(currentNr) + // if nil != err { + // return nil, nil, err + // } + // + // verifiers := block.Body().Verifier() + // for _, verifier := range verifiers { + // _, ok := depositeMap[verifier.Address] + // if !ok { + // low, max := chain.GetDepositInfo(verifier.Address) + // if low == nil || max == nil { + // continue + // } + // depositeMap[verifier.Address] = struct { + // reward *uint256.Int + // maxReward *uint256.Int + // }{reward: low, maxReward: max} + // + // log.Debug("account deposite infos", "addr", verifier.Address, "perblock", low, "perepoch", max) + // } + // + // addrReward, ok := rewardMap[verifier.Address] + // if !ok { + // 
addrReward = uint256.NewInt(0) + // } + // + // rewardMap[verifier.Address] = math.Min256(addrReward.Add(addrReward, depositeMap[verifier.Address].reward), depositeMap[verifier.Address].maxReward.Clone()) + // } + // + // currentNr.SubUint64(currentNr, 1) + //} + rewardMap, err := chain.RewardsOfEpoch(number, endNumber) + if nil != err { + return nil, nil, err } for addr, amount := range rewardMap { diff --git a/internal/consensus/consensus.go b/internal/consensus/consensus.go index 1e40183..4a9f4fa 100644 --- a/internal/consensus/consensus.go +++ b/internal/consensus/consensus.go @@ -54,6 +54,7 @@ type ChainHeaderReader interface { GetDepositInfo(address types.Address) (*uint256.Int, *uint256.Int) GetAccountRewardUnpaid(account types.Address) (*uint256.Int, error) + RewardsOfEpoch(number *uint256.Int, lastEpoch *uint256.Int) (map[types.Address]*uint256.Int, error) } // ChainReader defines a small collection of methods needed to access the local diff --git a/internal/forkchoice.go b/internal/forkchoice.go index 7409946..e2cfb76 100644 --- a/internal/forkchoice.go +++ b/internal/forkchoice.go @@ -18,6 +18,7 @@ package internal import ( crand "crypto/rand" + "errors" "github.com/holiman/uint256" "math" "math/big" @@ -79,9 +80,9 @@ func (f *ForkChoice) ReorgNeeded(current block2.IHeader, header block2.IHeader) externTd = f.chain.GetTd(header.Hash(), header.Number64()) ) log.Tracef("ForkChoice.ReorgNeeded: localID = %d, externTd = %d", localTD.Uint64(), externTd.Uint64()) - //if localTD == nil || externTd == nil { - // return false, errors.New("missing td") - //} + if localTD == nil || externTd == nil { + return false, errors.New("missing td") + } // Accept the new header as the chain head if the transition // is already triggered. We assume all the headers after the // transition come from the trusted consensus layer. 
@@ -89,6 +90,13 @@ func (f *ForkChoice) ReorgNeeded(current block2.IHeader, header block2.IHeader) // return true, nil //} // If the total difficulty is higher than our known, add it to the canonical chain + + if diff := externTd.Cmp(localTD); diff > 0 { + return true, nil + } else if diff < 0 { + return false, nil + } + // Second clause in the if statement reduces the vulnerability to selfish mining. // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf reorg := externTd.Cmp(localTD) > 0 diff --git a/internal/node/node.go b/internal/node/node.go index bccec16..0c38bb6 100644 --- a/internal/node/node.go +++ b/internal/node/node.go @@ -216,7 +216,7 @@ func NewNode(ctx context.Context, cfg *conf.Config) (*Node, error) { return nil, fmt.Errorf("invalid engine name %s", cfg.GenesisBlockCfg.Engine.EngineName) } - bc, _ := internal.NewBlockChain(ctx, genesisBlock, engine, downloader, chainKv, pubsubServer, cfg.GenesisBlockCfg.Config) + bc, _ := internal.NewBlockChain(ctx, genesisBlock, engine, downloader, chainKv, pubsubServer, cfg) pool, _ := txspool.NewTxsPool(ctx, bc) //todo diff --git a/modules/changeset/storage_changeset.go b/modules/changeset/storage_changeset.go index d0dea76..0da7e5b 100644 --- a/modules/changeset/storage_changeset.go +++ b/modules/changeset/storage_changeset.go @@ -96,7 +96,7 @@ func FindStorage(c kv.CursorDupSort, blockNumber uint64, k []byte) ([]byte, erro // RewindDataPlain generates rewind data for all plain buckets between the timestamp // timestapSrc is the current timestamp, and timestamp Dst is where we rewind func RewindData(db kv.Tx, timestampSrc, timestampDst uint64, changes *etl.Collector, quit <-chan struct{}) error { - if err := walkAndCollect( + if err := WalkAndCollect( changes.Collect, db, modules.AccountChangeSet, timestampDst+1, timestampSrc, @@ -105,7 +105,7 @@ func RewindData(db kv.Tx, timestampSrc, timestampDst uint64, changes *etl.Collec return err } - if err := walkAndCollect( + if err := 
WalkAndCollect( changes.Collect, db, modules.StorageChangeSet, timestampDst+1, timestampSrc, @@ -117,7 +117,7 @@ func RewindData(db kv.Tx, timestampSrc, timestampDst uint64, changes *etl.Collec return nil } -func walkAndCollect(collectorFunc func([]byte, []byte) error, db kv.Tx, bucket string, timestampDst, timestampSrc uint64, quit <-chan struct{}) error { +func WalkAndCollect(collectorFunc func([]byte, []byte) error, db kv.Tx, bucket string, timestampDst, timestampSrc uint64, quit <-chan struct{}) error { return ForRange(db, bucket, timestampDst, timestampSrc+1, func(bl uint64, k, v []byte) error { if err := libcommon.Stopped(quit); err != nil { return err diff --git a/modules/rawdb/accessors_chain.go b/modules/rawdb/accessors_chain.go index 537c6fd..924bd6a 100644 --- a/modules/rawdb/accessors_chain.go +++ b/modules/rawdb/accessors_chain.go @@ -28,7 +28,6 @@ import ( "github.com/amazechain/amc/log" "github.com/amazechain/amc/modules" "github.com/golang/protobuf/proto" - "github.com/holiman/uint256" "math" "time" @@ -384,14 +383,16 @@ func ReadBodyByNumber(db kv.Tx, number uint64) (*block.Body, uint64, uint32, err } func ReadBodyWithTransactions(db kv.Getter, hash types.Hash, number uint64) (*block.Body, error) { - canonicalHash, err := ReadCanonicalHash(db, number) - if err != nil { - return nil, fmt.Errorf("read canonical hash failed: %d, %w", number, err) + body, baseTxId, txAmount := ReadBody(db, hash, number) + if body == nil { + return nil, nil } - if canonicalHash == hash { - return ReadCanonicalBodyWithTransactions(db, hash, number), nil + var err error + body.Txs, err = CanonicalTransactions(db, baseTxId, txAmount) + if err != nil { + return nil, err } - return nil, fmt.Errorf("mismatch hash: %v", hash) + return body, err } func ReadCanonicalBodyWithTransactions(db kv.Getter, hash types.Hash, number uint64) *block.Body { diff --git a/modules/rawdb/accessors_deposit.go b/modules/rawdb/accessors_deposit.go index b4ba822..1d840e8 100644 --- 
a/modules/rawdb/accessors_deposit.go +++ b/modules/rawdb/accessors_deposit.go @@ -20,28 +20,12 @@ import ( "fmt" "github.com/amazechain/amc/common/crypto/bls" "github.com/amazechain/amc/common/types" + "github.com/amazechain/amc/common/u256" "github.com/amazechain/amc/modules" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" ) -//// PutDeposit -//func PutDeposit(db kv.Putter, key []byte, val []byte) error { -// return db.Put(modules.Deposit, key, val) -//} - -func PutDeposit(db kv.Putter, addr types.Address, pub types.PublicKey, amount uint256.Int) error { - - data := make([]byte, types.PublicKeyLength+amount.ByteLen()) - copy(data[:types.PublicKeyLength], pub.Bytes()) - copy(data[types.PublicKeyLength:], amount.Bytes()) - // - if err := db.Put(modules.Deposit, addr[:], data); err != nil { - return fmt.Errorf("failed to store address Deposit: %w", err) - } - return nil -} - // GetDeposit func GetDeposit(db kv.Getter, addr types.Address) (types.PublicKey, *uint256.Int, error) { valBytes, err := db.GetOne(modules.Deposit, addr[:]) @@ -64,15 +48,66 @@ func GetDeposit(db kv.Getter, addr types.Address) (types.PublicKey, *uint256.Int return *pubkey, amount, nil } -// DeleteDeposit removes Deposit data associated with an address. 
-func DeleteDeposit(db kv.Deleter, addr types.Address) error { - return db.Delete(modules.Deposit, addr[:]) +func DoDeposit(db kv.Putter, addr types.Address, pub types.PublicKey, amount *uint256.Int) error { + + data := make([]byte, types.PublicKeyLength+amount.ByteLen()) + copy(data[:types.PublicKeyLength], pub.Bytes()) + copy(data[types.PublicKeyLength:], amount.Bytes()) + // + if err := db.Put(modules.Deposit, addr[:], data); err != nil { + return fmt.Errorf("failed to store address Deposit: %w", err) + } + return nil +} + +func UndoDeposit(db kv.Putter, addr types.Address, amount *uint256.Int) error { + pub, dbAmount, err := GetDeposit(db.(kv.Getter), addr) + if err != nil { + return fmt.Errorf("cannot get deposit") + } + if dbAmount.Cmp(amount) != 0 { + return fmt.Errorf("dbAmount != withdrawnAmount") + } + return DoDeposit(db, addr, pub, new(uint256.Int)) +} + +// DoWithdrawn zeroes the deposit amount stored for an address, after verifying the stored amount equals withdrawnAmount (it does not delete the record). +func DoWithdrawn(db kv.RwTx, addr types.Address, withdrawnAmount *uint256.Int) error { + pub, dbAmount, err := GetDeposit(db, addr) + if err != nil { + return fmt.Errorf("cannot get deposit") + } + if dbAmount.Cmp(withdrawnAmount) != 0 { + return fmt.Errorf("dbAmount != withdrawnAmount") + } + return DoDeposit(db, addr, pub, new(uint256.Int)) +} + +func UndoWithdrawn(db kv.RwTx, addr types.Address, unDoWithdrawnAmount *uint256.Int) error { + pub, dbAmount, err := GetDeposit(db, addr) + if err != nil { + return fmt.Errorf("cannot get deposit") + } + if dbAmount.Cmp(u256.Num0) != 0 { + return fmt.Errorf("db deposit != 0 when undo withdrawn") + } + return DoDeposit(db, addr, pub, unDoWithdrawnAmount) } // IsDeposit is deposit account func IsDeposit(db kv.Getter, addr types.Address) bool { - is, _ := db.Has(modules.Deposit, addr[:]) - return is + // + //is, _ := db.Has(modules.Deposit, addr[:]) + _, amount, err := GetDeposit(db, addr) + if err != nil { + return false + } + + if amount.Cmp(u256.Num0) == 0 { + return false + } + // + return
true } func DepositNum(tx kv.Tx) (uint64, error) { diff --git a/modules/state/history.go b/modules/state/history.go index 82b0b66..54e76e1 100644 --- a/modules/state/history.go +++ b/modules/state/history.go @@ -18,17 +18,28 @@ package state import ( "bytes" + "context" "encoding/binary" "errors" "fmt" "github.com/RoaringBitmap/roaring/roaring64" "github.com/amazechain/amc/common/account" "github.com/amazechain/amc/common/types" + "github.com/amazechain/amc/log" "github.com/amazechain/amc/modules" "github.com/amazechain/amc/modules/changeset" "github.com/amazechain/amc/modules/ethdb" "github.com/amazechain/amc/modules/ethdb/bitmapdb" + "github.com/amazechain/amc/modules/rawdb" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/dbg" + "github.com/ledgerwatch/erigon-lib/common/hexutility" + "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" + "golang.org/x/exp/slices" + "os" + "runtime" + "time" ) func GetAsOf(tx kv.Tx, indexC kv.Cursor, changesC kv.CursorDupSort, storage bool, key []byte, timestamp uint64) ([]byte, error) { @@ -355,5 +366,254 @@ func WalkAsOfAccounts(tx kv.Tx, startAddress types.Address, timestamp uint64, wa } } return err +} + +func UnwindState(ctx context.Context, tx kv.RwTx, current, unwindPoint uint64) error { + // unwind history + if err := UnwindHistory(tx, modules.AccountChangeSet, unwindPoint, ctx.Done()); nil != err { + return fmt.Errorf("unwind AccountHistoryIndex failed, %v", err) + } + + if err := UnwindHistory(tx, modules.StorageChangeSet, unwindPoint, ctx.Done()); nil != err { + return fmt.Errorf("unwind StorageHistoryIndex failed, %v", err) + } + + // unwind account and storage + storageKeyLength := types.AddressLength + types.IncarnationLength + types.HashLength + changes := etl.NewCollector("unwind state", os.TempDir(), etl.NewOldestEntryBuffer(etl.BufferOptimalSize)) + defer changes.Close() + //errRewind := changeset.RewindData(tx, current, unwindPoint, changes, 
ctx.Done()) + //if errRewind != nil { + // return fmt.Errorf("getting rewind data: %w", errRewind) + //} + if err := changeset.WalkAndCollect( + changes.Collect, + tx, modules.AccountChangeSet, + unwindPoint, current, + ctx.Done(), + ); err != nil { + return fmt.Errorf("getting account rewind data: %w", err) + } + if err := changes.Load(tx, modules.Account, func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + if len(k) == 20 { + if len(v) > 0 { + var acc account.StateAccount + if err := acc.DecodeForStorage(v); err != nil { + return err + } + + // Fetch the code hash + recoverCodeHashPlain(&acc, tx, k) + var address types.Address + copy(address[:], k) + + // cleanup contract code bucket + original, err := NewPlainStateReader(tx).ReadAccountData(address) + if err != nil { + return fmt.Errorf("read account for %x: %w", address, err) + } + if original != nil { + // clean up all the code incarnations original incarnation and the new one + for incarnation := original.Incarnation; incarnation > acc.Incarnation && incarnation > 0; incarnation-- { + err = tx.Delete(kv.PlainContractCode, modules.PlainGenerateStoragePrefix(address[:], incarnation)) + if err != nil { + return fmt.Errorf("writeAccountPlain for %x: %w", address, err) + } + } + } + + newV := make([]byte, acc.EncodingLengthForStorage()) + acc.EncodeForStorage(newV) + //if accumulator != nil { + // accumulator.ChangeAccount(address, acc.Incarnation, newV) + //} + if err := next(k, k, newV); err != nil { + return err + } + } else { + //if accumulator != nil { + // var address common.Address + // copy(address[:], k) + // accumulator.DeleteAccount(address) + //} + if err := next(k, k, nil); err != nil { + return err + } + } + return nil + } + //if accumulator != nil { + // var address common.Address + // var incarnation uint64 + // var location common.Hash + // copy(address[:], k[:length.Addr]) + // incarnation = binary.BigEndian.Uint64(k[length.Addr:]) + // copy(location[:], 
k[length.Addr+length.Incarnation:]) + // log.Debug(fmt.Sprintf("un ch st: %x, %d, %x, %x\n", address, incarnation, location, common.Copy(v))) + // accumulator.ChangeStorage(address, incarnation, location, common.Copy(v)) + //} + if len(v) > 0 { + if err := next(k, k[:storageKeyLength], v); err != nil { + return err + } + } else { + if err := next(k, k[:storageKeyLength], nil); err != nil { + return err + } + } + return nil + + }, etl.TransformArgs{Quit: ctx.Done()}); err != nil { + return err + } + if err := changeset.WalkAndCollect( + changes.Collect, + tx, modules.StorageChangeSet, + unwindPoint, current, + ctx.Done(), + ); err != nil { + return fmt.Errorf("getting storage rewind data: %w", err) + } + if err := changes.Load(tx, modules.Storage, func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + if len(k) == 20 { + if len(v) > 0 { + var acc account.StateAccount + if err := acc.DecodeForStorage(v); err != nil { + return err + } + + // Fetch the code hash + recoverCodeHashPlain(&acc, tx, k) + var address types.Address + copy(address[:], k) + + // cleanup contract code bucket + original, err := NewPlainStateReader(tx).ReadAccountData(address) + if err != nil { + return fmt.Errorf("read account for %x: %w", address, err) + } + if original != nil { + // clean up all the code incarnations original incarnation and the new one + for incarnation := original.Incarnation; incarnation > acc.Incarnation && incarnation > 0; incarnation-- { + err = tx.Delete(kv.PlainContractCode, modules.PlainGenerateStoragePrefix(address[:], incarnation)) + if err != nil { + return fmt.Errorf("writeAccountPlain for %x: %w", address, err) + } + } + } + + newV := make([]byte, acc.EncodingLengthForStorage()) + acc.EncodeForStorage(newV) + //if accumulator != nil { + // accumulator.ChangeAccount(address, acc.Incarnation, newV) + //} + if err := next(k, k, newV); err != nil { + return err + } + } else { + //if accumulator != nil { + // var address common.Address + // 
copy(address[:], k) + // accumulator.DeleteAccount(address) + //} + if err := next(k, k, nil); err != nil { + return err + } + } + return nil + } + //if accumulator != nil { + // var address common.Address + // var incarnation uint64 + // var location common.Hash + // copy(address[:], k[:length.Addr]) + // incarnation = binary.BigEndian.Uint64(k[length.Addr:]) + // copy(location[:], k[length.Addr+length.Incarnation:]) + // log.Debug(fmt.Sprintf("un ch st: %x, %d, %x, %x\n", address, incarnation, location, common.Copy(v))) + // accumulator.ChangeStorage(address, incarnation, location, common.Copy(v)) + //} + if len(v) > 0 { + if err := next(k, k[:storageKeyLength], v); err != nil { + return err + } + } else { + if err := next(k, k[:storageKeyLength], nil); err != nil { + return err + } + } + return nil + }, etl.TransformArgs{Quit: ctx.Done()}); err != nil { + return err + } + + // unwind changeSet + if err := changeset.Truncate(tx, unwindPoint); err != nil { + return err + } + + // delete receipt and log + if err := rawdb.TruncateReceipts(tx, unwindPoint); err != nil { + return fmt.Errorf("truncate receipts: %w", err) + } + //if err := rawdb.TruncateBorReceipts(tx, u.UnwindPoint+1); err != nil { + // return fmt.Errorf("truncate bor receipts: %w", err) + //} + //if err := rawdb.DeleteNewerEpochs(tx, u.UnwindPoint+1); err != nil { + // return fmt.Errorf("delete newer epochs: %w", err) + //} + return nil +} + +func recoverCodeHashPlain(acc *account.StateAccount, db kv.Tx, key []byte) { + var address types.Address + copy(address[:], key) + if acc.Incarnation > 0 && acc.IsEmptyCodeHash() { + if codeHash, err2 := db.GetOne(modules.PlainContractCode, modules.PlainGenerateStoragePrefix(address[:], acc.Incarnation)); err2 == nil { + copy(acc.CodeHash[:], codeHash) + } + } +} + +func UnwindHistory(db kv.RwTx, csBucket string, to uint64, quitCh <-chan struct{}) error { + logEvery := time.NewTicker(30 * time.Second) + defer logEvery.Stop() + + updates := map[string]struct{}{} + 
if err := changeset.ForEach(db, csBucket, hexutility.EncodeTs(to), func(blockN uint64, k, v []byte) error { + select { + case <-logEvery.C: + var m runtime.MemStats + dbg.ReadMemStats(&m) + log.Info(fmt.Sprintf("[%s] Progress", "unwindhistory"), "number", blockN, "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys)) + case <-quitCh: + return common.ErrStopped + default: + } + k = modules.CompositeKeyWithoutIncarnation(k) + updates[string(k)] = struct{}{} + return nil + }); err != nil { + return err + } + + if err := truncateBitmaps64(db, changeset.Mapper[csBucket].IndexBucket, updates, to); err != nil { + return err + } + return nil +} + +func truncateBitmaps64(tx kv.RwTx, bucket string, inMem map[string]struct{}, to uint64) error { + keys := make([]string, 0, len(inMem)) + for k := range inMem { + keys = append(keys, k) + } + slices.Sort(keys) + for _, k := range keys { + if err := bitmapdb.TruncateRange64(tx, bucket, []byte(k), to+1); err != nil { + return fmt.Errorf("fail TruncateRange: bucket=%s, %w", bucket, err) + } + } + + return nil } diff --git a/modules/table.go b/modules/table.go index 9869186..9695b26 100644 --- a/modules/table.go +++ b/modules/table.go @@ -169,6 +169,8 @@ var AmcTableCfg = kv.TableCfg{ DupFromLen: 54, DupToLen: 34, }, + // + Deposit: {Flags: kv.DupSort}, } func AmcInit() {