diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go
index 903685dafc1..ac233e32f05 100644
--- a/cmd/evm/internal/t8ntool/transition.go
+++ b/cmd/evm/internal/t8ntool/transition.go
@@ -657,11 +657,11 @@ func CalculateStateRoot(tx kv.RwTx) (*libcommon.Hash, error) {
h.Sha.Write(k[length.Addr+length.Incarnation:])
//nolint:errcheck
h.Sha.Read(newK[length.Hash+length.Incarnation:])
- if err = tx.Put(kv.HashedStorage, newK, libcommon.CopyBytes(v)); err != nil {
+ if err = tx.Put(kv.HashedStorageDeprecated, newK, libcommon.CopyBytes(v)); err != nil {
return nil, fmt.Errorf("insert hashed key: %w", err)
}
} else {
- if err = tx.Put(kv.HashedAccounts, newK, libcommon.CopyBytes(v)); err != nil {
+ if err = tx.Put(kv.HashedAccountsDeprecated, newK, libcommon.CopyBytes(v)); err != nil {
return nil, fmt.Errorf("insert hashed key: %w", err)
}
}
diff --git a/cmd/integration/commands/refetence_db.go b/cmd/integration/commands/refetence_db.go
index 088cb931831..431ae1c03fa 100644
--- a/cmd/integration/commands/refetence_db.go
+++ b/cmd/integration/commands/refetence_db.go
@@ -41,12 +41,10 @@ import (
)
var stateBuckets = []string{
- kv.HashedAccounts,
- kv.HashedStorage,
+ kv.HashedAccountsDeprecated,
+ kv.HashedStorageDeprecated,
kv.ContractCode,
kv.PlainState,
- kv.AccountChangeSet,
- kv.StorageChangeSet,
kv.PlainContractCode,
kv.IncarnationMap,
kv.Code,
diff --git a/cmd/pics/state.go b/cmd/pics/state.go
index e3c75c30275..f2bfc5ac015 100644
--- a/cmd/pics/state.go
+++ b/cmd/pics/state.go
@@ -86,27 +86,23 @@ import (
}*/
var bucketLabels = map[string]string{
- kv.Receipts: "Receipts",
- kv.Log: "Event Logs",
- kv.E2AccountsHistory: "History Of Accounts",
- kv.E2StorageHistory: "History Of Storage",
- kv.Headers: "Headers",
- kv.HeaderCanonical: "Canonical headers",
- kv.HeaderTD: "Headers TD",
- kv.BlockBody: "Block Bodies",
- kv.HeaderNumber: "Header Numbers",
- kv.TxLookup: "Transaction Index",
- kv.Code: "Code Of Contracts",
- kv.SyncStageProgress: "Sync Progress",
- kv.PlainState: "Plain State",
- kv.HashedAccounts: "Hashed Accounts",
- kv.HashedStorage: "Hashed Storage",
- kv.TrieOfAccounts: "Intermediate Hashes Of Accounts",
- kv.TrieOfStorage: "Intermediate Hashes Of Storage",
- kv.AccountChangeSet: "Account Changes",
- kv.StorageChangeSet: "Storage Changes",
- kv.IncarnationMap: "Incarnations",
- kv.Senders: "Transaction Senders",
+ kv.Receipts: "Receipts",
+ kv.Log: "Event Logs",
+ kv.Headers: "Headers",
+ kv.HeaderCanonical: "Canonical headers",
+ kv.HeaderTD: "Headers TD",
+ kv.BlockBody: "Block Bodies",
+ kv.HeaderNumber: "Header Numbers",
+ kv.TxLookup: "Transaction Index",
+ kv.Code: "Code Of Contracts",
+ kv.SyncStageProgress: "Sync Progress",
+ kv.PlainState: "Plain State",
+ kv.HashedAccountsDeprecated: "Hashed Accounts",
+ kv.HashedStorageDeprecated: "Hashed Storage",
+ kv.TrieOfAccounts: "Intermediate Hashes Of Accounts",
+ kv.TrieOfStorage: "Intermediate Hashes Of Storage",
+ kv.IncarnationMap: "Incarnations",
+ kv.Senders: "Transaction Senders",
}
/*dbutils.PlainContractCode,
diff --git a/cmd/state/commands/check_enc.go b/cmd/state/commands/check_enc.go
deleted file mode 100644
index b39129a6761..00000000000
--- a/cmd/state/commands/check_enc.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2024 The Erigon Authors
-// This file is part of Erigon.
-//
-// Erigon is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Erigon is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with Erigon. If not, see <https://www.gnu.org/licenses/>.
-
-package commands
-
-import (
- "github.com/erigontech/erigon/cmd/state/verify"
- "github.com/spf13/cobra"
-)
-
-func init() {
- withDataDir(checkEncCmd)
- withStatsfile(checkEncCmd)
- rootCmd.AddCommand(checkEncCmd)
-}
-
-var checkEncCmd = &cobra.Command{
- Use: "checkEnc",
- Short: "Check changesets Encoding",
- RunE: func(cmd *cobra.Command, args []string) error {
- return verify.CheckEnc(chaindata)
- },
-}
diff --git a/cmd/state/commands/check_index.go b/cmd/state/commands/check_index.go
deleted file mode 100644
index e1e5db6abd7..00000000000
--- a/cmd/state/commands/check_index.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2024 The Erigon Authors
-// This file is part of Erigon.
-//
-// Erigon is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Erigon is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with Erigon. If not, see <https://www.gnu.org/licenses/>.
-
-package commands
-
-import (
- "github.com/erigontech/erigon-lib/common"
- "github.com/erigontech/erigon/cmd/state/verify"
- "github.com/spf13/cobra"
-)
-
-func init() {
- withDataDir(checkIndexCMD)
- withIndexBucket(checkIndexCMD)
- withCSBucket(checkIndexCMD)
- rootCmd.AddCommand(checkIndexCMD)
-}
-
-var checkIndexCMD = &cobra.Command{
- Use: "checkIndex",
- Short: "Index checker",
- RunE: func(cmd *cobra.Command, args []string) error {
- ctx, _ := common.RootContext()
- return verify.CheckIndex(ctx, chaindata, changeSetBucket, indexBucket)
- },
-}
diff --git a/cmd/state/commands/global_flags_vars.go b/cmd/state/commands/global_flags_vars.go
index 43d0364df9b..df1656abf53 100644
--- a/cmd/state/commands/global_flags_vars.go
+++ b/cmd/state/commands/global_flags_vars.go
@@ -25,15 +25,12 @@ import (
)
var (
- datadirCli string
- chaindata string
- statsfile string
- block uint64
- changeSetBucket string
- indexBucket string
- snapshotsCli bool
- chain string
- logdir string
+ datadirCli string
+ chaindata string
+ statsfile string
+ block uint64
+ indexBucket string
+ chain string
)
func must(err error) {
@@ -59,14 +56,6 @@ func withStatsfile(cmd *cobra.Command) {
must(cmd.MarkFlagFilename("statsfile", "csv"))
}
-func withCSBucket(cmd *cobra.Command) {
- cmd.Flags().StringVar(&changeSetBucket, "changeset-bucket", kv.AccountChangeSet, kv.AccountChangeSet+" for account and "+kv.StorageChangeSet+" for storage")
-}
-
func withIndexBucket(cmd *cobra.Command) {
cmd.Flags().StringVar(&indexBucket, "index-bucket", kv.E2AccountsHistory, kv.E2AccountsHistory+" for account and "+kv.E2StorageHistory+" for storage")
}
-
-func withChain(cmd *cobra.Command) {
- cmd.Flags().StringVar(&chain, "chain", "", "pick a chain to assume (mainnet, sepolia, etc.)")
-}
diff --git a/cmd/state/verify/check_changeset_enc.go b/cmd/state/verify/check_changeset_enc.go
deleted file mode 100644
index 25adbd66aaf..00000000000
--- a/cmd/state/verify/check_changeset_enc.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2024 The Erigon Authors
-// This file is part of Erigon.
-//
-// Erigon is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Erigon is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with Erigon. If not, see <https://www.gnu.org/licenses/>.
-
-package verify
-
-import (
- "bytes"
- "context"
- "encoding/binary"
- "fmt"
- "runtime"
- "sync/atomic"
- "time"
-
- "github.com/erigontech/erigon-lib/common"
-
- "github.com/erigontech/erigon-lib/kv"
- "github.com/erigontech/erigon-lib/kv/mdbx"
- historyv22 "github.com/erigontech/erigon-lib/kv/temporal/historyv2"
- "golang.org/x/sync/errgroup"
-)
-
-type Walker interface {
- Walk(f func(k, v []byte) error) error
- Find(k []byte) ([]byte, error)
-}
-
-func CheckEnc(chaindata string) error {
- db := mdbx.MustOpen(chaindata)
- defer db.Close()
- var (
- currentSize uint64
- newSize uint64
- )
-
- //set test methods
- chainDataStorageDecoder := historyv22.Mapper[kv.StorageChangeSet].Decode
- testStorageEncoder := historyv22.Mapper[kv.StorageChangeSet].Encode
- testStorageDecoder := historyv22.Mapper[kv.StorageChangeSet].Decode
-
- startTime := time.Now()
- ch := make(chan struct {
- k []byte
- v []byte
- })
- stop := make(chan struct{})
- //run workers
- g, ctx := errgroup.WithContext(context.Background())
- for i := 0; i < runtime.NumCPU()-1; i++ {
- g.Go(func() error {
- for {
- select {
- case v := <-ch:
- blockNum, kk, vv, err := chainDataStorageDecoder(v.k, v.v)
- if err != nil {
- return err
- }
- cs := historyv22.NewStorageChangeSet()
- _ = cs.Add(v.k, v.v)
- atomic.AddUint64(¤tSize, uint64(len(v.v)))
- innerErr := testStorageEncoder(blockNum, cs, func(k, v []byte) error {
- atomic.AddUint64(&newSize, uint64(len(v)))
- _, a, b, err := testStorageDecoder(k, v)
- if err != nil {
- return err
- }
- if !bytes.Equal(kk, a) {
- return fmt.Errorf("incorrect order. block: %d", blockNum)
- }
- if !bytes.Equal(vv, b) {
- return fmt.Errorf("incorrect value. block: %d, key: %x", blockNum, a)
- }
- return nil
- })
- if innerErr != nil {
- return innerErr
- }
-
- case <-ctx.Done():
- return nil
- case <-stop:
- return nil
- }
- }
- })
- }
-
- g.Go(func() error {
- var i uint64
- defer func() {
- close(stop)
- }()
- return db.View(context.Background(), func(tx kv.Tx) error {
- return tx.ForEach(kv.StorageChangeSet, []byte{}, func(k, v []byte) error {
- if i%100_000 == 0 {
- blockNum := binary.BigEndian.Uint64(k)
- fmt.Printf("Processed %s, block number %d, current %d, new %d, time %s\n",
- common.PrettyCounter(i),
- blockNum,
- atomic.LoadUint64(&currentSize),
- atomic.LoadUint64(&newSize),
- time.Since(startTime))
- }
-
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
-
- }
-
- i++
- ch <- struct {
- k []byte
- v []byte
- }{k: k, v: v}
- return nil
- })
- })
- })
- err := g.Wait()
- if err != nil {
- return err
- }
-
- fmt.Println("-- Final size --")
- fmt.Println("Current:", currentSize)
- fmt.Println("New:", newSize)
-
- return nil
-}
diff --git a/cmd/state/verify/check_indexes.go b/cmd/state/verify/check_indexes.go
deleted file mode 100644
index 4e29551f26c..00000000000
--- a/cmd/state/verify/check_indexes.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2024 The Erigon Authors
-// This file is part of Erigon.
-//
-// Erigon is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Erigon is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with Erigon. If not, see <https://www.gnu.org/licenses/>.
-
-package verify
-
-import (
- "context"
- "fmt"
- "time"
-
- libcommon "github.com/erigontech/erigon-lib/common"
-
- "github.com/erigontech/erigon-lib/kv/dbutils"
-
- "github.com/erigontech/erigon-lib/common"
- "github.com/erigontech/erigon-lib/kv/bitmapdb"
- "github.com/erigontech/erigon-lib/kv/mdbx"
- "github.com/erigontech/erigon-lib/kv/temporal/historyv2"
-)
-
-func CheckIndex(ctx context.Context, chaindata string, changeSetBucket string, indexBucket string) error {
- db := mdbx.MustOpen(chaindata)
- defer db.Close()
- tx, err := db.BeginRo(context.Background())
- if err != nil {
- return err
- }
- defer tx.Rollback()
-
- startTime := time.Now()
-
- i := 0
- if err := historyv2.ForEach(tx, changeSetBucket, nil, func(blockN uint64, k, v []byte) error {
- i++
- if i%100_000 == 0 {
- fmt.Printf("Processed %s, %s\n", libcommon.PrettyCounter(blockN), time.Since(startTime))
- }
- select {
- default:
- case <-ctx.Done():
- return ctx.Err()
- }
-
- bm, innerErr := bitmapdb.Get64(tx, indexBucket, dbutils.CompositeKeyWithoutIncarnation(k), blockN-1, blockN+1)
- if innerErr != nil {
- return innerErr
- }
- if !bm.Contains(blockN) {
- return fmt.Errorf("%v,%v", blockN, common.Bytes2Hex(k))
- }
- return nil
- }); err != nil {
- return err
- }
-
- fmt.Println("Check was successful")
- return nil
-}
diff --git a/cmd/verkle/verkletrie/incrementAccount.go b/cmd/verkle/verkletrie/incrementAccount.go
index d714a9a5420..b1700ec1fa5 100644
--- a/cmd/verkle/verkletrie/incrementAccount.go
+++ b/cmd/verkle/verkletrie/incrementAccount.go
@@ -50,7 +50,7 @@ func IncrementAccount(vTx kv.RwTx, tx kv.Tx, workers uint64, verkleWriter *Verkl
}
defer cancelWorkers()
- accountCursor, err := tx.CursorDupSort(kv.AccountChangeSet)
+ accountCursor, err := tx.CursorDupSort(kv.AccountChangeSetDeprecated)
if err != nil {
return err
}
diff --git a/cmd/verkle/verkletrie/incrementStorage.go b/cmd/verkle/verkletrie/incrementStorage.go
index 462396e7f8b..81b59c14d06 100644
--- a/cmd/verkle/verkletrie/incrementStorage.go
+++ b/cmd/verkle/verkletrie/incrementStorage.go
@@ -52,7 +52,7 @@ func IncrementStorage(vTx kv.RwTx, tx kv.Tx, workers uint64, verkleWriter *Verkl
}
defer cancelWorkers()
- storageCursor, err := tx.CursorDupSort(kv.StorageChangeSet)
+ storageCursor, err := tx.CursorDupSort(kv.StorageChangeSetDeprecated)
if err != nil {
return libcommon.Hash{}, err
}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index d3b17010a21..9c1d9aba3a7 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -461,10 +461,10 @@ func CalcHashRootForTests(tx kv.RwTx, header *types.Header, histV4, trace bool)
}
defer domains.Close()
- if err := tx.ClearBucket(kv.HashedAccounts); err != nil {
+ if err := tx.ClearBucket(kv.HashedAccountsDeprecated); err != nil {
return hashRoot, fmt.Errorf("clear HashedAccounts bucket: %w", err)
}
- if err := tx.ClearBucket(kv.HashedStorage); err != nil {
+ if err := tx.ClearBucket(kv.HashedStorageDeprecated); err != nil {
return hashRoot, fmt.Errorf("clear HashedStorage bucket: %w", err)
}
if err := tx.ClearBucket(kv.TrieOfAccounts); err != nil {
@@ -497,7 +497,7 @@ func CalcHashRootForTests(tx kv.RwTx, header *types.Header, histV4, trace bool)
if err != nil {
return hashRoot, fmt.Errorf("clear HashedAccounts bucket: %w", err)
}
- if err := tx.Put(kv.HashedAccounts, newK, v); err != nil {
+ if err := tx.Put(kv.HashedAccountsDeprecated, newK, v); err != nil {
return hashRoot, fmt.Errorf("clear HashedAccounts bucket: %w", err)
}
}
@@ -516,7 +516,7 @@ func CalcHashRootForTests(tx kv.RwTx, header *types.Header, histV4, trace bool)
return hashRoot, fmt.Errorf("clear HashedStorage bucket: %w", err)
}
fmt.Printf("storage %x -> %x\n", k, newK)
- if err := tx.Put(kv.HashedStorage, newK, v); err != nil {
+ if err := tx.Put(kv.HashedStorageDeprecated, newK, v); err != nil {
return hashRoot, fmt.Errorf("clear HashedStorage bucket: %w", err)
}
@@ -525,7 +525,7 @@ func CalcHashRootForTests(tx kv.RwTx, header *types.Header, histV4, trace bool)
if trace {
if GenerateTrace {
fmt.Printf("State after %d================\n", header.Number)
- it, err := tx.Range(kv.HashedAccounts, nil, nil, order.Asc, kv.Unlim)
+ it, err := tx.Range(kv.HashedAccountsDeprecated, nil, nil, order.Asc, kv.Unlim)
if err != nil {
return hashRoot, err
}
@@ -537,7 +537,7 @@ func CalcHashRootForTests(tx kv.RwTx, header *types.Header, histV4, trace bool)
fmt.Printf("%x: %x\n", k, v)
}
fmt.Printf("..................\n")
- it, err = tx.Range(kv.HashedStorage, nil, nil, order.Asc, kv.Unlim)
+ it, err = tx.Range(kv.HashedStorageDeprecated, nil, nil, order.Asc, kv.Unlim)
if err != nil {
return hashRoot, err
}
diff --git a/erigon-lib/kv/mdbx/kv_abstract_test.go b/erigon-lib/kv/mdbx/kv_abstract_test.go
index 21d1992390f..463b4788fdf 100644
--- a/erigon-lib/kv/mdbx/kv_abstract_test.go
+++ b/erigon-lib/kv/mdbx/kv_abstract_test.go
@@ -225,7 +225,7 @@ func TestRemoteKvRange(t *testing.T) {
require := require.New(t)
require.NoError(writeDB.Update(ctx, func(tx kv.RwTx) error {
- wc, err := tx.RwCursorDupSort(kv.AccountChangeSet)
+ wc, err := tx.RwCursorDupSort(kv.TblAccountVals)
require.NoError(err)
require.NoError(wc.Append([]byte{1}, []byte{1}))
require.NoError(wc.Append([]byte{1}, []byte{2}))
@@ -235,7 +235,7 @@ func TestRemoteKvRange(t *testing.T) {
}))
require.NoError(db.View(ctx, func(tx kv.Tx) error {
- c, err := tx.Cursor(kv.AccountChangeSet)
+ c, err := tx.Cursor(kv.TblAccountVals)
require.NoError(err)
k, v, err := c.First()
@@ -245,7 +245,7 @@ func TestRemoteKvRange(t *testing.T) {
// it must be possible to Stream and manipulate cursors in same time
cnt := 0
- require.NoError(tx.ForEach(kv.AccountChangeSet, nil, func(_, _ []byte) error {
+ require.NoError(tx.ForEach(kv.TblAccountVals, nil, func(_, _ []byte) error {
if cnt == 0 {
k, v, err = c.Next()
require.NoError(err)
@@ -259,7 +259,7 @@ func TestRemoteKvRange(t *testing.T) {
// remote Tx must provide Snapshots-Isolation-Level: new updates are not visible for old readers
require.NoError(writeDB.Update(ctx, func(tx kv.RwTx) error {
- require.NoError(tx.Put(kv.AccountChangeSet, []byte{4}, []byte{1}))
+ require.NoError(tx.Put(kv.TblAccountVals, []byte{4}, []byte{1}))
return nil
}))
@@ -272,7 +272,7 @@ func TestRemoteKvRange(t *testing.T) {
err = db.View(ctx, func(tx kv.Tx) error {
cntRange := func(from, to []byte) (i int) {
- it, err := tx.Range(kv.AccountChangeSet, from, to, order.Asc, kv.Unlim)
+ it, err := tx.Range(kv.TblAccountVals, from, to, order.Asc, kv.Unlim)
require.NoError(err)
for it.HasNext() {
_, _, err = it.Next()
@@ -293,7 +293,7 @@ func TestRemoteKvRange(t *testing.T) {
// Limit
err = db.View(ctx, func(tx kv.Tx) error {
cntRange := func(from, to []byte) (i int) {
- it, err := tx.Range(kv.AccountChangeSet, from, to, order.Asc, 2)
+ it, err := tx.Range(kv.TblAccountVals, from, to, order.Asc, 2)
require.NoError(err)
for it.HasNext() {
_, _, err := it.Next()
@@ -313,7 +313,7 @@ func TestRemoteKvRange(t *testing.T) {
err = db.View(ctx, func(tx kv.Tx) error {
cntRange := func(from, to []byte) (i int) {
- it, err := tx.Range(kv.AccountChangeSet, from, to, order.Desc, 2)
+ it, err := tx.Range(kv.TblAccountVals, from, to, order.Desc, 2)
require.NoError(err)
for it.HasNext() {
_, _, err := it.Next()
diff --git a/erigon-lib/kv/membatch/database_test.go b/erigon-lib/kv/membatch/database_test.go
index 35919f62fcd..6d3cc09d3ff 100644
--- a/erigon-lib/kv/membatch/database_test.go
+++ b/erigon-lib/kv/membatch/database_test.go
@@ -35,7 +35,7 @@ import (
"github.com/stretchr/testify/require"
)
-var testBucket = kv.HashedAccounts
+var testBucket = kv.HashedAccountsDeprecated
var testValues = []string{"a", "1251", "\x00123\x00"}
func TestPutGet(t *testing.T) {
diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation_test.go b/erigon-lib/kv/membatchwithdb/memory_mutation_test.go
index 463d0f67783..cd4ce3cd532 100644
--- a/erigon-lib/kv/membatchwithdb/memory_mutation_test.go
+++ b/erigon-lib/kv/membatchwithdb/memory_mutation_test.go
@@ -33,10 +33,10 @@ import (
)
func initializeDbNonDupSort(rwTx kv.RwTx) {
- rwTx.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value"))
- rwTx.Put(kv.HashedAccounts, []byte("CAAA"), []byte("value1"))
- rwTx.Put(kv.HashedAccounts, []byte("CBAA"), []byte("value2"))
- rwTx.Put(kv.HashedAccounts, []byte("CCAA"), []byte("value3"))
+ rwTx.Put(kv.HeaderNumber, []byte("AAAA"), []byte("value"))
+ rwTx.Put(kv.HeaderNumber, []byte("CAAA"), []byte("value1"))
+ rwTx.Put(kv.HeaderNumber, []byte("CBAA"), []byte("value2"))
+ rwTx.Put(kv.HeaderNumber, []byte("CCAA"), []byte("value3"))
}
func TestPutAppendHas(t *testing.T) {
@@ -45,28 +45,28 @@ func TestPutAppendHas(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "", log.Root())
- require.NoError(t, batch.Append(kv.HashedAccounts, []byte("AAAA"), []byte("value1.5")))
+ require.NoError(t, batch.Append(kv.HeaderNumber, []byte("AAAA"), []byte("value1.5")))
//MDBX's APPEND checking only keys, not values
- require.NoError(t, batch.Append(kv.HashedAccounts, []byte("AAAA"), []byte("value1.3")))
+ require.NoError(t, batch.Append(kv.HeaderNumber, []byte("AAAA"), []byte("value1.3")))
- require.NoError(t, batch.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value1.3")))
- require.NoError(t, batch.Append(kv.HashedAccounts, []byte("CBAA"), []byte("value3.5")))
+ require.NoError(t, batch.Put(kv.HeaderNumber, []byte("AAAA"), []byte("value1.3")))
+ require.NoError(t, batch.Append(kv.HeaderNumber, []byte("CBAA"), []byte("value3.5")))
//MDBX's APPEND checking only keys, not values
- require.NoError(t, batch.Append(kv.HashedAccounts, []byte("CBAA"), []byte("value3.1")))
- require.NoError(t, batch.AppendDup(kv.HashedAccounts, []byte("CBAA"), []byte("value3.1")))
- require.Error(t, batch.Append(kv.HashedAccounts, []byte("AAAA"), []byte("value1.3")))
+ require.NoError(t, batch.Append(kv.HeaderNumber, []byte("CBAA"), []byte("value3.1")))
+ require.NoError(t, batch.AppendDup(kv.HeaderNumber, []byte("CBAA"), []byte("value3.1")))
+ require.Error(t, batch.Append(kv.HeaderNumber, []byte("AAAA"), []byte("value1.3")))
require.Nil(t, batch.Flush(context.Background(), rwTx))
- exist, err := batch.Has(kv.HashedAccounts, []byte("AAAA"))
+ exist, err := batch.Has(kv.HeaderNumber, []byte("AAAA"))
require.Nil(t, err)
require.Equal(t, exist, true)
- val, err := batch.GetOne(kv.HashedAccounts, []byte("AAAA"))
+ val, err := batch.GetOne(kv.HeaderNumber, []byte("AAAA"))
require.Nil(t, err)
require.Equal(t, val, []byte("value1.3"))
- exist, err = batch.Has(kv.HashedAccounts, []byte("KKKK"))
+ exist, err = batch.Has(kv.HeaderNumber, []byte("KKKK"))
require.Nil(t, err)
require.Equal(t, exist, false)
}
@@ -77,10 +77,10 @@ func TestLastMiningDB(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "", log.Root())
- batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4"))
- batch.Put(kv.HashedAccounts, []byte("BCAA"), []byte("value5"))
+ batch.Put(kv.HeaderNumber, []byte("BAAA"), []byte("value4"))
+ batch.Put(kv.HeaderNumber, []byte("BCAA"), []byte("value5"))
- cursor, err := batch.Cursor(kv.HashedAccounts)
+ cursor, err := batch.Cursor(kv.HeaderNumber)
require.NoError(t, err)
key, value, err := cursor.Last()
@@ -101,10 +101,10 @@ func TestLastMiningMem(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "", log.Root())
- batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4"))
- batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5"))
+ batch.Put(kv.HeaderNumber, []byte("BAAA"), []byte("value4"))
+ batch.Put(kv.HeaderNumber, []byte("DCAA"), []byte("value5"))
- cursor, err := batch.Cursor(kv.HashedAccounts)
+ cursor, err := batch.Cursor(kv.HeaderNumber)
require.NoError(t, err)
key, value, err := cursor.Last()
@@ -124,14 +124,14 @@ func TestDeleteMining(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "", log.Root())
- batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4"))
- batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5"))
- batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5"))
+ batch.Put(kv.HeaderNumber, []byte("BAAA"), []byte("value4"))
+ batch.Put(kv.HeaderNumber, []byte("DCAA"), []byte("value5"))
+ batch.Put(kv.HeaderNumber, []byte("FCAA"), []byte("value5"))
- batch.Delete(kv.HashedAccounts, []byte("BAAA"))
- batch.Delete(kv.HashedAccounts, []byte("CBAA"))
+ batch.Delete(kv.HeaderNumber, []byte("BAAA"))
+ batch.Delete(kv.HeaderNumber, []byte("CBAA"))
- cursor, err := batch.Cursor(kv.HashedAccounts)
+ cursor, err := batch.Cursor(kv.HeaderNumber)
require.NoError(t, err)
key, value, err := cursor.SeekExact([]byte("BAAA"))
@@ -150,17 +150,17 @@ func TestFlush(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "", log.Root())
- batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4"))
- batch.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value5"))
- batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5"))
+ batch.Put(kv.HeaderNumber, []byte("BAAA"), []byte("value4"))
+ batch.Put(kv.HeaderNumber, []byte("AAAA"), []byte("value5"))
+ batch.Put(kv.HeaderNumber, []byte("FCAA"), []byte("value5"))
require.NoError(t, batch.Flush(context.Background(), rwTx))
- value, err := rwTx.GetOne(kv.HashedAccounts, []byte("BAAA"))
+ value, err := rwTx.GetOne(kv.HeaderNumber, []byte("BAAA"))
require.NoError(t, err)
require.Equal(t, value, []byte("value4"))
- value, err = rwTx.GetOne(kv.HashedAccounts, []byte("AAAA"))
+ value, err = rwTx.GetOne(kv.HeaderNumber, []byte("AAAA"))
require.NoError(t, err)
require.Equal(t, value, []byte("value5"))
}
@@ -171,12 +171,12 @@ func TestForEach(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "", log.Root())
- batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5"))
+ batch.Put(kv.HeaderNumber, []byte("FCAA"), []byte("value5"))
require.NoError(t, batch.Flush(context.Background(), rwTx))
var keys []string
var values []string
- err := batch.ForEach(kv.HashedAccounts, []byte("XYAZ"), func(k, v []byte) error {
+ err := batch.ForEach(kv.HeaderNumber, []byte("XYAZ"), func(k, v []byte) error {
keys = append(keys, string(k))
values = append(values, string(v))
return nil
@@ -185,7 +185,7 @@ func TestForEach(t *testing.T) {
require.Nil(t, keys)
require.Nil(t, values)
- err = batch.ForEach(kv.HashedAccounts, []byte("CC"), func(k, v []byte) error {
+ err = batch.ForEach(kv.HeaderNumber, []byte("CC"), func(k, v []byte) error {
keys = append(keys, string(k))
values = append(values, string(v))
return nil
@@ -197,7 +197,7 @@ func TestForEach(t *testing.T) {
var keys1 []string
var values1 []string
- err = batch.ForEach(kv.HashedAccounts, []byte("A"), func(k, v []byte) error {
+ err = batch.ForEach(kv.HeaderNumber, []byte("A"), func(k, v []byte) error {
keys1 = append(keys1, string(k))
values1 = append(values1, string(v))
return nil
@@ -235,14 +235,14 @@ func TestPrefix(t *testing.T) {
initializeDbNonDupSort(rwTx)
- kvs1, err := rwTx.Prefix(kv.HashedAccounts, []byte("AB"))
+ kvs1, err := rwTx.Prefix(kv.HeaderNumber, []byte("AB"))
require.Nil(t, err)
defer kvs1.Close()
require.False(t, kvs1.HasNext())
var keys1 []string
var values1 []string
- kvs2, err := rwTx.Prefix(kv.HashedAccounts, []byte("AAAA"))
+ kvs2, err := rwTx.Prefix(kv.HeaderNumber, []byte("AAAA"))
require.Nil(t, err)
defer kvs2.Close()
for kvs2.HasNext() {
@@ -256,7 +256,7 @@ func TestPrefix(t *testing.T) {
var keys []string
var values []string
- kvs3, err := rwTx.Prefix(kv.HashedAccounts, []byte("C"))
+ kvs3, err := rwTx.Prefix(kv.HeaderNumber, []byte("C"))
require.Nil(t, err)
defer kvs3.Close()
for kvs3.HasNext() {
@@ -279,7 +279,7 @@ func TestForAmount(t *testing.T) {
var keys []string
var values []string
- err := batch.ForAmount(kv.HashedAccounts, []byte("C"), uint32(3), func(k, v []byte) error {
+ err := batch.ForAmount(kv.HeaderNumber, []byte("C"), uint32(3), func(k, v []byte) error {
keys = append(keys, string(k))
values = append(values, string(v))
return nil
@@ -291,7 +291,7 @@ func TestForAmount(t *testing.T) {
var keys1 []string
var values1 []string
- err = batch.ForAmount(kv.HashedAccounts, []byte("C"), uint32(10), func(k, v []byte) error {
+ err = batch.ForAmount(kv.HeaderNumber, []byte("C"), uint32(10), func(k, v []byte) error {
keys1 = append(keys1, string(k))
values1 = append(values1, string(v))
return nil
@@ -310,17 +310,17 @@ func TestGetOneAfterClearBucket(t *testing.T) {
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
- err := batch.ClearBucket(kv.HashedAccounts)
+ err := batch.ClearBucket(kv.HeaderNumber)
require.Nil(t, err)
- cond := batch.isTableCleared(kv.HashedAccounts)
+ cond := batch.isTableCleared(kv.HeaderNumber)
require.True(t, cond)
- val, err := batch.GetOne(kv.HashedAccounts, []byte("A"))
+ val, err := batch.GetOne(kv.HeaderNumber, []byte("A"))
require.Nil(t, err)
require.Nil(t, val)
- val, err = batch.GetOne(kv.HashedAccounts, []byte("AAAA"))
+ val, err = batch.GetOne(kv.HeaderNumber, []byte("AAAA"))
require.Nil(t, err)
require.Nil(t, val)
}
@@ -333,13 +333,13 @@ func TestSeekExactAfterClearBucket(t *testing.T) {
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
- err := batch.ClearBucket(kv.HashedAccounts)
+ err := batch.ClearBucket(kv.HeaderNumber)
require.Nil(t, err)
- cond := batch.isTableCleared(kv.HashedAccounts)
+ cond := batch.isTableCleared(kv.HeaderNumber)
require.True(t, cond)
- cursor, err := batch.RwCursor(kv.HashedAccounts)
+ cursor, err := batch.RwCursor(kv.HeaderNumber)
require.NoError(t, err)
key, val, err := cursor.SeekExact([]byte("AAAA"))
@@ -369,13 +369,13 @@ func TestFirstAfterClearBucket(t *testing.T) {
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
- err := batch.ClearBucket(kv.HashedAccounts)
+ err := batch.ClearBucket(kv.HeaderNumber)
require.Nil(t, err)
- err = batch.Put(kv.HashedAccounts, []byte("BBBB"), []byte("value5"))
+ err = batch.Put(kv.HeaderNumber, []byte("BBBB"), []byte("value5"))
require.Nil(t, err)
- cursor, err := batch.Cursor(kv.HashedAccounts)
+ cursor, err := batch.Cursor(kv.HeaderNumber)
require.NoError(t, err)
key, val, err := cursor.First()
@@ -397,19 +397,19 @@ func TestIncReadSequence(t *testing.T) {
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
- _, err := batch.IncrementSequence(kv.HashedAccounts, uint64(12))
+ _, err := batch.IncrementSequence(kv.HeaderNumber, uint64(12))
require.Nil(t, err)
- val, err := batch.ReadSequence(kv.HashedAccounts)
+ val, err := batch.ReadSequence(kv.HeaderNumber)
require.Nil(t, err)
require.Equal(t, val, uint64(12))
}
func initializeDbDupSort(rwTx kv.RwTx) {
- rwTx.Put(kv.AccountChangeSet, []byte("key1"), []byte("value1.1"))
- rwTx.Put(kv.AccountChangeSet, []byte("key3"), []byte("value3.1"))
- rwTx.Put(kv.AccountChangeSet, []byte("key1"), []byte("value1.3"))
- rwTx.Put(kv.AccountChangeSet, []byte("key3"), []byte("value3.3"))
+ rwTx.Put(kv.TblAccountVals, []byte("key1"), []byte("value1.1"))
+ rwTx.Put(kv.TblAccountVals, []byte("key3"), []byte("value3.1"))
+ rwTx.Put(kv.TblAccountVals, []byte("key1"), []byte("value1.3"))
+ rwTx.Put(kv.TblAccountVals, []byte("key3"), []byte("value3.3"))
}
func TestNext(t *testing.T) {
@@ -420,9 +420,9 @@ func TestNext(t *testing.T) {
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
- batch.Put(kv.AccountChangeSet, []byte("key1"), []byte("value1.2"))
+ batch.Put(kv.TblAccountVals, []byte("key1"), []byte("value1.2"))
- cursor, err := batch.CursorDupSort(kv.AccountChangeSet)
+ cursor, err := batch.CursorDupSort(kv.TblAccountVals)
require.NoError(t, err)
k, v, err := cursor.First()
@@ -464,10 +464,10 @@ func TestNextNoDup(t *testing.T) {
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
- batch.Put(kv.AccountChangeSet, []byte("key2"), []byte("value2.1"))
- batch.Put(kv.AccountChangeSet, []byte("key2"), []byte("value2.2"))
+ batch.Put(kv.TblAccountVals, []byte("key2"), []byte("value2.1"))
+ batch.Put(kv.TblAccountVals, []byte("key2"), []byte("value2.2"))
- cursor, err := batch.CursorDupSort(kv.AccountChangeSet)
+ cursor, err := batch.CursorDupSort(kv.TblAccountVals)
require.NoError(t, err)
k, _, err := cursor.First()
@@ -491,7 +491,7 @@ func TestDeleteCurrentDuplicates(t *testing.T) {
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
- cursor, err := batch.RwCursorDupSort(kv.AccountChangeSet)
+ cursor, err := batch.RwCursorDupSort(kv.TblAccountVals)
require.NoError(t, err)
require.NoError(t, cursor.Put([]byte("key3"), []byte("value3.2")))
@@ -506,7 +506,7 @@ func TestDeleteCurrentDuplicates(t *testing.T) {
var keys []string
var values []string
- err = rwTx.ForEach(kv.AccountChangeSet, nil, func(k, v []byte) error {
+ err = rwTx.ForEach(kv.TblAccountVals, nil, func(k, v []byte) error {
keys = append(keys, string(k))
values = append(values, string(v))
return nil
@@ -520,13 +520,13 @@ func TestDeleteCurrentDuplicates(t *testing.T) {
func TestSeekBothRange(t *testing.T) {
_, rwTx := memdb.NewTestTx(t)
- rwTx.Put(kv.AccountChangeSet, []byte("key1"), []byte("value1.1"))
- rwTx.Put(kv.AccountChangeSet, []byte("key3"), []byte("value3.3"))
+ rwTx.Put(kv.TblAccountVals, []byte("key1"), []byte("value1.1"))
+ rwTx.Put(kv.TblAccountVals, []byte("key3"), []byte("value3.3"))
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
- cursor, err := batch.RwCursorDupSort(kv.AccountChangeSet)
+ cursor, err := batch.RwCursorDupSort(kv.TblAccountVals)
require.NoError(t, err)
require.NoError(t, cursor.Put([]byte("key3"), []byte("value3.1")))
diff --git a/erigon-lib/kv/remotedbserver/remotedbserver_test.go b/erigon-lib/kv/remotedbserver/remotedbserver_test.go
index d1652da8b46..5542d34cbe3 100644
--- a/erigon-lib/kv/remotedbserver/remotedbserver_test.go
+++ b/erigon-lib/kv/remotedbserver/remotedbserver_test.go
@@ -38,7 +38,7 @@ func TestKvServer_renew(t *testing.T) {
require, ctx, db := require.New(t), context.Background(), memdb.NewTestDB(t, kv.ChainDB)
require.NoError(db.Update(ctx, func(tx kv.RwTx) error {
- wc, err := tx.RwCursorDupSort(kv.AccountChangeSet)
+ wc, err := tx.RwCursorDupSort(kv.TblAccountVals)
require.NoError(err)
require.NoError(wc.Append([]byte{1}, []byte{1}))
require.NoError(wc.Append([]byte{1}, []byte{2}))
@@ -56,7 +56,7 @@ func TestKvServer_renew(t *testing.T) {
}
var c, c2 kv.Cursor
if err = s.with(id, func(tx kv.Tx) error {
- c, err = tx.Cursor(kv.AccountChangeSet)
+ c, err = tx.Cursor(kv.TblAccountVals)
return err
}); err != nil {
return err
@@ -71,11 +71,11 @@ func TestKvServer_renew(t *testing.T) {
}
if err = s.with(id, func(tx kv.Tx) error {
- c, err = tx.Cursor(kv.AccountChangeSet)
+ c, err = tx.Cursor(kv.TblAccountVals)
if err != nil {
return err
}
- c2, err = tx.Cursor(kv.AccountChangeSet)
+ c2, err = tx.Cursor(kv.TblAccountVals)
return err
}); err != nil {
return err
diff --git a/erigon-lib/kv/stream/stream_test.go b/erigon-lib/kv/stream/stream_test.go
index b039f4d8466..90bbf0c513d 100644
--- a/erigon-lib/kv/stream/stream_test.go
+++ b/erigon-lib/kv/stream/stream_test.go
@@ -85,13 +85,13 @@ func TestUnionPairs(t *testing.T) {
require := require.New(t)
tx, _ := db.BeginRw(ctx)
defer tx.Rollback()
- _ = tx.Put(kv.E2AccountsHistory, []byte{1}, []byte{1})
- _ = tx.Put(kv.E2AccountsHistory, []byte{3}, []byte{1})
- _ = tx.Put(kv.E2AccountsHistory, []byte{4}, []byte{1})
- _ = tx.Put(kv.AccountChangeSet, []byte{2}, []byte{9})
- _ = tx.Put(kv.AccountChangeSet, []byte{3}, []byte{9})
- it, _ := tx.Range(kv.E2AccountsHistory, nil, nil, order.Asc, kv.Unlim)
- it2, _ := tx.Range(kv.AccountChangeSet, nil, nil, order.Asc, kv.Unlim)
+ _ = tx.Put(kv.HeaderNumber, []byte{1}, []byte{1})
+ _ = tx.Put(kv.HeaderNumber, []byte{3}, []byte{1})
+ _ = tx.Put(kv.HeaderNumber, []byte{4}, []byte{1})
+ _ = tx.Put(kv.TblAccountVals, []byte{2}, []byte{9})
+ _ = tx.Put(kv.TblAccountVals, []byte{3}, []byte{9})
+ it, _ := tx.Range(kv.HeaderNumber, nil, nil, order.Asc, kv.Unlim)
+ it2, _ := tx.Range(kv.TblAccountVals, nil, nil, order.Asc, kv.Unlim)
keys, values, err := stream.ToArrayKV(stream.UnionKV(it, it2, -1))
require.NoError(err)
require.Equal([][]byte{{1}, {2}, {3}, {4}}, keys)
@@ -101,10 +101,10 @@ func TestUnionPairs(t *testing.T) {
require := require.New(t)
tx, _ := db.BeginRw(ctx)
defer tx.Rollback()
- _ = tx.Put(kv.AccountChangeSet, []byte{2}, []byte{9})
- _ = tx.Put(kv.AccountChangeSet, []byte{3}, []byte{9})
- it, _ := tx.Range(kv.E2AccountsHistory, nil, nil, order.Asc, kv.Unlim)
- it2, _ := tx.Range(kv.AccountChangeSet, nil, nil, order.Asc, kv.Unlim)
+ _ = tx.Put(kv.TblAccountVals, []byte{2}, []byte{9})
+ _ = tx.Put(kv.TblAccountVals, []byte{3}, []byte{9})
+ it, _ := tx.Range(kv.HeaderNumber, nil, nil, order.Asc, kv.Unlim)
+ it2, _ := tx.Range(kv.TblAccountVals, nil, nil, order.Asc, kv.Unlim)
keys, _, err := stream.ToArrayKV(stream.UnionKV(it, it2, -1))
require.NoError(err)
require.Equal([][]byte{{2}, {3}}, keys)
@@ -113,11 +113,11 @@ func TestUnionPairs(t *testing.T) {
require := require.New(t)
tx, _ := db.BeginRw(ctx)
defer tx.Rollback()
- _ = tx.Put(kv.E2AccountsHistory, []byte{1}, []byte{1})
- _ = tx.Put(kv.E2AccountsHistory, []byte{3}, []byte{1})
- _ = tx.Put(kv.E2AccountsHistory, []byte{4}, []byte{1})
- it, _ := tx.Range(kv.E2AccountsHistory, nil, nil, order.Asc, kv.Unlim)
- it2, _ := tx.Range(kv.AccountChangeSet, nil, nil, order.Asc, kv.Unlim)
+ _ = tx.Put(kv.HeaderNumber, []byte{1}, []byte{1})
+ _ = tx.Put(kv.HeaderNumber, []byte{3}, []byte{1})
+ _ = tx.Put(kv.HeaderNumber, []byte{4}, []byte{1})
+ it, _ := tx.Range(kv.HeaderNumber, nil, nil, order.Asc, kv.Unlim)
+ it2, _ := tx.Range(kv.TblAccountVals, nil, nil, order.Asc, kv.Unlim)
keys, _, err := stream.ToArrayKV(stream.UnionKV(it, it2, -1))
require.NoError(err)
require.Equal([][]byte{{1}, {3}, {4}}, keys)
@@ -126,8 +126,8 @@ func TestUnionPairs(t *testing.T) {
require := require.New(t)
tx, _ := db.BeginRw(ctx)
defer tx.Rollback()
- it, _ := tx.Range(kv.E2AccountsHistory, nil, nil, order.Asc, kv.Unlim)
- it2, _ := tx.Range(kv.AccountChangeSet, nil, nil, order.Asc, kv.Unlim)
+ it, _ := tx.Range(kv.HeaderNumber, nil, nil, order.Asc, kv.Unlim)
+ it2, _ := tx.Range(kv.TblAccountVals, nil, nil, order.Asc, kv.Unlim)
m := stream.UnionKV(it, it2, -1)
require.False(m.HasNext())
})
diff --git a/erigon-lib/kv/tables.go b/erigon-lib/kv/tables.go
index 505023cc39c..99b5a0873ab 100644
--- a/erigon-lib/kv/tables.go
+++ b/erigon-lib/kv/tables.go
@@ -30,83 +30,11 @@ import (
// 6.1 - Canonical/NonCanonical/BadBlock transitions now stored in same table: kv.EthTx. Add kv.BadBlockNumber table
var DBSchemaVersion = types.VersionReply{Major: 7, Minor: 0, Patch: 0}
-// ChaindataTables
-
-// Dictionary:
-// "Plain State" - state where keys arent' hashed. "CurrentState" - same, but keys are hashed. "PlainState" used for blocks execution. "CurrentState" used mostly for Merkle root calculation.
-// "incarnation" - uint64 number - how much times given account was SelfDestruct'ed.
-
-/*
-PlainState logical layout:
-
- Contains Accounts:
- key - address (unhashed)
- value - account encoded for storage
- Contains Storage:
- key - address (unhashed) + incarnation + storage key (unhashed)
- value - storage value(common.hash)
-
-Physical layout:
-
- PlainState and HashedStorage utilises DupSort feature of MDBX (store multiple values inside 1 key).
-
--------------------------------------------------------------
-
- key | value
-
--------------------------------------------------------------
-[acc_hash] | [acc_value]
-[acc_hash]+[inc] | [storage1_hash]+[storage1_value]
-
- | [storage2_hash]+[storage2_value] // this value has no own key. it's 2nd value of [acc_hash]+[inc] key.
- | [storage3_hash]+[storage3_value]
- | ...
-
-[acc_hash]+[old_inc] | [storage1_hash]+[storage1_value]
-
- | ...
-
-[acc2_hash] | [acc2_value]
-
- ...
-*/
-const PlainState = "PlainState"
-
// PlainContractCode -
// key - address+incarnation
// value - code hash
const PlainContractCode = "PlainCodeHash"
-/*
-AccountChangeSet and StorageChangeSet - of block N store values of state before block N changed them.
-Because values "after" change stored in PlainState.
-Logical format:
-
- key - blockNum_u64 + key_in_plain_state
- value - value_in_plain_state_before_blockNum_changes
-
-Example: If block N changed account A from value X to Y. Then:
-
- AccountChangeSet has record: bigEndian(N) + A -> X
- PlainState has record: A -> Y
-
-See also: docs/programmers_guide/db_walkthrough.MD#table-history-of-accounts
-
-As you can see if block N changes much accounts - then all records have repetitive prefix `bigEndian(N)`.
-MDBX can store such prefixes only once - by DupSort feature (see `docs/programmers_guide/dupsort.md`).
-Both buckets are DupSort-ed and have physical format:
-AccountChangeSet:
-
- key - blockNum_u64
- value - address + account(encoded)
-
-StorageChangeSet:
-
- key - blockNum_u64 + address + incarnation_u64
- value - plain_storage_key + value
-*/
-const AccountChangeSet = "AccountChangeSet"
-const StorageChangeSet = "StorageChangeSet"
const ChangeSets3 = "ChangeSets3"
const (
@@ -117,51 +45,10 @@ const (
// Contains Storage:
//key - address hash + incarnation + storage key hash
//value - storage value(common.hash)
- HashedAccounts = "HashedAccount"
- HashedStorage = "HashedStorage"
+ HashedAccountsDeprecated = "HashedAccount"
+ HashedStorageDeprecated = "HashedStorage"
)
-/*
-AccountsHistory and StorageHistory - indices designed to serve next 2 type of requests:
-1. what is smallest block number >= X where account A changed
-2. get last shard of A - to append there new block numbers
-
-Task 1. is part of "get historical state" operation (see `core/state:DomainGetAsOf`):
-If `db.seekInFiles(A+bigEndian(X))` returns non-last shard -
-
- then get block number from shard value Y := RoaringBitmap(shard_value).GetGte(X)
- and with Y go to ChangeSets: db.Get(ChangeSets, Y+A)
-
-If `db.seekInFiles(A+bigEndian(X))` returns last shard -
-
- then we go to PlainState: db.Get(PlainState, A)
-
-Format:
- - index split to shards by 2Kb - RoaringBitmap encoded sorted list of block numbers
- (to avoid performance degradation of popular accounts or look deep into history.
- Also 2Kb allows avoid Overflow pages inside DB.)
- - if shard is not last - then key has suffix 8 bytes = bigEndian(max_block_num_in_this_shard)
- - if shard is last - then key has suffix 8 bytes = 0xFF
-
-It allows:
- - server task 1. by 1 db operation db.seekInFiles(A+bigEndian(X))
- - server task 2. by 1 db operation db.Get(A+0xFF)
-
-see also: docs/programmers_guide/db_walkthrough.MD#table-change-sets
-
-AccountsHistory:
-
- key - address + shard_id_u64
- value - roaring bitmap - list of block where it changed
-
-StorageHistory
-
- key - address + storage_key + shard_id_u64
- value - roaring bitmap - list of block where it changed
-*/
-const E2AccountsHistory = "AccountHistory"
-const E2StorageHistory = "StorageHistory"
-
const (
//key - contract code hash
@@ -171,63 +58,8 @@ const (
//key - addressHash+incarnation
//value - code hash
ContractCode = "HashedCodeHash"
-
- // IncarnationMap for deleted accounts
- //key - address
- //value - incarnation of account when it was last deleted
- IncarnationMap = "IncarnationMap"
)
-/*
-TrieOfAccounts and TrieOfStorage
-hasState,groups - mark prefixes existing in hashed_account table
-hasTree - mark prefixes existing in trie_account table (not related with branchNodes)
-hasHash - mark prefixes which hashes are saved in current trie_account record (actually only hashes of branchNodes can be saved)
-@see UnmarshalTrieNode
-@see integrity.Trie
-
-+-----------------------------------------------------------------------------------------------------+
-| DB record: 0x0B, hasState: 0b1011, hasTree: 0b1001, hasHash: 0b1001, hashes: [x,x] |
-+-----------------------------------------------------------------------------------------------------+
-
- | | |
- v | v
-
-+---------------------------------------------+ | +--------------------------------------+
-| DB record: 0x0B00, hasState: 0b10001 | | | DB record: 0x0B03, hasState: 0b10010 |
-| hasTree: 0, hasHash: 0b10000, hashes: [x] | | | hasTree: 0, hasHash: 0, hashes: [] |
-+---------------------------------------------+ | +--------------------------------------+
-
- | | | | |
- v v v v v
-
-+------------------+ +----------------------+ +---------------+ +---------------+ +---------------+
-| Account: | | BranchNode: 0x0B0004 | | Account: | | Account: | | Account: |
-| 0x0B0000... | | has no record in | | 0x0B01... | | 0x0B0301... | | 0x0B0304... |
-| in HashedAccount | | TrieAccount | | | | | | |
-+------------------+ +----------------------+ +---------------+ +---------------+ +---------------+
-
- | |
- v v
- +---------------+ +---------------+
- | Account: | | Account: |
- | 0x0B000400... | | 0x0B000401... |
- +---------------+ +---------------+
-
-Invariants:
-- hasTree is subset of hasState
-- hasHash is subset of hasState
-- first level in account_trie always exists if hasState>0
-- TrieStorage record of account.root (length=40) must have +1 hash - it's account.root
-- each record in TrieAccount table must have parent (may be not direct) and this parent must have correct bit in hasTree bitmap
-- if hasState has bit - then HashedAccount table must have record according to this bit
-- each TrieAccount record must cover some state (means hasState is always > 0)
-- TrieAccount records with length=1 can satisfy (hasBranch==0&&hasHash==0) condition
-- Other records in TrieAccount and TrieStorage must (hasTree!=0 || hasHash!=0)
-*/
-const TrieOfAccounts = "TrieAccount"
-const TrieOfStorage = "TrieStorage"
-
// Mapping [block number] => [Verkle Root]
const VerkleRoots = "VerkleRoots"
@@ -508,8 +340,6 @@ var ChaindataTables = []string{
SyncStageProgress,
PlainState,
PlainContractCode,
- AccountChangeSet,
- StorageChangeSet,
ChangeSets3,
Senders,
HeadBlockKey,
@@ -526,8 +356,6 @@ var ChaindataTables = []string{
EthTx,
TrieOfAccounts,
TrieOfStorage,
- HashedAccounts,
- HashedStorage,
HeaderCanonical,
Headers,
HeaderTD,
@@ -627,6 +455,11 @@ var ChaindataTables = []string{
ActiveValidatorIndicies,
EffectiveBalancesDump,
BalancesDump,
+
+ AccountChangeSetDeprecated,
+ StorageChangeSetDeprecated,
+ HashedAccountsDeprecated,
+ HashedStorageDeprecated,
}
const (
@@ -709,14 +542,12 @@ type TableCfgItem struct {
}
var ChaindataTablesCfg = TableCfg{
- HashedStorage: {
+ HashedStorageDeprecated: {
Flags: DupSort,
AutoDupSortKeysConversion: true,
DupFromLen: 72,
DupToLen: 40,
},
- AccountChangeSet: {Flags: DupSort},
- StorageChangeSet: {Flags: DupSort},
PlainState: {
Flags: DupSort,
AutoDupSortKeysConversion: true,
@@ -992,3 +823,176 @@ func String2Appendable(in string) (Appendable, error) {
return Appendable(MaxUint16), fmt.Errorf("unknown Appendable name: %s", in)
}
}
+
+// --- Deprecated
+const (
+
+ // ChaindataTables
+
+ // Dictionary:
+	// "Plain State" - state where keys aren't hashed. "CurrentState" - same, but keys are hashed. "PlainState" used for blocks execution. "CurrentState" used mostly for Merkle root calculation.
+	// "incarnation" - uint64 number - how many times given account was SelfDestruct'ed.
+
+ /*
+ PlainState logical layout:
+
+ Contains Accounts:
+ key - address (unhashed)
+ value - account encoded for storage
+ Contains Storage:
+ key - address (unhashed) + incarnation + storage key (unhashed)
+ value - storage value(common.hash)
+
+ Physical layout:
+
+		PlainState and HashedStorage utilise DupSort feature of MDBX (store multiple values inside 1 key).
+
+ -------------------------------------------------------------
+
+ key | value
+
+ -------------------------------------------------------------
+ [acc_hash] | [acc_value]
+ [acc_hash]+[inc] | [storage1_hash]+[storage1_value]
+
+ | [storage2_hash]+[storage2_value] // this value has no own key. it's 2nd value of [acc_hash]+[inc] key.
+ | [storage3_hash]+[storage3_value]
+ | ...
+
+ [acc_hash]+[old_inc] | [storage1_hash]+[storage1_value]
+
+ | ...
+
+ [acc2_hash] | [acc2_value]
+
+ ...
+ */
+ PlainState = "PlainState"
+
+ /*
+ AccountChangeSet and StorageChangeSet - of block N store values of state before block N changed them.
+ Because values "after" change stored in PlainState.
+ Logical format:
+
+ key - blockNum_u64 + key_in_plain_state
+ value - value_in_plain_state_before_blockNum_changes
+
+ Example: If block N changed account A from value X to Y. Then:
+
+ AccountChangeSet has record: bigEndian(N) + A -> X
+ PlainState has record: A -> Y
+
+ See also: docs/programmers_guide/db_walkthrough.MD#table-history-of-accounts
+
+		As you can see if block N changes many accounts - then all records have repetitive prefix `bigEndian(N)`.
+ MDBX can store such prefixes only once - by DupSort feature (see `docs/programmers_guide/dupsort.md`).
+ Both buckets are DupSort-ed and have physical format:
+ AccountChangeSet:
+
+ key - blockNum_u64
+ value - address + account(encoded)
+
+ StorageChangeSet:
+
+ key - blockNum_u64 + address + incarnation_u64
+ value - plain_storage_key + value
+ */
+ AccountChangeSetDeprecated = "AccountChangeSet"
+ StorageChangeSetDeprecated = "StorageChangeSet"
+
+ /*
+		AccountsHistory and StorageHistory - indices designed to serve the next 2 types of requests:
+ 1. what is smallest block number >= X where account A changed
+ 2. get last shard of A - to append there new block numbers
+
+ Task 1. is part of "get historical state" operation (see `core/state:DomainGetAsOf`):
+ If `db.seekInFiles(A+bigEndian(X))` returns non-last shard -
+
+ then get block number from shard value Y := RoaringBitmap(shard_value).GetGte(X)
+ and with Y go to ChangeSets: db.Get(ChangeSets, Y+A)
+
+ If `db.seekInFiles(A+bigEndian(X))` returns last shard -
+
+ then we go to PlainState: db.Get(PlainState, A)
+
+ Format:
+ - index split to shards by 2Kb - RoaringBitmap encoded sorted list of block numbers
+		  (to avoid performance degradation of popular accounts or looking deep into history.
+		  Also 2Kb allows avoiding Overflow pages inside DB.)
+ - if shard is not last - then key has suffix 8 bytes = bigEndian(max_block_num_in_this_shard)
+ - if shard is last - then key has suffix 8 bytes = 0xFF
+
+ It allows:
+		 - serve task 1. by 1 db operation db.seekInFiles(A+bigEndian(X))
+		 - serve task 2. by 1 db operation db.Get(A+0xFF)
+
+ see also: docs/programmers_guide/db_walkthrough.MD#table-change-sets
+
+ AccountsHistory:
+
+ key - address + shard_id_u64
+ value - roaring bitmap - list of block where it changed
+
+ StorageHistory
+
+ key - address + storage_key + shard_id_u64
+ value - roaring bitmap - list of block where it changed
+ */
+ E2AccountsHistory = "AccountHistory"
+ E2StorageHistory = "StorageHistory"
+
+ /*
+ TrieOfAccounts and TrieOfStorage
+ hasState,groups - mark prefixes existing in hashed_account table
+ hasTree - mark prefixes existing in trie_account table (not related with branchNodes)
+ hasHash - mark prefixes which hashes are saved in current trie_account record (actually only hashes of branchNodes can be saved)
+ @see UnmarshalTrieNode
+ @see integrity.Trie
+
+ +-----------------------------------------------------------------------------------------------------+
+ | DB record: 0x0B, hasState: 0b1011, hasTree: 0b1001, hasHash: 0b1001, hashes: [x,x] |
+ +-----------------------------------------------------------------------------------------------------+
+
+ | | |
+ v | v
+
+ +---------------------------------------------+ | +--------------------------------------+
+ | DB record: 0x0B00, hasState: 0b10001 | | | DB record: 0x0B03, hasState: 0b10010 |
+ | hasTree: 0, hasHash: 0b10000, hashes: [x] | | | hasTree: 0, hasHash: 0, hashes: [] |
+ +---------------------------------------------+ | +--------------------------------------+
+
+ | | | | |
+ v v v v v
+
+ +------------------+ +----------------------+ +---------------+ +---------------+ +---------------+
+ | Account: | | BranchNode: 0x0B0004 | | Account: | | Account: | | Account: |
+ | 0x0B0000... | | has no record in | | 0x0B01... | | 0x0B0301... | | 0x0B0304... |
+ | in HashedAccount | | TrieAccount | | | | | | |
+ +------------------+ +----------------------+ +---------------+ +---------------+ +---------------+
+
+ | |
+ v v
+ +---------------+ +---------------+
+ | Account: | | Account: |
+ | 0x0B000400... | | 0x0B000401... |
+ +---------------+ +---------------+
+
+ Invariants:
+ - hasTree is subset of hasState
+ - hasHash is subset of hasState
+ - first level in account_trie always exists if hasState>0
+ - TrieStorage record of account.root (length=40) must have +1 hash - it's account.root
+ - each record in TrieAccount table must have parent (may be not direct) and this parent must have correct bit in hasTree bitmap
+ - if hasState has bit - then HashedAccount table must have record according to this bit
+ - each TrieAccount record must cover some state (means hasState is always > 0)
+ - TrieAccount records with length=1 can satisfy (hasBranch==0&&hasHash==0) condition
+ - Other records in TrieAccount and TrieStorage must (hasTree!=0 || hasHash!=0)
+ */
+ TrieOfAccounts = "TrieAccount"
+ TrieOfStorage = "TrieStorage"
+
+ // IncarnationMap for deleted accounts
+ //key - address
+ //value - incarnation of account when it was last deleted
+ IncarnationMap = "IncarnationMap"
+)
diff --git a/erigon-lib/kv/temporal/historyv2/account_changeset.go b/erigon-lib/kv/temporal/historyv2/account_changeset.go
index 89192f5ee81..fd38a694ae0 100644
--- a/erigon-lib/kv/temporal/historyv2/account_changeset.go
+++ b/erigon-lib/kv/temporal/historyv2/account_changeset.go
@@ -17,40 +17,12 @@
package historyv2
import (
- "bytes"
"encoding/binary"
"fmt"
- "sort"
- "github.com/erigontech/erigon-lib/common/hexutility"
"github.com/erigontech/erigon-lib/common/length"
- "github.com/erigontech/erigon-lib/kv"
)
-type Encoder func(blockN uint64, s *ChangeSet, f func(k, v []byte) error) error
-type Decoder func(dbKey, dbValue []byte) (blockN uint64, k, v []byte, err error)
-
-func NewAccountChangeSet() *ChangeSet {
- return &ChangeSet{
- Changes: make([]Change, 0),
- keyLen: length.Addr,
- }
-}
-
-func EncodeAccounts(blockN uint64, s *ChangeSet, f func(k, v []byte) error) error {
- sort.Sort(s)
- newK := hexutility.EncodeTs(blockN)
- for _, cs := range s.Changes {
- newV := make([]byte, len(cs.Key)+len(cs.Value))
- copy(newV, cs.Key)
- copy(newV[len(cs.Key):], cs.Value)
- if err := f(newK, newV); err != nil {
- return err
- }
- }
- return nil
-}
-
func DecodeAccounts(dbKey, dbValue []byte) (uint64, []byte, []byte, error) {
blockN := binary.BigEndian.Uint64(dbKey)
if len(dbValue) < length.Addr {
@@ -60,19 +32,3 @@ func DecodeAccounts(dbKey, dbValue []byte) (uint64, []byte, []byte, error) {
v := dbValue[length.Addr:]
return blockN, k, v, nil
}
-
-func FindAccount(c kv.CursorDupSort, blockNumber uint64, key []byte) ([]byte, error) {
- k := hexutility.EncodeTs(blockNumber)
- v, err := c.SeekBothRange(k, key)
- if err != nil {
- return nil, err
- }
- _, k, v, err = DecodeAccounts(k, v)
- if err != nil {
- return nil, err
- }
- if !bytes.HasPrefix(k, key) {
- return nil, nil
- }
- return v, nil
-}
diff --git a/erigon-lib/kv/temporal/historyv2/account_changeset_test.go b/erigon-lib/kv/temporal/historyv2/account_changeset_test.go
deleted file mode 100644
index e86634d9d57..00000000000
--- a/erigon-lib/kv/temporal/historyv2/account_changeset_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2022 The Erigon Authors
-// This file is part of Erigon.
-//
-// Erigon is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Erigon is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with Erigon. If not, see .
-
-package historyv2
-
-import (
- "bytes"
- "encoding/hex"
- "errors"
- "fmt"
- "reflect"
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/erigontech/erigon-lib/common/hexutility"
- "github.com/erigontech/erigon-lib/kv"
-)
-
-func TestEncodingAccount(t *testing.T) {
- bkt := kv.AccountChangeSet
- m := Mapper[bkt]
-
- ch := m.New()
- // empty StorageChangeSset first
- err := m.Encode(1, ch, func(k, v []byte) error {
- return errors.New("must never call")
- })
- assert.NoError(t, err)
-
- vals := [][]byte{
- hexutility.MustDecodeHex("f7f6db1eb17c6d582078e0ffdd0c"),
- hexutility.MustDecodeHex("b1e9b5c16355eede662031dd621d08faf4ea"),
- hexutility.MustDecodeHex("862cf52b74f1cea41ddd8ffa4b3e7c7790"),
- }
- numOfElements := 3
- for i := 0; i < numOfElements; i++ {
- address := hexutility.MustDecodeHex(fmt.Sprintf("0xBe828AD8B538D1D691891F6c725dEdc5989abBc%d", i))
- err2 := ch.Add(address, vals[i])
- if err2 != nil {
- t.Fatal(err)
- }
- }
-
- ch2 := m.New()
- err = m.Encode(1, ch, func(k, v []byte) error {
- var err error
- _, k, v, err = m.Decode(k, v)
- if err != nil {
- return err
- }
- return ch2.Add(k, v)
- })
- if err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(ch, ch2) {
- fmt.Println("ch", len(ch.Changes), "ch2", len(ch2.Changes))
- for i, v := range ch.Changes {
- fmt.Println("Line ", i)
-
- if !bytes.Equal(v.Key, ch2.Changes[i].Key) || !bytes.Equal(v.Value, ch2.Changes[i].Value) {
- fmt.Println("Diff ", i)
- fmt.Println("k1", hex.EncodeToString(v.Key), len(v.Key))
- fmt.Println("k2", hex.EncodeToString(ch2.Changes[i].Key))
- fmt.Println("v1", hex.EncodeToString(v.Value))
- fmt.Println("v2", hex.EncodeToString(ch2.Changes[i].Value))
- }
- }
- fmt.Printf("%+v %+v\n", ch, ch2)
- t.Fatal("not equal")
- }
-}
diff --git a/erigon-lib/kv/temporal/historyv2/changeset.go b/erigon-lib/kv/temporal/historyv2/changeset.go
index 9027b0cb830..33bfdd1e6d5 100644
--- a/erigon-lib/kv/temporal/historyv2/changeset.go
+++ b/erigon-lib/kv/temporal/historyv2/changeset.go
@@ -15,235 +15,3 @@
// along with Erigon. If not, see .
package historyv2
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- math2 "math"
- "reflect"
-
- "github.com/erigontech/erigon-lib/common/hexutility"
- "github.com/erigontech/erigon-lib/common/length"
- "github.com/erigontech/erigon-lib/kv"
-)
-
-func NewChangeSet() *ChangeSet {
- return &ChangeSet{
- Changes: make([]Change, 0),
- }
-}
-
-type Change struct {
- Key []byte
- Value []byte
-}
-
-// ChangeSet is a map with keys of the same size.
-// Both keys and values are byte strings.
-type ChangeSet struct {
- // Invariant: all keys are of the same size.
- Changes []Change
- keyLen int
-}
-
-// BEGIN sort.Interface
-
-func (s *ChangeSet) Len() int {
- return len(s.Changes)
-}
-
-func (s *ChangeSet) Swap(i, j int) {
- s.Changes[i], s.Changes[j] = s.Changes[j], s.Changes[i]
-}
-
-func (s *ChangeSet) Less(i, j int) bool {
- cmp := bytes.Compare(s.Changes[i].Key, s.Changes[j].Key)
- if cmp == 0 {
- cmp = bytes.Compare(s.Changes[i].Value, s.Changes[j].Value)
- }
- return cmp < 0
-}
-
-// END sort.Interface
-func (s *ChangeSet) KeySize() int {
- if s.keyLen != 0 {
- return s.keyLen
- }
- for _, c := range s.Changes {
- return len(c.Key)
- }
- return 0
-}
-
-func (s *ChangeSet) checkKeySize(key []byte) error {
- if (s.Len() == 0 && s.KeySize() == 0) || (len(key) == s.KeySize() && len(key) > 0) {
- return nil
- }
-
- return fmt.Errorf("wrong key size in AccountChangeSet: expected %d, actual %d", s.KeySize(), len(key))
-}
-
-// Add adds a new entry to the AccountChangeSet.
-// One must not add an existing key
-// and may add keys only of the same size.
-func (s *ChangeSet) Add(key []byte, value []byte) error {
- if err := s.checkKeySize(key); err != nil {
- return err
- }
-
- s.Changes = append(s.Changes, Change{
- Key: key,
- Value: value,
- })
- return nil
-}
-
-func (s *ChangeSet) ChangedKeys() map[string]struct{} {
- m := make(map[string]struct{}, len(s.Changes))
- for i := range s.Changes {
- m[string(s.Changes[i].Key)] = struct{}{}
- }
- return m
-}
-
-func (s *ChangeSet) Equals(s2 *ChangeSet) bool {
- return reflect.DeepEqual(s.Changes, s2.Changes)
-}
-
-// Encoded Method
-func FromDBFormat(dbKey, dbValue []byte) (uint64, []byte, []byte, error) {
- if len(dbKey) == 8 {
- return DecodeAccounts(dbKey, dbValue)
- } else {
- return DecodeStorage(dbKey, dbValue)
- }
-}
-
-func AvailableFrom(tx kv.Tx) (uint64, error) {
- c, err := tx.Cursor(kv.AccountChangeSet)
- if err != nil {
- return math2.MaxUint64, err
- }
- defer c.Close()
- k, _, err := c.First()
- if err != nil {
- return math2.MaxUint64, err
- }
- if len(k) == 0 {
- return math2.MaxUint64, nil
- }
- return binary.BigEndian.Uint64(k), nil
-}
-func AvailableStorageFrom(tx kv.Tx) (uint64, error) {
- c, err := tx.Cursor(kv.StorageChangeSet)
- if err != nil {
- return math2.MaxUint64, err
- }
- defer c.Close()
- k, _, err := c.First()
- if err != nil {
- return math2.MaxUint64, err
- }
- if len(k) == 0 {
- return math2.MaxUint64, nil
- }
- return binary.BigEndian.Uint64(k), nil
-}
-
-func ForEach(db kv.Tx, bucket string, startkey []byte, walker func(blockN uint64, k, v []byte) error) error {
- var blockN uint64
- return db.ForEach(bucket, startkey, func(k, v []byte) error {
- var err error
- blockN, k, v, err = FromDBFormat(k, v)
- if err != nil {
- return err
- }
- return walker(blockN, k, v)
- })
-}
-
-func Truncate(tx kv.RwTx, from uint64) error {
- keyStart := hexutility.EncodeTs(from)
-
- {
- c, err := tx.RwCursorDupSort(kv.AccountChangeSet)
- if err != nil {
- return err
- }
- defer c.Close()
- for k, _, err := c.Seek(keyStart); k != nil; k, _, err = c.NextNoDup() {
- if err != nil {
- return err
- }
- if err = tx.Delete(kv.AccountChangeSet, k); err != nil {
- return err
- }
- if err != nil {
- return err
- }
- }
- }
- {
- c, err := tx.RwCursorDupSort(kv.StorageChangeSet)
- if err != nil {
- return err
- }
- defer c.Close()
- for k, _, err := c.Seek(keyStart); k != nil; k, _, err = c.NextNoDup() {
- if err != nil {
- return err
- }
- if err = tx.Delete(kv.StorageChangeSet, k); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-type CSMapper struct {
- IndexBucket string
- IndexChunkKey func([]byte, uint64) []byte
- Find func(cursor kv.CursorDupSort, blockNumber uint64, key []byte) ([]byte, error)
- New func() *ChangeSet
- Encode Encoder
- Decode Decoder
-}
-
-var Mapper = map[string]CSMapper{
- kv.AccountChangeSet: {
- IndexBucket: kv.E2AccountsHistory,
- IndexChunkKey: AccountIndexChunkKey,
- New: NewAccountChangeSet,
- Find: FindAccount,
- Encode: EncodeAccounts,
- Decode: DecodeAccounts,
- },
- kv.StorageChangeSet: {
- IndexBucket: kv.E2StorageHistory,
- IndexChunkKey: StorageIndexChunkKey,
- Find: FindStorage,
- New: NewStorageChangeSet,
- Encode: EncodeStorage,
- Decode: DecodeStorage,
- },
-}
-
-func AccountIndexChunkKey(key []byte, blockNumber uint64) []byte {
- blockNumBytes := make([]byte, length.Addr+8)
- copy(blockNumBytes, key)
- binary.BigEndian.PutUint64(blockNumBytes[length.Addr:], blockNumber)
-
- return blockNumBytes
-}
-
-func StorageIndexChunkKey(key []byte, blockNumber uint64) []byte {
- //remove incarnation and add block number
- blockNumBytes := make([]byte, length.Addr+length.Hash+8)
- copy(blockNumBytes, key[:length.Addr])
- copy(blockNumBytes[length.Addr:], key[length.Addr+length.Incarnation:])
- binary.BigEndian.PutUint64(blockNumBytes[length.Addr+length.Hash:], blockNumber)
-
- return blockNumBytes
-}
diff --git a/erigon-lib/kv/temporal/historyv2/find_by_history.go b/erigon-lib/kv/temporal/historyv2/find_by_history.go
deleted file mode 100644
index f21a47d4bce..00000000000
--- a/erigon-lib/kv/temporal/historyv2/find_by_history.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2022 The Erigon Authors
-// This file is part of Erigon.
-//
-// Erigon is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Erigon is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
-
-package historyv2
-
-import (
- "bytes"
- "errors"
- "fmt"
-
- "github.com/RoaringBitmap/roaring/roaring64"
- "github.com/erigontech/erigon-lib/common/length"
- "github.com/erigontech/erigon-lib/kv"
- "github.com/erigontech/erigon-lib/kv/bitmapdb"
-)
-
-func FindByHistory(indexC kv.Cursor, changesC kv.CursorDupSort, storage bool, key []byte, timestamp uint64) ([]byte, bool, error) {
- var csBucket string
- if storage {
- csBucket = kv.StorageChangeSet
- } else {
- csBucket = kv.AccountChangeSet
- }
-
- k, v, seekErr := indexC.Seek(Mapper[csBucket].IndexChunkKey(key, timestamp))
- if seekErr != nil {
- return nil, false, seekErr
- }
-
- if k == nil {
- return nil, false, nil
- }
- if storage {
- if !bytes.Equal(k[:length.Addr], key[:length.Addr]) ||
- !bytes.Equal(k[length.Addr:length.Addr+length.Hash], key[length.Addr+length.Incarnation:]) {
- return nil, false, nil
- }
- } else {
- if !bytes.HasPrefix(k, key) {
- return nil, false, nil
- }
- }
- index := roaring64.New()
- if _, err := index.ReadFrom(bytes.NewReader(v)); err != nil {
- return nil, false, err
- }
- found, ok := bitmapdb.SeekInBitmap64(index, timestamp)
- changeSetBlock := found
-
- var data []byte
- var err error
- if ok {
- data, err = Mapper[csBucket].Find(changesC, changeSetBlock, key)
- if err != nil {
- if !errors.Is(err, ErrNotFound) {
- return nil, false, fmt.Errorf("finding %x in the changeset %d: %w", key, changeSetBlock, err)
- }
- return nil, false, nil
- }
- } else {
- return nil, false, nil
- }
-
- return data, true, nil
-}
diff --git a/erigon-lib/kv/temporal/historyv2/storage_changeset.go b/erigon-lib/kv/temporal/historyv2/storage_changeset.go
index cd769b0c4c7..50d72df40b3 100644
--- a/erigon-lib/kv/temporal/historyv2/storage_changeset.go
+++ b/erigon-lib/kv/temporal/historyv2/storage_changeset.go
@@ -17,43 +17,12 @@
package historyv2
import (
- "bytes"
"encoding/binary"
- "errors"
"fmt"
- "sort"
"github.com/erigontech/erigon-lib/common/length"
- "github.com/erigontech/erigon-lib/kv"
)
-var (
- ErrNotFound = errors.New("not found")
-)
-
-func NewStorageChangeSet() *ChangeSet {
- return &ChangeSet{
- Changes: make([]Change, 0),
- keyLen: length.Addr + length.Hash + length.Incarnation,
- }
-}
-
-func EncodeStorage(blockN uint64, s *ChangeSet, f func(k, v []byte) error) error {
- sort.Sort(s)
- keyPart := length.Addr + length.Incarnation
- for _, cs := range s.Changes {
- newK := make([]byte, length.BlockNum+keyPart)
- binary.BigEndian.PutUint64(newK, blockN)
- copy(newK[8:], cs.Key[:keyPart])
- newV := make([]byte, 0, length.Hash+len(cs.Value))
- newV = append(append(newV, cs.Key[keyPart:]...), cs.Value...)
- if err := f(newK, newV); err != nil {
- return err
- }
- }
- return nil
-}
-
func DecodeStorage(dbKey, dbValue []byte) (uint64, []byte, []byte, error) {
blockN := binary.BigEndian.Uint64(dbKey)
if len(dbValue) < length.Hash {
@@ -70,18 +39,3 @@ func DecodeStorage(dbKey, dbValue []byte) (uint64, []byte, []byte, error) {
return blockN, k, v, nil
}
-
-func FindStorage(c kv.CursorDupSort, blockNumber uint64, k []byte) ([]byte, error) {
- addWithInc, loc := k[:length.Addr+length.Incarnation], k[length.Addr+length.Incarnation:]
- seek := make([]byte, length.BlockNum+length.Addr+length.Incarnation)
- binary.BigEndian.PutUint64(seek, blockNumber)
- copy(seek[8:], addWithInc)
- v, err := c.SeekBothRange(seek, loc)
- if err != nil {
- return nil, err
- }
- if !bytes.HasPrefix(v, loc) {
- return nil, ErrNotFound
- }
- return v[length.Hash:], nil
-}
diff --git a/erigon-lib/trie/trie_root.go b/erigon-lib/trie/trie_root.go
index 0b598f21c75..b1694b2bbcc 100644
--- a/erigon-lib/trie/trie_root.go
+++ b/erigon-lib/trie/trie_root.go
@@ -203,7 +203,7 @@ func (l *FlatDBTrieLoader) SetProofRetainer(pr *ProofRetainer) {
// }
func (l *FlatDBTrieLoader) CalcTrieRoot(tx kv.Tx, quit <-chan struct{}) (libcommon.Hash, error) {
- accC, err := tx.Cursor(kv.HashedAccounts)
+ accC, err := tx.Cursor(kv.HashedAccountsDeprecated)
if err != nil {
return EmptyRoot, err
}
@@ -227,7 +227,7 @@ func (l *FlatDBTrieLoader) CalcTrieRoot(tx kv.Tx, quit <-chan struct{}) (libcomm
accTrie := AccTrie(canUse, l.hc, trieAccC, quit)
storageTrie := StorageTrie(canUse, l.shc, trieStorageC, quit)
- ss, err := tx.CursorDupSort(kv.HashedStorage)
+ ss, err := tx.CursorDupSort(kv.HashedStorageDeprecated)
if err != nil {
return EmptyRoot, err
}
diff --git a/turbo/jsonrpc/eth_call_test.go b/turbo/jsonrpc/eth_call_test.go
index 4c5d9298713..c8dfee1d378 100644
--- a/turbo/jsonrpc/eth_call_test.go
+++ b/turbo/jsonrpc/eth_call_test.go
@@ -576,10 +576,10 @@ func doPrune(t *testing.T, db kv.RwDB, pruneTo uint64) {
logEvery := time.NewTicker(20 * time.Second)
- err = rawdb.PruneTableDupSort(tx, kv.AccountChangeSet, "", pruneTo, logEvery, ctx)
+ err = rawdb.PruneTableDupSort(tx, kv.TblAccountVals, "", pruneTo, logEvery, ctx)
assert.NoError(t, err)
- err = rawdb.PruneTableDupSort(tx, kv.StorageChangeSet, "", pruneTo, logEvery, ctx)
+ err = rawdb.PruneTableDupSort(tx, kv.StorageChangeSetDeprecated, "", pruneTo, logEvery, ctx)
assert.NoError(t, err)
err = rawdb.PruneTable(tx, kv.Receipts, pruneTo, ctx, math.MaxInt32, time.Hour, logger, "")