diff --git a/chain.json b/chain.json index fc5c56a658..20a6c96b20 100644 --- a/chain.json +++ b/chain.json @@ -10,5 +10,7 @@ "is-validator": true, "trading-api-enabled": true, "testing-api-enabled": true, - "load-from-snapshot-enabled": true + "load-from-snapshot-enabled": true, + "snapshot-file-path": "/tmp/snapshot", + "makerbook-database-path": "/tmp/makerbook" } diff --git a/network-configs/aylin/chain.json b/network-configs/aylin/chain.json index 28a662f770..a01a49286f 100644 --- a/network-configs/aylin/chain.json +++ b/network-configs/aylin/chain.json @@ -8,5 +8,7 @@ "priority-regossip-addresses": ["0x06CCAD927e6B1d36E219Cb582Af3185D0705f78F"], "validator-private-key-file": "/home/ubuntu/validator.pk", "feeRecipient": "", - "is-validator": true + "is-validator": true, + "snapshot-file-path": "/tmp/snapshot", + "makerbook-database-path": "/tmp/makerbook" } diff --git a/network-configs/aylin/chain_api_node.json b/network-configs/aylin/chain_api_node.json index 6a61c830b9..708c0556d3 100644 --- a/network-configs/aylin/chain_api_node.json +++ b/network-configs/aylin/chain_api_node.json @@ -9,5 +9,7 @@ "coreth-admin-api-enabled": true, "eth-apis": ["public-eth","public-eth-filter","net","web3","internal-eth","internal-blockchain","internal-transaction","internal-debug","internal-tx-pool","internal-account","debug-tracer"], "trading-api-enabled": true, - "testing-api-enabled": true + "testing-api-enabled": true, + "snapshot-file-path": "/tmp/snapshot", + "makerbook-database-path": "/tmp/makerbook" } diff --git a/network-configs/aylin/chain_archival_node.json b/network-configs/aylin/chain_archival_node.json index cf2c361794..2d43af04de 100644 --- a/network-configs/aylin/chain_archival_node.json +++ b/network-configs/aylin/chain_archival_node.json @@ -10,5 +10,7 @@ "coreth-admin-api-enabled": true, "eth-apis": 
["public-eth","public-eth-filter","net","web3","internal-public-eth","internal-blockchain","internal-transaction","internal-debug","internal-tx-pool","internal-account","debug-tracer"], "trading-api-enabled": true, - "testing-api-enabled": true + "testing-api-enabled": true, + "snapshot-file-path": "/tmp/snapshot", + "makerbook-database-path": "/tmp/makerbook" } diff --git a/network-configs/hubblenet/chain_api_node.json b/network-configs/hubblenet/chain_api_node.json index 2deb5bf5c5..18ec743005 100644 --- a/network-configs/hubblenet/chain_api_node.json +++ b/network-configs/hubblenet/chain_api_node.json @@ -12,5 +12,7 @@ "admin-api-enabled": true, "eth-apis": ["public-eth","public-eth-filter","net","web3","internal-public-eth","internal-blockchain","internal-transaction","internal-debug","internal-tx-pool","internal-account","debug-tracer"], "trading-api-enabled": true, - "testing-api-enabled": true + "testing-api-enabled": true, + "snapshot-file-path": "/tmp/snapshot", + "makerbook-database-path": "/tmp/makerbook" } diff --git a/network-configs/hubblenet/chain_archival_node.json b/network-configs/hubblenet/chain_archival_node.json index dfcf759fb4..2471a78dc8 100644 --- a/network-configs/hubblenet/chain_archival_node.json +++ b/network-configs/hubblenet/chain_archival_node.json @@ -13,5 +13,7 @@ "admin-api-enabled": true, "eth-apis": ["public-eth","public-eth-filter","net","web3","internal-public-eth","internal-blockchain","internal-transaction","internal-debug","internal-tx-pool","internal-account","debug-tracer"], "trading-api-enabled": true, - "testing-api-enabled": true + "testing-api-enabled": true, + "snapshot-file-path": "/tmp/snapshot", + "makerbook-database-path": "/tmp/makerbook" } diff --git a/network-configs/hubblenet/chain_validator_1.json b/network-configs/hubblenet/chain_validator_1.json index 6f8d868eaa..b5694f7d3a 100644 --- a/network-configs/hubblenet/chain_validator_1.json +++ b/network-configs/hubblenet/chain_validator_1.json @@ -11,5 +11,6 @@ 
"continuous-profiler-frequency": "10m", "validator-private-key-file": "/var/avalanche/validator.pk", "feeRecipient": "0xa5e31FbE901362Cc93b6fdab99DB9741c673a942", - "is-validator": true + "is-validator": true, + "snapshot-file-path": "/tmp/snapshot" } diff --git a/plugin/evm/config.go b/plugin/evm/config.go index a165fe89f5..905c6b95fb 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config.go @@ -63,6 +63,8 @@ const ( defaultIsValidator = false defaultTradingAPIEnabled = false defaultLoadFromSnapshotEnabled = true + defaultSnapshotFilePath = "/home/ubuntu/.avalanche-cli/snapshot/evm.snapshot" + defaultMakerbookDatabasePath = "/home/ubuntu/.avalanche-cli/evm/makerbook.db" ) var ( @@ -242,6 +244,12 @@ type Config struct { // LoadFromSnapshotEnabled = true if the node should load the memory db from a snapshot LoadFromSnapshotEnabled bool `json:"load-from-snapshot-enabled"` + + // SnapshotFilePath is the path to the file which saves the latest snapshot bytes + SnapshotFilePath string `json:"snapshot-file-path"` + + // MakerbookDatabasePath is the path to the file which saves the makerbook orders + MakerbookDatabasePath string `json:"makerbook-database-path"` } // EthAPIs returns an array of strings representing the Eth APIs that should be enabled @@ -306,6 +314,8 @@ func (c *Config) SetDefaults() { c.IsValidator = defaultIsValidator c.TradingAPIEnabled = defaultTradingAPIEnabled c.LoadFromSnapshotEnabled = defaultLoadFromSnapshotEnabled + c.SnapshotFilePath = defaultSnapshotFilePath + c.MakerbookDatabasePath = defaultMakerbookDatabasePath } func (d *Duration) UnmarshalJSON(data []byte) (err error) { diff --git a/plugin/evm/limit_order.go b/plugin/evm/limit_order.go index 778b00410b..344f118adb 100644 --- a/plugin/evm/limit_order.go +++ b/plugin/evm/limit_order.go @@ -6,6 +6,7 @@ import ( "encoding/gob" "fmt" "math/big" + "os" "runtime" "runtime/debug" "sync" @@ -58,6 +59,7 @@ type limitOrderProcesser struct { tradingAPIEnabled bool loadFromSnapshotEnabled bool 
snapshotSavedBlockNumber uint64 + snapshotFilePath string tradingAPI *orderbook.TradingAPI } @@ -109,6 +111,7 @@ func NewLimitOrderProcesser(ctx *snow.Context, txPool *txpool.TxPool, shutdownCh isValidator: config.IsValidator, tradingAPIEnabled: config.TradingAPIEnabled, loadFromSnapshotEnabled: config.LoadFromSnapshotEnabled, + snapshotFilePath: config.SnapshotFilePath, } } @@ -128,7 +131,6 @@ func (lop *limitOrderProcesser) ListenAndProcessTransactions(blockBuilder *block } else { if acceptedBlockNumber > 0 { fromBlock = big.NewInt(int64(acceptedBlockNumber) + 1) - log.Info("ListenAndProcessTransactions - memory DB snapshot loaded", "acceptedBlockNumber", acceptedBlockNumber) } else { // not an error, but unlikely after the blockchain is running for some time log.Warn("ListenAndProcessTransactions - no snapshot found") @@ -349,6 +351,15 @@ func (lop *limitOrderProcesser) runMatchingTimer() { } func (lop *limitOrderProcesser) loadMemoryDBSnapshot() (acceptedBlockNumber uint64, err error) { + // logging is done in the respective functions + acceptedBlockNumber, err = lop.loadMemoryDBSnapshotFromHubbleDB() + if err != nil || acceptedBlockNumber == 0 { + acceptedBlockNumber, err = lop.loadMemoryDBSnapshotFromFile() + } + return acceptedBlockNumber, err +} + +func (lop *limitOrderProcesser) loadMemoryDBSnapshotFromHubbleDB() (acceptedBlockNumber uint64, err error) { snapshotFound, err := lop.hubbleDB.Has([]byte(memoryDBSnapshotKey)) if err != nil { return acceptedBlockNumber, fmt.Errorf("Error in checking snapshot in hubbleDB: err=%v", err) @@ -367,13 +378,46 @@ func (lop *limitOrderProcesser) loadMemoryDBSnapshot() (acceptedBlockNumber uint var snapshot orderbook.Snapshot err = gob.NewDecoder(buf).Decode(&snapshot) if err != nil { - return acceptedBlockNumber, fmt.Errorf("Error in snapshot parsing; err=%v", err) + return acceptedBlockNumber, fmt.Errorf("Error in snapshot parsing from hubbleDB; err=%v", err) } if snapshot.AcceptedBlockNumber != nil && 
snapshot.AcceptedBlockNumber.Uint64() > 0 { err = lop.memoryDb.LoadFromSnapshot(snapshot) if err != nil { - return acceptedBlockNumber, fmt.Errorf("Error in loading from snapshot: err=%v", err) + return acceptedBlockNumber, fmt.Errorf("Error in loading snapshot from hubbleDB: err=%v", err) + } else { + log.Info("ListenAndProcessTransactions - memory DB snapshot loaded from hubbleDB", "acceptedBlockNumber", snapshot.AcceptedBlockNumber) + } + + return snapshot.AcceptedBlockNumber.Uint64(), nil + } else { + return acceptedBlockNumber, nil + } +} + +func (lop *limitOrderProcesser) loadMemoryDBSnapshotFromFile() (acceptedBlockNumber uint64, err error) { + if lop.snapshotFilePath == "" { + return acceptedBlockNumber, nil + } + + memorySnapshotBytes, err := os.ReadFile(lop.snapshotFilePath) + if err != nil { + return acceptedBlockNumber, fmt.Errorf("Error in reading snapshot file: err=%v", err) + } + + buf := bytes.NewBuffer(memorySnapshotBytes) + var snapshot orderbook.Snapshot + err = gob.NewDecoder(buf).Decode(&snapshot) + if err != nil { + return acceptedBlockNumber, fmt.Errorf("Error in snapshot parsing from file; err=%v", err) + } + + if snapshot.AcceptedBlockNumber != nil && snapshot.AcceptedBlockNumber.Uint64() > 0 { + err = lop.memoryDb.LoadFromSnapshot(snapshot) + if err != nil { + return acceptedBlockNumber, fmt.Errorf("Error in loading snapshot from file: err=%v", err) + } else { + log.Info("ListenAndProcessTransactions - memory DB snapshot loaded from file", "acceptedBlockNumber", snapshot.AcceptedBlockNumber) } return snapshot.AcceptedBlockNumber.Uint64(), nil @@ -428,11 +472,21 @@ func (lop *limitOrderProcesser) saveMemoryDBSnapshot(acceptedBlockNumber *big.In return fmt.Errorf("error in gob encoding: err=%v", err) } - err = lop.hubbleDB.Put([]byte(memoryDBSnapshotKey), buf.Bytes()) + snapshotBytes := buf.Bytes() + + err = lop.hubbleDB.Put([]byte(memoryDBSnapshotKey), snapshotBytes) if err != nil { return fmt.Errorf("Error in saving to DB: err=%v", err) } + // write to 
snapshot file + if lop.snapshotFilePath != "" { + err = os.WriteFile(lop.snapshotFilePath, snapshotBytes, 0644) + if err != nil { + return fmt.Errorf("Error in writing to snapshot file: err=%v", err) + } + } + lop.snapshotSavedBlockNumber = acceptedBlockNumber.Uint64() log.Info("Saved memory DB snapshot successfully", "accepted block", acceptedBlockNumber, "head block number", currentHeadBlock.Number, "head block hash", currentHeadBlock.Hash(), "duration", time.Since(start)) diff --git a/plugin/evm/orderbook/metrics.go b/plugin/evm/orderbook/metrics.go index cc42690f72..e44c93da3f 100644 --- a/plugin/evm/orderbook/metrics.go +++ b/plugin/evm/orderbook/metrics.go @@ -39,4 +39,7 @@ var ( // unquenched liquidations unquenchedLiquidationsCounter = metrics.NewRegisteredCounter("unquenched_liquidations", nil) placeSignedOrderCounter = metrics.NewRegisteredCounter("place_signed_order", nil) + + // makerbook write failures + makerBookWriteFailuresCounter = metrics.NewRegisteredCounter("makerbook_write_failures", nil) ) diff --git a/plugin/evm/orderbook/trading_apis.go b/plugin/evm/orderbook/trading_apis.go index 8f77b9f21e..da91badf27 100644 --- a/plugin/evm/orderbook/trading_apis.go +++ b/plugin/evm/orderbook/trading_apis.go @@ -5,6 +5,7 @@ package orderbook import ( "context" + "encoding/json" "errors" "fmt" "math/big" @@ -17,10 +18,12 @@ import ( "github.com/ava-labs/subnet-evm/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" ) var traderFeed event.Feed var marketFeed event.Feed +var SignedOrderDatabaseFile = "" type TradingAPI struct { db LimitOrderDatabase @@ -340,6 +343,7 @@ func (api *TradingAPI) PlaceOrder(order *hu.SignedOrder) (common.Hash, error) { return orderId, err } if trader != signer && !api.configService.IsTradingAuthority(trader, signer) { + log.Error("not trading authority", "trader", trader.String(), "signer", signer.String()) return orderId, hu.ErrNoTradingAuthority } @@ 
-393,6 +397,10 @@ func (api *TradingAPI) PlaceOrder(order *hu.SignedOrder) (common.Hash, error) { placeSignedOrderCounter.Inc(1) api.db.AddSignedOrder(signedOrder, requiredMargin) + if len(SignedOrderDatabaseFile) > 0 { + go writeOrderToFile(order, orderId) + } + // send to trader feed - both for head and accepted block go func() { orderMap := order.Map() @@ -421,3 +429,37 @@ func (api *TradingAPI) PlaceOrder(order *hu.SignedOrder) (common.Hash, error) { return orderId, nil } + +func writeOrderToFile(order *hu.SignedOrder, orderId common.Hash) { + orderMap := order.Map() + orderMap["orderType"] = "signed" + orderMap["expireAt"] = order.ExpireAt.String() + doc := map[string]interface{}{ + "type": "OrderAccepted", + "timestamp": time.Now().Unix(), + "trader": order.Trader.String(), + "orderHash": strings.ToLower(orderId.String()), + "orderType": "signed", + "order": map[string]interface{}{ + "orderType": 2, + "expireAt": order.ExpireAt.Uint64(), + "ammIndex": order.AmmIndex.Uint64(), + "trader": order.Trader.String(), + "baseAssetQuantity": utils.BigIntToFloat(order.BaseAssetQuantity, 18), + "price": utils.BigIntToFloat(order.Price, 6), + "salt": order.Salt.Int64(), + "reduceOnly": order.ReduceOnly, + }, + } + jsonDoc, err := json.Marshal(doc) + if err != nil { + log.Error("writeOrderToFile: failed to marshal order", "err", err) + makerBookWriteFailuresCounter.Inc(1) + return + } + err = utils.AppendToFile(SignedOrderDatabaseFile, jsonDoc) + if err != nil { + log.Error("writeOrderToFile: failed to write order to file", "err", err) + makerBookWriteFailuresCounter.Inc(1) + } +} diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index d8bdfd2e48..6dbc306739 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -1011,6 +1011,7 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { if err := handler.RegisterName("order", NewOrderAPI(vm.limitOrderProcesser.GetTradingAPI(), vm)); err != nil { return nil, err } + orderbook.SignedOrderDatabaseFile 
= vm.config.MakerbookDatabasePath if err := handler.RegisterName("orderbook", vm.limitOrderProcesser.GetOrderBookAPI()); err != nil { return nil, err diff --git a/utils/file.go b/utils/file.go new file mode 100644 index 0000000000..b818ae7c37 --- /dev/null +++ b/utils/file.go @@ -0,0 +1,20 @@ +package utils + +import ( + "os" +) + +func AppendToFile(file string, data []byte) error { + f, err := os.OpenFile(file, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + // Add a newline to the beginning of the data + data = append([]byte("\n"), data...) + if _, err := f.Write(data); err != nil { + return err + } + return nil +}