From 1ca6e748aca73ca8eed40980ae9f166c2dc63a8a Mon Sep 17 00:00:00 2001 From: PatStiles Date: Thu, 21 Nov 2024 16:46:37 -0300 Subject: [PATCH 01/12] refactor retry to use config --- core/retry.go | 80 +++++++++++++++++++++++++++++++++++---------------- 1 file changed, 55 insertions(+), 25 deletions(-) diff --git a/core/retry.go b/core/retry.go index fe872f658..ff9a367f6 100644 --- a/core/retry.go +++ b/core/retry.go @@ -25,15 +25,47 @@ func (e PermanentError) Is(err error) bool { } const ( - MinDelay = 1 * time.Second // Initial delay for retry interval. - MaxInterval = 60 * time.Second // Maximum interval an individual retry may have. - MaxElapsedTime = 0 * time.Second // Maximum time all retries may take. `0` corresponds to no limit on the time of the retries. - RetryFactor float64 = 2 // Multiplier factor computed exponential retry interval is scaled by. - NumRetries uint64 = 3 // Total number of retries attempted. - MinDelayChain = 12 * time.Second // Initial delay for retry interval for contract calls. Corresponds to 1 ethereum block. - MaxIntervalChain = 2 * time.Minute // Maximum interval for an individual retry. + EthCallInitialInterval = 1 * time.Second // Initial delay for retry interval. + EthCallMaxInterval = 60 * time.Second // Maximum interval an individual retry may have. + EthCallMaxElapsedTime = 0 * time.Second // Maximum time all retries may take. `0` corresponds to no limit on the time of the retries. + EthCallRandomizationFactor float64 = 0 // Randomization (Jitter) factor used to map retry interval to a range of values around the computed interval. In precise terms (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]). NOTE: This is set to 0 as we do not use jitter in Aligned. + EthCallMultiplier float64 = 2 // Multiplier factor computed exponential retry interval is scaled by. + EthCallNumRetries uint64 = 3 // Total number of retries attempted. 
+ ChainInitialInterval = 12 * time.Second // Initial delay for retry interval for contract calls. Corresponds to 1 ethereum block. + ChainMaxInterval = 2 * time.Minute // Maximum interval for an individual retry. ) +type RetryConfig struct { + InitialInterval time.Duration // Initial delay for retry interval. + MaxInterval time.Duration // Maximum interval an individual retry may have. + MaxElapsedTime time.Duration // Maximum time all retries may take. `0` corresponds to no limit on the time of the retries. + RandomizationFactor float64 + Multiplier float64 + NumRetries uint64 +} + +func EthCallRetryConfig() *RetryConfig { + return &RetryConfig{ + InitialInterval: EthCallInitialInterval, + MaxInterval: EthCallMaxInterval, + MaxElapsedTime: EthCallMaxElapsedTime, + RandomizationFactor: EthCallRandomizationFactor, + Multiplier: EthCallMultiplier, + NumRetries: EthCallNumRetries, + } +} + +func ChainRetryConfig() *RetryConfig { + return &RetryConfig{ + InitialInterval: ChainInitialInterval, + MaxInterval: ChainMaxInterval, + MaxElapsedTime: EthCallMaxElapsedTime, + RandomizationFactor: EthCallRandomizationFactor, + Multiplier: EthCallMultiplier, + NumRetries: EthCallNumRetries, + } +} + /* Retry and RetryWithData are custom retry functions used in Aligned's aggregator and operator to facilitate consistent retry logic across the system. They are interfaces for around Cenk Alti (https://github.com/cenkalti) backoff library (https://github.com/cenkalti/backoff). We would like to thank him for his great work. 
@@ -92,7 +124,7 @@ Reference: https://github.com/cenkalti/backoff/blob/v4/exponential.go#L9 */ // Same as Retry only that the functionToRetry can return a value upon correct execution -func RetryWithData[T any](functionToRetry func() (T, error), minDelay time.Duration, factor float64, maxTries uint64, maxInterval time.Duration, maxElapsedTime time.Duration) (T, error) { +func RetryWithData[T any](functionToRetry func() (T, error), config *RetryConfig) (T, error) { f := func() (T, error) { var ( val T @@ -118,17 +150,16 @@ func RetryWithData[T any](functionToRetry func() (T, error), minDelay time.Durat return val, err } - randomOption := backoff.WithRandomizationFactor(0) - - initialRetryOption := backoff.WithInitialInterval(minDelay) - multiplierOption := backoff.WithMultiplier(factor) - maxIntervalOption := backoff.WithMaxInterval(maxInterval) - maxElapsedTimeOption := backoff.WithMaxElapsedTime(maxElapsedTime) + initialRetryOption := backoff.WithInitialInterval(config.InitialInterval) + multiplierOption := backoff.WithMultiplier(config.Multiplier) + maxIntervalOption := backoff.WithMaxInterval(config.MaxInterval) + maxElapsedTimeOption := backoff.WithMaxElapsedTime(config.MaxElapsedTime) + randomOption := backoff.WithRandomizationFactor(config.RandomizationFactor) expBackoff := backoff.NewExponentialBackOff(randomOption, multiplierOption, initialRetryOption, maxIntervalOption, maxElapsedTimeOption) var maxRetriesBackoff backoff.BackOff - if maxTries > 0 { - maxRetriesBackoff = backoff.WithMaxRetries(expBackoff, maxTries) + if config.NumRetries > 0 { + maxRetriesBackoff = backoff.WithMaxRetries(expBackoff, config.NumRetries) } else { maxRetriesBackoff = expBackoff } @@ -142,7 +173,7 @@ func RetryWithData[T any](functionToRetry func() (T, error), minDelay time.Durat // from the configuration are reached, or until a `PermanentError` is returned. // The function to be retried should return `PermanentError` when the condition for stop retrying // is met. 
-func Retry(functionToRetry func() error, minDelay time.Duration, factor float64, maxTries uint64, maxInterval time.Duration, maxElapsedTime time.Duration) error { +func Retry(functionToRetry func() error, config *RetryConfig) error { f := func() error { var err error func() { @@ -165,17 +196,16 @@ func Retry(functionToRetry func() error, minDelay time.Duration, factor float64, return err } - randomOption := backoff.WithRandomizationFactor(0) - - initialRetryOption := backoff.WithInitialInterval(minDelay) - multiplierOption := backoff.WithMultiplier(factor) - maxIntervalOption := backoff.WithMaxInterval(maxInterval) - maxElapsedTimeOption := backoff.WithMaxElapsedTime(maxElapsedTime) + initialRetryOption := backoff.WithInitialInterval(config.InitialInterval) + multiplierOption := backoff.WithMultiplier(config.Multiplier) + maxIntervalOption := backoff.WithMaxInterval(config.MaxInterval) + maxElapsedTimeOption := backoff.WithMaxElapsedTime(config.MaxElapsedTime) + randomOption := backoff.WithRandomizationFactor(config.RandomizationFactor) expBackoff := backoff.NewExponentialBackOff(randomOption, multiplierOption, initialRetryOption, maxIntervalOption, maxElapsedTimeOption) var maxRetriesBackoff backoff.BackOff - if maxTries > 0 { - maxRetriesBackoff = backoff.WithMaxRetries(expBackoff, maxTries) + if config.NumRetries > 0 { + maxRetriesBackoff = backoff.WithMaxRetries(expBackoff, config.NumRetries) } else { maxRetriesBackoff = expBackoff } From b1640e0b894a6dd88f5fd8cddfec8fd84122b805 Mon Sep 17 00:00:00 2001 From: PatStiles Date: Thu, 21 Nov 2024 16:51:59 -0300 Subject: [PATCH 02/12] add retry config to avs_writer --- core/chainio/avs_writer.go | 27 +++++++++++++++++++++++---- core/utils/eth_client_utils.go | 9 ++++----- 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/core/chainio/avs_writer.go b/core/chainio/avs_writer.go index 63a3da914..d419ad2ca 100644 --- a/core/chainio/avs_writer.go +++ b/core/chainio/avs_writer.go @@ -22,6 +22,14 @@ import ( 
"github.com/yetanotherco/aligned_layer/metrics" ) +const ( + waitForTxMaxInterval = 2 * time.Second + waitForTxNumRetries = 0 + respondToTaskV2NumRetries uint64 = 0 + respondToTaskV2MaxInterval = time.Millisecond * 500 + respondToTaskV2MaxElapsedTime = 0 +) + type AvsWriter struct { *avsregistry.ChainWriter AvsContractBindings *AvsServiceBindings @@ -104,12 +112,24 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe txOpts.NoSend = false i := 0 + // Set Retry config for RespondToTaskV2 + respondToTaskV2Config := retry.EthCallRetryConfig() + respondToTaskV2Config.NumRetries = respondToTaskV2NumRetries + respondToTaskV2Config.MaxInterval = respondToTaskV2MaxInterval + respondToTaskV2Config.MaxElapsedTime = respondToTaskV2MaxElapsedTime + + // Set Retry config for WaitForTxRetryable + waitForTxConfig := retry.EthCallRetryConfig() + waitForTxConfig.MaxInterval = waitForTxMaxInterval + waitForTxConfig.NumRetries = waitForTxNumRetries + waitForTxConfig.MaxElapsedTime = timeToWaitBeforeBump + var sentTxs []*types.Transaction batchMerkleRootHashString := hex.EncodeToString(batchMerkleRoot[:]) respondToTaskV2Func := func() (*types.Receipt, error) { - gasPrice, err := utils.GetGasPriceRetryable(w.Client, w.ClientFallback) + gasPrice, err := utils.GetGasPriceRetryable(w.Client, w.ClientFallback, retry.EthCallRetryConfig()) if err != nil { return nil, err } @@ -171,7 +191,7 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe sentTxs = append(sentTxs, realTx) w.logger.Infof("Transaction sent, waiting for receipt", "merkle root", batchMerkleRootHashString) - receipt, err := utils.WaitForTransactionReceiptRetryable(w.Client, w.ClientFallback, realTx.Hash(), timeToWaitBeforeBump) + receipt, err := utils.WaitForTransactionReceiptRetryable(w.Client, w.ClientFallback, realTx.Hash(), waitForTxConfig) if receipt != nil { w.checkIfAggregatorHadToPaidForBatcher(realTx, batchIdentifierHash) return receipt, nil @@ -191,8 
+211,7 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe // This just retries the bump of a fee in case of a timeout // The wait is done before on WaitForTransactionReceiptRetryable, and all the functions are retriable, // so this retry doesn't need to wait more time - maxInterval := time.Millisecond * 500 - return retry.RetryWithData(respondToTaskV2Func, retry.MinDelay, retry.RetryFactor, 0, maxInterval, 0) + return retry.RetryWithData(respondToTaskV2Func, respondToTaskV2Config) } // Calculates the transaction cost from the receipt and compares it with the batcher respondToTaskFeeLimit diff --git a/core/utils/eth_client_utils.go b/core/utils/eth_client_utils.go index 36b28f036..c0c633977 100644 --- a/core/utils/eth_client_utils.go +++ b/core/utils/eth_client_utils.go @@ -3,7 +3,6 @@ package utils import ( "context" "math/big" - "time" "github.com/Layr-Labs/eigensdk-go/chainio/clients/eth" eigentypes "github.com/Layr-Labs/eigensdk-go/types" @@ -20,7 +19,7 @@ import ( // Setting a higher value will imply doing less retries across the waitTimeout, and so we might lose the receipt // All errors are considered Transient Errors // - Retry times: 0.5s, 1s, 2s, 2s, 2s, ... 
until it reaches waitTimeout -func WaitForTransactionReceiptRetryable(client eth.InstrumentedClient, fallbackClient eth.InstrumentedClient, txHash gethcommon.Hash, waitTimeout time.Duration) (*types.Receipt, error) { +func WaitForTransactionReceiptRetryable(client eth.InstrumentedClient, fallbackClient eth.InstrumentedClient, txHash gethcommon.Hash, config *retry.RetryConfig) (*types.Receipt, error) { receipt_func := func() (*types.Receipt, error) { receipt, err := client.TransactionReceipt(context.Background(), txHash) if err != nil { @@ -32,7 +31,7 @@ func WaitForTransactionReceiptRetryable(client eth.InstrumentedClient, fallbackC } return receipt, nil } - return retry.RetryWithData(receipt_func, retry.MinDelay, retry.RetryFactor, 0, time.Second*2, waitTimeout) + return retry.RetryWithData(receipt_func, config) } func BytesToQuorumNumbers(quorumNumbersBytes []byte) eigentypes.QuorumNums { @@ -87,7 +86,7 @@ Get the gas price from the client with retry logic. - All errors are considered Transient Errors - Retry times: 1 sec, 2 sec, 4 sec */ -func GetGasPriceRetryable(client eth.InstrumentedClient, fallbackClient eth.InstrumentedClient) (*big.Int, error) { +func GetGasPriceRetryable(client eth.InstrumentedClient, fallbackClient eth.InstrumentedClient, config *retry.RetryConfig) (*big.Int, error) { respondToTaskV2_func := func() (*big.Int, error) { gasPrice, err := client.SuggestGasPrice(context.Background()) if err != nil { @@ -99,5 +98,5 @@ func GetGasPriceRetryable(client eth.InstrumentedClient, fallbackClient eth.Inst return gasPrice, nil } - return retry.RetryWithData(respondToTaskV2_func, retry.MinDelay, retry.RetryFactor, retry.NumRetries, retry.MaxInterval, retry.MaxElapsedTime) + return retry.RetryWithData(respondToTaskV2_func, config) } From 73b6e6ec8e60be996dcb9be961be71a77e212062 Mon Sep 17 00:00:00 2001 From: PatStiles Date: Thu, 21 Nov 2024 17:22:52 -0300 Subject: [PATCH 03/12] use config in retryable functions --- aggregator/pkg/server.go | 2 +- 
core/chainio/retryable.go | 42 ++++++++++++++++++++------------------- 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/aggregator/pkg/server.go b/aggregator/pkg/server.go index 1eccfeb78..e8efa4006 100644 --- a/aggregator/pkg/server.go +++ b/aggregator/pkg/server.go @@ -118,5 +118,5 @@ func (agg *Aggregator) GetTaskIndex(batchIdentifierHash [32]byte) (uint32, error } } - return retry.RetryWithData(getTaskIndex_func, retry.MinDelay, retry.RetryFactor, retry.NumRetries, retry.MaxInterval, retry.MaxElapsedTime) + return retry.RetryWithData(getTaskIndex_func, retry.EthCallRetryConfig()) } diff --git a/core/chainio/retryable.go b/core/chainio/retryable.go index ad44a2509..8df721491 100644 --- a/core/chainio/retryable.go +++ b/core/chainio/retryable.go @@ -22,7 +22,7 @@ Send a transaction to the AVS contract to respond to a task. - Retry times (3 retries): 12 sec (1 Blocks), 24 sec (2 Blocks), 48 sec (4 Blocks) - NOTE: Contract call reverts are not considered `PermanentError`'s as block reorg's may lead to contract call revert in which case the aggregator should retry. 
*/ -func (w *AvsWriter) RespondToTaskV2Retryable(opts *bind.TransactOpts, batchMerkleRoot [32]byte, senderAddress common.Address, nonSignerStakesAndSignature servicemanager.IBLSSignatureCheckerNonSignerStakesAndSignature) (*types.Transaction, error) { +func (w *AvsWriter) RespondToTaskV2Retryable(opts *bind.TransactOpts, batchMerkleRoot [32]byte, senderAddress common.Address, nonSignerStakesAndSignature servicemanager.IBLSSignatureCheckerNonSignerStakesAndSignature, config *retry.RetryConfig) (*types.Transaction, error) { respondToTaskV2_func := func() (*types.Transaction, error) { // Try with main connection tx, err := w.AvsContractBindings.ServiceManager.RespondToTaskV2(opts, batchMerkleRoot, senderAddress, nonSignerStakesAndSignature) @@ -33,7 +33,7 @@ func (w *AvsWriter) RespondToTaskV2Retryable(opts *bind.TransactOpts, batchMerkl return tx, err } - return retry.RetryWithData(respondToTaskV2_func, retry.MinDelayChain, retry.RetryFactor, retry.NumRetries, retry.MaxIntervalChain, retry.MaxElapsedTime) + return retry.RetryWithData(respondToTaskV2_func, config) } /* @@ -42,7 +42,7 @@ Get the state of a batch from the AVS contract. - All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec */ -func (w *AvsWriter) BatchesStateRetryable(opts *bind.CallOpts, arg0 [32]byte) (struct { +func (w *AvsWriter) BatchesStateRetryable(opts *bind.CallOpts, arg0 [32]byte, config *retry.RetryConfig) (struct { TaskCreatedBlock uint32 Responded bool RespondToTaskFeeLimit *big.Int @@ -61,7 +61,7 @@ func (w *AvsWriter) BatchesStateRetryable(opts *bind.CallOpts, arg0 [32]byte) (s } return state, err } - return retry.RetryWithData(batchesState_func, retry.MinDelay, retry.RetryFactor, retry.NumRetries, retry.MaxInterval, retry.MaxElapsedTime) + return retry.RetryWithData(batchesState_func, config) } /* @@ -70,7 +70,7 @@ Get the balance of a batcher from the AVS contract. 
- All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec */ -func (w *AvsWriter) BatcherBalancesRetryable(opts *bind.CallOpts, senderAddress common.Address) (*big.Int, error) { +func (w *AvsWriter) BatcherBalancesRetryable(opts *bind.CallOpts, senderAddress common.Address, config *retry.RetryConfig) (*big.Int, error) { batcherBalances_func := func() (*big.Int, error) { // Try with main connection batcherBalance, err := w.AvsContractBindings.ServiceManager.BatchersBalances(opts, senderAddress) @@ -80,7 +80,7 @@ func (w *AvsWriter) BatcherBalancesRetryable(opts *bind.CallOpts, senderAddress } return batcherBalance, err } - return retry.RetryWithData(batcherBalances_func, retry.MinDelay, retry.RetryFactor, retry.NumRetries, retry.MaxInterval, retry.MaxElapsedTime) + return retry.RetryWithData(batcherBalances_func, config) } /* @@ -91,7 +91,7 @@ TODO: it gets the balance from an Address, not necessarily an aggregator. The na - All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec. 
*/ -func (w *AvsWriter) BalanceAtRetryable(ctx context.Context, aggregatorAddress common.Address, blockNumber *big.Int) (*big.Int, error) { +func (w *AvsWriter) BalanceAtRetryable(ctx context.Context, aggregatorAddress common.Address, blockNumber *big.Int, config *retry.RetryConfig) (*big.Int, error) { balanceAt_func := func() (*big.Int, error) { // Try with main connection aggregatorBalance, err := w.Client.BalanceAt(ctx, aggregatorAddress, blockNumber) @@ -101,7 +101,7 @@ func (w *AvsWriter) BalanceAtRetryable(ctx context.Context, aggregatorAddress co } return aggregatorBalance, err } - return retry.RetryWithData(balanceAt_func, retry.MinDelay, retry.RetryFactor, retry.NumRetries, retry.MaxInterval, retry.MaxElapsedTime) + return retry.RetryWithData(balanceAt_func, config) } // |---AVS_SUBSCRIBER---| @@ -112,7 +112,7 @@ Get the latest block number from Ethereum - All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec. */ -func (s *AvsSubscriber) BlockNumberRetryable(ctx context.Context) (uint64, error) { +func (s *AvsSubscriber) BlockNumberRetryable(ctx context.Context, config *retry.RetryConfig) (uint64, error) { latestBlock_func := func() (uint64, error) { // Try with main connection latestBlock, err := s.AvsContractBindings.ethClient.BlockNumber(ctx) @@ -122,7 +122,7 @@ func (s *AvsSubscriber) BlockNumberRetryable(ctx context.Context) (uint64, error } return latestBlock, err } - return retry.RetryWithData(latestBlock_func, retry.MinDelay, retry.RetryFactor, retry.NumRetries, retry.MaxInterval, retry.MaxElapsedTime) + return retry.RetryWithData(latestBlock_func, config) } /* @@ -131,11 +131,11 @@ Get NewBatchV2 logs from the AVS contract. - All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec. 
*/ -func (s *AvsSubscriber) FilterBatchV2Retryable(opts *bind.FilterOpts, batchMerkleRoot [][32]byte) (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV2Iterator, error) { +func (s *AvsSubscriber) FilterBatchV2Retryable(opts *bind.FilterOpts, batchMerkleRoot [][32]byte, config *retry.RetryConfig) (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV2Iterator, error) { filterNewBatchV2_func := func() (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV2Iterator, error) { return s.AvsContractBindings.ServiceManager.FilterNewBatchV2(opts, batchMerkleRoot) } - return retry.RetryWithData(filterNewBatchV2_func, retry.MinDelay, retry.RetryFactor, retry.NumRetries, retry.MaxInterval, retry.MaxElapsedTime) + return retry.RetryWithData(filterNewBatchV2_func, config) } /* @@ -144,11 +144,11 @@ Get NewBatchV3 logs from the AVS contract. - All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec. */ -func (s *AvsSubscriber) FilterBatchV3Retryable(opts *bind.FilterOpts, batchMerkleRoot [][32]byte) (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV3Iterator, error) { +func (s *AvsSubscriber) FilterBatchV3Retryable(opts *bind.FilterOpts, batchMerkleRoot [][32]byte, config *retry.RetryConfig) (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV3Iterator, error) { filterNewBatchV2_func := func() (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV3Iterator, error) { return s.AvsContractBindings.ServiceManager.FilterNewBatchV3(opts, batchMerkleRoot) } - return retry.RetryWithData(filterNewBatchV2_func, retry.MinDelay, retry.RetryFactor, retry.NumRetries, retry.MaxInterval, retry.MaxElapsedTime) + return retry.RetryWithData(filterNewBatchV2_func, config) } /* @@ -157,7 +157,7 @@ Get the state of a batch from the AVS contract. 
- All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec */ -func (s *AvsSubscriber) BatchesStateRetryable(opts *bind.CallOpts, arg0 [32]byte) (struct { +func (s *AvsSubscriber) BatchesStateRetryable(opts *bind.CallOpts, arg0 [32]byte, config *retry.RetryConfig) (struct { TaskCreatedBlock uint32 Responded bool RespondToTaskFeeLimit *big.Int @@ -170,7 +170,7 @@ func (s *AvsSubscriber) BatchesStateRetryable(opts *bind.CallOpts, arg0 [32]byte return s.AvsContractBindings.ServiceManager.ContractAlignedLayerServiceManagerCaller.BatchesState(opts, arg0) } - return retry.RetryWithData(batchState_func, retry.MinDelay, retry.RetryFactor, retry.NumRetries, retry.MaxInterval, retry.MaxElapsedTime) + return retry.RetryWithData(batchState_func, config) } /* @@ -179,7 +179,7 @@ Subscribe to new heads from the Ethereum node. - All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec. */ -func (s *AvsSubscriber) SubscribeNewHeadRetryable(ctx context.Context, c chan<- *types.Header) (ethereum.Subscription, error) { +func (s *AvsSubscriber) SubscribeNewHeadRetryable(ctx context.Context, c chan<- *types.Header, config *retry.RetryConfig) (ethereum.Subscription, error) { subscribeNewHead_func := func() (ethereum.Subscription, error) { // Try with main connection sub, err := s.AvsContractBindings.ethClient.SubscribeNewHead(ctx, c) @@ -189,7 +189,7 @@ func (s *AvsSubscriber) SubscribeNewHeadRetryable(ctx context.Context, c chan<- } return sub, err } - return retry.RetryWithData(subscribeNewHead_func, retry.MinDelay, retry.RetryFactor, retry.NumRetries, retry.MaxInterval, retry.MaxElapsedTime) + return retry.RetryWithData(subscribeNewHead_func, config) } /* @@ -203,11 +203,12 @@ func SubscribeToNewTasksV2Retryable( serviceManager *servicemanager.ContractAlignedLayerServiceManager, newTaskCreatedChan chan *servicemanager.ContractAlignedLayerServiceManagerNewBatchV2, batchMerkleRoot [][32]byte, + config *retry.RetryConfig, 
) (event.Subscription, error) { subscribe_func := func() (event.Subscription, error) { return serviceManager.WatchNewBatchV2(opts, newTaskCreatedChan, batchMerkleRoot) } - return retry.RetryWithData(subscribe_func, retry.MinDelay, retry.RetryFactor, retry.NumRetries, retry.MaxInterval, retry.MaxElapsedTime) + return retry.RetryWithData(subscribe_func, config) } /* @@ -221,9 +222,10 @@ func SubscribeToNewTasksV3Retryable( serviceManager *servicemanager.ContractAlignedLayerServiceManager, newTaskCreatedChan chan *servicemanager.ContractAlignedLayerServiceManagerNewBatchV3, batchMerkleRoot [][32]byte, + config *retry.RetryConfig, ) (event.Subscription, error) { subscribe_func := func() (event.Subscription, error) { return serviceManager.WatchNewBatchV3(opts, newTaskCreatedChan, batchMerkleRoot) } - return retry.RetryWithData(subscribe_func, retry.MinDelay, retry.RetryFactor, retry.NumRetries, retry.MaxInterval, retry.MaxElapsedTime) + return retry.RetryWithData(subscribe_func, config) } From 1c9f3a819e426ad0854b6caae1f346b94a0aea40 Mon Sep 17 00:00:00 2001 From: PatStiles Date: Thu, 21 Nov 2024 17:23:08 -0300 Subject: [PATCH 04/12] use retry config in operator --- core/chainio/avs_subscriber.go | 36 ++++++------- core/chainio/avs_writer.go | 14 +++--- core/retry_test.go | 92 ++++++++++++++++++++-------------- 3 files changed, 79 insertions(+), 63 deletions(-) diff --git a/core/chainio/avs_subscriber.go b/core/chainio/avs_subscriber.go index ea810da62..a193ed1b3 100644 --- a/core/chainio/avs_subscriber.go +++ b/core/chainio/avs_subscriber.go @@ -66,15 +66,15 @@ func (s *AvsSubscriber) SubscribeToNewTasksV2(newTaskCreatedChan chan *servicema internalChannel := make(chan *servicemanager.ContractAlignedLayerServiceManagerNewBatchV2) // Subscribe to new tasks - sub, err := SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil) + sub, err := SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, 
internalChannel, nil, retry.EthCallRetryConfig()) if err != nil { - s.logger.Error("Primary failed to subscribe to new AlignedLayer V2 tasks after %d retries", retry.NumRetries, "err", err) + s.logger.Error("Primary failed to subscribe to new AlignedLayer V2 tasks after %d retries", retry.EthCallNumRetries, "err", err) return nil, err } - subFallback, err := SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil) + subFallback, err := SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryConfig()) if err != nil { - s.logger.Error("Fallback failed to subscribe to new AlignedLayer V2 tasks after %d retries", retry.NumRetries, "err", err) + s.logger.Error("Fallback failed to subscribe to new AlignedLayer V2 tasks after %d retries", retry.EthCallNumRetries, "err", err) return nil, err } s.logger.Info("Subscribed to new AlignedLayer V2 tasks") @@ -114,14 +114,14 @@ func (s *AvsSubscriber) SubscribeToNewTasksV2(newTaskCreatedChan chan *servicema case err := <-sub.Err(): s.logger.Warn("Error in new task subscription", "err", err) sub.Unsubscribe() - sub, err = SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil) + sub, err = SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryConfig()) if err != nil { errorChannel <- err } case err := <-subFallback.Err(): s.logger.Warn("Error in fallback new task subscription", "err", err) subFallback.Unsubscribe() - subFallback, err = SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil) + subFallback, err = SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryConfig()) if err != nil { errorChannel <- err } @@ -137,13 
+137,13 @@ func (s *AvsSubscriber) SubscribeToNewTasksV3(newTaskCreatedChan chan *servicema internalChannel := make(chan *servicemanager.ContractAlignedLayerServiceManagerNewBatchV3) // Subscribe to new tasks - sub, err := SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil) + sub, err := SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryConfig()) if err != nil { s.logger.Error("Primary failed to subscribe to new AlignedLayer V3 tasks after %d retries", MaxRetries, "err", err) return nil, err } - subFallback, err := SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil) + subFallback, err := SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryConfig()) if err != nil { s.logger.Error("Fallback failed to subscribe to new AlignedLayer V3 tasks after %d retries", MaxRetries, "err", err) return nil, err @@ -185,14 +185,14 @@ func (s *AvsSubscriber) SubscribeToNewTasksV3(newTaskCreatedChan chan *servicema case err := <-sub.Err(): s.logger.Warn("Error in new task subscription", "err", err) sub.Unsubscribe() - sub, err = SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil) + sub, err = SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryConfig()) if err != nil { errorChannel <- err } case err := <-subFallback.Err(): s.logger.Warn("Error in fallback new task subscription", "err", err) subFallback.Unsubscribe() - subFallback, err = SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil) + subFallback, err = SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, 
internalChannel, nil, retry.EthCallRetryConfig()) if err != nil { errorChannel <- err } @@ -258,7 +258,7 @@ func (s *AvsSubscriber) processNewBatchV3(batch *servicemanager.ContractAlignedL // getLatestNotRespondedTaskFromEthereum queries the blockchain for the latest not responded task using the FilterNewBatch method. func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV2() (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV2, error) { - latestBlock, err := s.BlockNumberRetryable(context.Background()) + latestBlock, err := s.BlockNumberRetryable(context.Background(), retry.EthCallRetryConfig()) if err != nil { return nil, err } @@ -271,7 +271,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV2() (*servicemanag fromBlock = latestBlock - BlockInterval } - logs, err := s.FilterBatchV2Retryable(&bind.FilterOpts{Start: fromBlock, End: nil, Context: context.Background()}, nil) + logs, err := s.FilterBatchV2Retryable(&bind.FilterOpts{Start: fromBlock, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) if err != nil { return nil, err } @@ -293,7 +293,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV2() (*servicemanag batchIdentifier := append(lastLog.BatchMerkleRoot[:], lastLog.SenderAddress[:]...) batchIdentifierHash := *(*[32]byte)(crypto.Keccak256(batchIdentifier)) - state, err := s.BatchesStateRetryable(nil, batchIdentifierHash) + state, err := s.BatchesStateRetryable(nil, batchIdentifierHash, retry.EthCallRetryConfig()) if err != nil { return nil, err } @@ -307,7 +307,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV2() (*servicemanag // getLatestNotRespondedTaskFromEthereum queries the blockchain for the latest not responded task using the FilterNewBatch method. 
func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV3() (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV3, error) { - latestBlock, err := s.BlockNumberRetryable(context.Background()) + latestBlock, err := s.BlockNumberRetryable(context.Background(), retry.EthCallRetryConfig()) if err != nil { return nil, err } @@ -320,7 +320,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV3() (*servicemanag fromBlock = latestBlock - BlockInterval } - logs, err := s.FilterBatchV3Retryable(&bind.FilterOpts{Start: fromBlock, End: nil, Context: context.Background()}, nil) + logs, err := s.FilterBatchV3Retryable(&bind.FilterOpts{Start: fromBlock, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) if err != nil { return nil, err } @@ -342,7 +342,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV3() (*servicemanag batchIdentifier := append(lastLog.BatchMerkleRoot[:], lastLog.SenderAddress[:]...) batchIdentifierHash := *(*[32]byte)(crypto.Keccak256(batchIdentifier)) - state, err := s.BatchesStateRetryable(nil, batchIdentifierHash) + state, err := s.BatchesStateRetryable(nil, batchIdentifierHash, retry.EthCallRetryConfig()) if err != nil { return nil, err } @@ -355,7 +355,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV3() (*servicemanag } func (s *AvsSubscriber) WaitForOneBlock(startBlock uint64) error { - currentBlock, err := s.BlockNumberRetryable(context.Background()) + currentBlock, err := s.BlockNumberRetryable(context.Background(), retry.EthCallRetryConfig()) if err != nil { return err } @@ -363,7 +363,7 @@ func (s *AvsSubscriber) WaitForOneBlock(startBlock uint64) error { if currentBlock <= startBlock { // should really be == but just in case // Subscribe to new head c := make(chan *types.Header) - sub, err := s.SubscribeNewHeadRetryable(context.Background(), c) + sub, err := s.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryConfig()) if err != nil { return err } 
diff --git a/core/chainio/avs_writer.go b/core/chainio/avs_writer.go index d419ad2ca..c680eeb80 100644 --- a/core/chainio/avs_writer.go +++ b/core/chainio/avs_writer.go @@ -100,7 +100,7 @@ func NewAvsWriterFromConfig(baseConfig *config.BaseConfig, ecdsaConfig *config.E func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMerkleRoot [32]byte, senderAddress [20]byte, nonSignerStakesAndSignature servicemanager.IBLSSignatureCheckerNonSignerStakesAndSignature, gasBumpPercentage uint, gasBumpIncrementalPercentage uint, timeToWaitBeforeBump time.Duration, onGasPriceBumped func(*big.Int)) (*types.Receipt, error) { txOpts := *w.Signer.GetTxOpts() txOpts.NoSend = true // simulate the transaction - simTx, err := w.RespondToTaskV2Retryable(&txOpts, batchMerkleRoot, senderAddress, nonSignerStakesAndSignature) + simTx, err := w.RespondToTaskV2Retryable(&txOpts, batchMerkleRoot, senderAddress, nonSignerStakesAndSignature, retry.ChainRetryConfig()) if err != nil { return nil, err } @@ -164,7 +164,7 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe } } w.logger.Infof("Receipts for old transactions not found, will check if the batch state has been responded", "merkle root", batchMerkleRootHashString) - batchState, _ := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash) + batchState, _ := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.ChainRetryConfig()) if batchState.Responded { w.logger.Infof("Batch state has been already responded", "merkle root", batchMerkleRootHashString) return nil, nil @@ -183,7 +183,7 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe } w.logger.Infof("Sending RespondToTask transaction with a gas price of %v", txOpts.GasPrice, "merkle root", batchMerkleRootHashString) - realTx, err := w.RespondToTaskV2Retryable(&txOpts, batchMerkleRoot, senderAddress, nonSignerStakesAndSignature) + realTx, err := w.RespondToTaskV2Retryable(&txOpts, 
batchMerkleRoot, senderAddress, nonSignerStakesAndSignature, retry.ChainRetryConfig()) if err != nil { w.logger.Errorf("Respond to task transaction err, %v", err, "merkle root", batchMerkleRootHashString) return nil, err @@ -218,7 +218,7 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe // if the tx cost was higher, then it means the aggregator has paid the difference for the batcher (txCost - respondToTaskFeeLimit) and so metrics are updated accordingly. // otherwise nothing is done. func (w *AvsWriter) checkIfAggregatorHadToPaidForBatcher(tx *types.Transaction, batchIdentifierHash [32]byte) { - batchState, err := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash) + batchState, err := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.EthCallRetryConfig()) if err != nil { return } @@ -244,7 +244,7 @@ func (w *AvsWriter) checkAggAndBatcherHaveEnoughBalance(tx *types.Transaction, t txCost := new(big.Int).Mul(txGasAsBigInt, txGasPrice) w.logger.Info("Transaction cost", "cost", txCost) - batchState, err := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash) + batchState, err := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.EthCallRetryConfig()) if err != nil { w.logger.Error("Failed to get batch state", "error", err) w.logger.Info("Proceeding to check balances against transaction cost") @@ -272,7 +272,7 @@ func (w *AvsWriter) compareAggregatorBalance(amount *big.Int, aggregatorAddress ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - aggregatorBalance, err := w.BalanceAtRetryable(ctx, aggregatorAddress, nil) + aggregatorBalance, err := w.BalanceAtRetryable(ctx, aggregatorAddress, nil, retry.EthCallRetryConfig()) if err != nil { // Ignore and continue. 
w.logger.Error("failed to get aggregator balance: %v", err) @@ -287,7 +287,7 @@ func (w *AvsWriter) compareAggregatorBalance(amount *big.Int, aggregatorAddress func (w *AvsWriter) compareBatcherBalance(amount *big.Int, senderAddress [20]byte) error { // Get batcher balance - batcherBalance, err := w.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress) + batcherBalance, err := w.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryConfig()) if err != nil { // Ignore and continue. w.logger.Error("Failed to get batcherBalance", "error", err) diff --git a/core/retry_test.go b/core/retry_test.go index 22d886f2e..d379a8279 100644 --- a/core/retry_test.go +++ b/core/retry_test.go @@ -42,7 +42,15 @@ func TestRetryWithData(t *testing.T) { x, err := DummyFunction(43) return &x, err } - _, err := retry.RetryWithData(function, 1000, 2, 3, retry.MaxInterval, retry.MaxElapsedTime) + config := &retry.RetryConfig{ + InitialInterval: 1000, + MaxInterval: 2, + MaxElapsedTime: 3, + RandomizationFactor: 0, + Multiplier: retry.EthCallMultiplier, + NumRetries: retry.EthCallNumRetries, + } + _, err := retry.RetryWithData(function, config) if err != nil { t.Errorf("Retry error!: %s", err) } @@ -53,7 +61,15 @@ func TestRetry(t *testing.T) { _, err := DummyFunction(43) return err } - err := retry.Retry(function, 1000, 2, 3, retry.MaxInterval, retry.MaxElapsedTime) + config := &retry.RetryConfig{ + InitialInterval: 1000, + MaxInterval: 2, + MaxElapsedTime: 3, + RandomizationFactor: 0, + Multiplier: retry.EthCallMultiplier, + NumRetries: retry.EthCallNumRetries, + } + err := retry.Retry(function, config) if err != nil { t.Errorf("Retry error!: %s", err) } @@ -153,7 +169,7 @@ func TestWaitForTransactionReceipt(t *testing.T) { } // Assert Call succeeds when Anvil running - _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, time.Second*45) + _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.EthCallRetryConfig()) 
assert.NotNil(t, err, "Error Waiting for Transaction with Anvil Running: %s\n", err) if !strings.Contains(err.Error(), "not found") { t.Errorf("WaitForTransactionReceipt Emitted incorrect error: %s\n", err) @@ -165,7 +181,7 @@ func TestWaitForTransactionReceipt(t *testing.T) { return } - _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, time.Second*45) + _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.EthCallRetryConfig()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("WaitForTransactionReceipt Emitted non Transient error: %s\n", err) @@ -181,7 +197,7 @@ func TestWaitForTransactionReceipt(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, time.Second*45) + _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.EthCallRetryConfig()) assert.NotNil(t, err) if !strings.Contains(err.Error(), "not found") { t.Errorf("WaitForTransactionReceipt Emitted incorrect error: %s\n", err) @@ -294,7 +310,7 @@ func TestSubscribeToNewTasksV3(t *testing.T) { t.Errorf("Error setting up Avs Service Bindings: %s\n", err) } - _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil) + _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -302,7 +318,7 @@ func TestSubscribeToNewTasksV3(t *testing.T) { return } - _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil) + _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryConfig()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("SubscribeToNewTasksV3 Emitted non Transient error: %s\n", err) @@ -318,7 +334,7 @@ func 
TestSubscribeToNewTasksV3(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil) + _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -344,7 +360,7 @@ func TestSubscribeToNewTasksV2(t *testing.T) { t.Errorf("Error setting up Avs Service Bindings: %s\n", err) } - _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil) + _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -352,7 +368,7 @@ func TestSubscribeToNewTasksV2(t *testing.T) { return } - _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil) + _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryConfig()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("SubscribeToNewTasksV2 Emitted non Transient error: %s\n", err) @@ -368,7 +384,7 @@ func TestSubscribeToNewTasksV2(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil) + _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -389,7 +405,7 @@ func TestBlockNumber(t *testing.T) { if err != nil { return } - _, err = sub.BlockNumberRetryable(context.Background()) + _, err = sub.BlockNumberRetryable(context.Background(), retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -397,7 +413,7 @@ func TestBlockNumber(t *testing.T) 
{ return } - _, err = sub.BlockNumberRetryable(context.Background()) + _, err = sub.BlockNumberRetryable(context.Background(), retry.EthCallRetryConfig()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("BlockNumber Emitted non Transient error: %s\n", err) @@ -413,7 +429,7 @@ func TestBlockNumber(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = sub.BlockNumberRetryable(context.Background()) + _, err = sub.BlockNumberRetryable(context.Background(), retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -433,7 +449,7 @@ func TestFilterBatchV2(t *testing.T) { if err != nil { return } - _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil) + _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -441,7 +457,7 @@ func TestFilterBatchV2(t *testing.T) { return } - _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil) + _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("FilterBatchV2 Emitted non Transient error: %s\n", err) @@ -457,7 +473,7 @@ func TestFilterBatchV2(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil) + _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -477,7 +493,7 @@ func TestFilterBatchV3(t *testing.T) { if err != nil 
{ return } - _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil) + _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -485,7 +501,7 @@ func TestFilterBatchV3(t *testing.T) { return } - _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil) + _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("FilerBatchV3 Emitted non Transient error: %s\n", err) @@ -501,7 +517,7 @@ func TestFilterBatchV3(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil) + _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -523,7 +539,7 @@ func TestBatchesStateSubscriber(t *testing.T) { } zero_bytes := [32]byte{} - _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes) + _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -531,7 +547,7 @@ func TestBatchesStateSubscriber(t *testing.T) { return } - _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes) + _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.EthCallRetryConfig()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("BatchesStateSubscriber Emitted non Transient error: %s\n", err) @@ -547,7 +563,7 @@ func TestBatchesStateSubscriber(t 
*testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes) + _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -569,7 +585,7 @@ func TestSubscribeNewHead(t *testing.T) { return } - _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c) + _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -577,7 +593,7 @@ func TestSubscribeNewHead(t *testing.T) { return } - _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c) + _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryConfig()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("SubscribeNewHead Emitted non Transient error: %s\n", err) @@ -593,7 +609,7 @@ func TestSubscribeNewHead(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c) + _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -646,7 +662,7 @@ func TestRespondToTaskV2(t *testing.T) { zero_bytes := [32]byte{} // NOTE: With zero bytes the tx reverts - _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature) + _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.EthCallRetryConfig()) assert.NotNil(t, err) if !strings.Contains(err.Error(), "execution reverted") { t.Errorf("RespondToTaskV2 did not emit the expected message: %q doesn't contain %q", err.Error(), "execution reverted: custom error 0x2396d34e:") @@ -656,7 +672,7 @@ func TestRespondToTaskV2(t *testing.T) { 
t.Errorf("Error killing process: %v\n", err) } - _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature) + _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.EthCallRetryConfig()) assert.NotNil(t, err) if _, ok := err.(*backoff.PermanentError); ok { t.Errorf("RespondToTaskV2 Emitted non-Transient error: %s\n", err) @@ -671,7 +687,7 @@ func TestRespondToTaskV2(t *testing.T) { } // NOTE: With zero bytes the tx reverts - _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature) + _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.EthCallRetryConfig()) assert.NotNil(t, err) if !strings.Contains(err.Error(), "execution reverted") { t.Errorf("RespondToTaskV2 did not emit the expected message: %q doesn't contain %q", err.Error(), "execution reverted: custom error 0x2396d34e:") @@ -699,7 +715,7 @@ func TestBatchesStateWriter(t *testing.T) { var bytes [32]byte num.FillBytes(bytes[:]) - _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes) + _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -707,7 +723,7 @@ func TestBatchesStateWriter(t *testing.T) { return } - _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes) + _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.EthCallRetryConfig()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("BatchesStateWriter Emitted non-Transient error: %s\n", err) @@ -723,7 +739,7 @@ func TestBatchesStateWriter(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes) + _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.EthCallRetryConfig()) assert.Nil(t, 
err) if err := cmd.Process.Kill(); err != nil { @@ -746,7 +762,7 @@ func TestBalanceAt(t *testing.T) { aggregator_address := common.HexToAddress("0x0") blockHeight := big.NewInt(22) - _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight) + _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -754,7 +770,7 @@ func TestBalanceAt(t *testing.T) { return } - _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight) + _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.EthCallRetryConfig()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("BalanceAt Emitted non-Transient error: %s\n", err) @@ -770,7 +786,7 @@ func TestBalanceAt(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight) + _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -792,7 +808,7 @@ func TestBatchersBalances(t *testing.T) { } senderAddress := common.HexToAddress("0x0") - _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress) + _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -800,7 +816,7 @@ func TestBatchersBalances(t *testing.T) { return } - _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress) + _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryConfig()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("BatchersBalances Emitted non-Transient error: %s\n", 
err) @@ -816,7 +832,7 @@ func TestBatchersBalances(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress) + _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryConfig()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { From 15480f7586815e74617c794d17742c795af57386 Mon Sep 17 00:00:00 2001 From: PatStiles Date: Thu, 21 Nov 2024 18:07:43 -0300 Subject: [PATCH 05/12] add missing config --- aggregator/pkg/server.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aggregator/pkg/server.go b/aggregator/pkg/server.go index e8efa4006..01bcadde4 100644 --- a/aggregator/pkg/server.go +++ b/aggregator/pkg/server.go @@ -50,7 +50,7 @@ func (agg *Aggregator) ProcessOperatorSignedTaskResponseV2(signedTaskResponse *t "operatorId", hex.EncodeToString(signedTaskResponse.OperatorId[:])) taskIndex := uint32(0) - taskIndex, err := agg.GetTaskIndex(signedTaskResponse.BatchIdentifierHash) + taskIndex, err := agg.GetTaskIndex(signedTaskResponse.BatchIdentifierHash, retry.EthCallRetryConfig()) if err != nil { agg.logger.Warn("Task not found in the internal map, operator signature will be lost. 
Batch may not reach quorum") @@ -106,7 +106,7 @@ func (agg *Aggregator) ServerRunning(_ *struct{}, reply *int64) error { return nil } -func (agg *Aggregator) GetTaskIndex(batchIdentifierHash [32]byte) (uint32, error) { +func (agg *Aggregator) GetTaskIndex(batchIdentifierHash [32]byte, config *retry.RetryConfig) (uint32, error) { getTaskIndex_func := func() (uint32, error) { agg.taskMutex.Lock() taskIndex, ok := agg.batchesIdxByIdentifierHash[batchIdentifierHash] @@ -118,5 +118,5 @@ func (agg *Aggregator) GetTaskIndex(batchIdentifierHash [32]byte) (uint32, error } } - return retry.RetryWithData(getTaskIndex_func, retry.EthCallRetryConfig()) + return retry.RetryWithData(getTaskIndex_func, config) } From 31b66d7d8e5d0b0021c3a40a368135750ccc3920 Mon Sep 17 00:00:00 2001 From: PatStiles Date: Thu, 21 Nov 2024 18:15:51 -0300 Subject: [PATCH 06/12] add retry + merge --- aggregator/pkg/server.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aggregator/pkg/server.go b/aggregator/pkg/server.go index 111f6ec47..7129283d6 100644 --- a/aggregator/pkg/server.go +++ b/aggregator/pkg/server.go @@ -55,6 +55,7 @@ func (agg *Aggregator) ProcessOperatorSignedTaskResponseV2(signedTaskResponse *t // so we make GetTaskIndex retryable, waiting for some seconds, // before trying to fetch the task again from the map. taskIndex, err := agg.GetTaskIndexRetryable(signedTaskResponse.BatchIdentifierHash, retry.EthCallRetryConfig()) + taskIndex, err := agg.GetTaskIndexRetryable(signedTaskResponse.BatchIdentifierHash, retry.EthCallRetryConfig()) if err != nil { agg.logger.Warn("Task not found in the internal map, operator signature will be lost. Batch may not reach quorum") @@ -116,7 +117,7 @@ Checks Internal mapping for Signed Task Response, returns its TaskIndex. 
- Retry times (3 retries): 1 sec, 2 sec, 4 sec TODO: We should refactor the retry duration considering extending it to a larger time or number of retries, at least somewhere between 1 and 2 blocks */ -func (agg *Aggregator) GetTaskIndex(batchIdentifierHash [32]byte, config *retry.RetryConfig) (uint32, error) { +func (agg *Aggregator) GetTaskIndexRetryable(batchIdentifierHash [32]byte, config *retry.RetryConfig) (uint32, error) { getTaskIndex_func := func() (uint32, error) { agg.taskMutex.Lock() taskIndex, ok := agg.batchesIdxByIdentifierHash[batchIdentifierHash] From ddcd2185bfbe78499f3c59a61749766e366f5dc4 Mon Sep 17 00:00:00 2001 From: PatStiles Date: Thu, 21 Nov 2024 18:20:52 -0300 Subject: [PATCH 07/12] nit --- aggregator/pkg/server.go | 1 - 1 file changed, 1 deletion(-) diff --git a/aggregator/pkg/server.go b/aggregator/pkg/server.go index 7129283d6..02a72ff7c 100644 --- a/aggregator/pkg/server.go +++ b/aggregator/pkg/server.go @@ -55,7 +55,6 @@ func (agg *Aggregator) ProcessOperatorSignedTaskResponseV2(signedTaskResponse *t // so we make GetTaskIndex retryable, waiting for some seconds, // before trying to fetch the task again from the map. taskIndex, err := agg.GetTaskIndexRetryable(signedTaskResponse.BatchIdentifierHash, retry.EthCallRetryConfig()) - taskIndex, err := agg.GetTaskIndexRetryable(signedTaskResponse.BatchIdentifierHash, retry.EthCallRetryConfig()) if err != nil { agg.logger.Warn("Task not found in the internal map, operator signature will be lost. 
Batch may not reach quorum") From 5d1776ad4059272fc9383074d2ab936ebc73415a Mon Sep 17 00:00:00 2001 From: PatStiles Date: Thu, 21 Nov 2024 18:51:16 -0300 Subject: [PATCH 08/12] change name from config -> params --- core/chainio/avs_subscriber.go | 32 +++++++------- core/chainio/avs_writer.go | 20 ++++----- core/chainio/retryable.go | 22 +++++----- core/retry.go | 14 +++---- core/retry_test.go | 77 +++++++++++++++++----------------- core/utils/eth_client_utils.go | 4 +- 6 files changed, 85 insertions(+), 84 deletions(-) diff --git a/core/chainio/avs_subscriber.go b/core/chainio/avs_subscriber.go index a193ed1b3..10175bdc5 100644 --- a/core/chainio/avs_subscriber.go +++ b/core/chainio/avs_subscriber.go @@ -66,13 +66,13 @@ func (s *AvsSubscriber) SubscribeToNewTasksV2(newTaskCreatedChan chan *servicema internalChannel := make(chan *servicemanager.ContractAlignedLayerServiceManagerNewBatchV2) // Subscribe to new tasks - sub, err := SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryConfig()) + sub, err := SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryParams()) if err != nil { s.logger.Error("Primary failed to subscribe to new AlignedLayer V2 tasks after %d retries", retry.EthCallNumRetries, "err", err) return nil, err } - subFallback, err := SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryConfig()) + subFallback, err := SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryParams()) if err != nil { s.logger.Error("Fallback failed to subscribe to new AlignedLayer V2 tasks after %d retries", retry.EthCallNumRetries, "err", err) return nil, err @@ -114,14 +114,14 @@ func (s *AvsSubscriber) SubscribeToNewTasksV2(newTaskCreatedChan chan *servicema 
case err := <-sub.Err(): s.logger.Warn("Error in new task subscription", "err", err) sub.Unsubscribe() - sub, err = SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryConfig()) + sub, err = SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryParams()) if err != nil { errorChannel <- err } case err := <-subFallback.Err(): s.logger.Warn("Error in fallback new task subscription", "err", err) subFallback.Unsubscribe() - subFallback, err = SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryConfig()) + subFallback, err = SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryParams()) if err != nil { errorChannel <- err } @@ -137,13 +137,13 @@ func (s *AvsSubscriber) SubscribeToNewTasksV3(newTaskCreatedChan chan *servicema internalChannel := make(chan *servicemanager.ContractAlignedLayerServiceManagerNewBatchV3) // Subscribe to new tasks - sub, err := SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryConfig()) + sub, err := SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryParams()) if err != nil { s.logger.Error("Primary failed to subscribe to new AlignedLayer V3 tasks after %d retries", MaxRetries, "err", err) return nil, err } - subFallback, err := SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryConfig()) + subFallback, err := SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryParams()) if err != nil { s.logger.Error("Fallback 
failed to subscribe to new AlignedLayer V3 tasks after %d retries", MaxRetries, "err", err) return nil, err @@ -185,14 +185,14 @@ func (s *AvsSubscriber) SubscribeToNewTasksV3(newTaskCreatedChan chan *servicema case err := <-sub.Err(): s.logger.Warn("Error in new task subscription", "err", err) sub.Unsubscribe() - sub, err = SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryConfig()) + sub, err = SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryParams()) if err != nil { errorChannel <- err } case err := <-subFallback.Err(): s.logger.Warn("Error in fallback new task subscription", "err", err) subFallback.Unsubscribe() - subFallback, err = SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryConfig()) + subFallback, err = SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryParams()) if err != nil { errorChannel <- err } @@ -258,7 +258,7 @@ func (s *AvsSubscriber) processNewBatchV3(batch *servicemanager.ContractAlignedL // getLatestNotRespondedTaskFromEthereum queries the blockchain for the latest not responded task using the FilterNewBatch method. 
func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV2() (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV2, error) { - latestBlock, err := s.BlockNumberRetryable(context.Background(), retry.EthCallRetryConfig()) + latestBlock, err := s.BlockNumberRetryable(context.Background(), retry.EthCallRetryParams()) if err != nil { return nil, err } @@ -271,7 +271,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV2() (*servicemanag fromBlock = latestBlock - BlockInterval } - logs, err := s.FilterBatchV2Retryable(&bind.FilterOpts{Start: fromBlock, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) + logs, err := s.FilterBatchV2Retryable(&bind.FilterOpts{Start: fromBlock, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) if err != nil { return nil, err } @@ -293,7 +293,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV2() (*servicemanag batchIdentifier := append(lastLog.BatchMerkleRoot[:], lastLog.SenderAddress[:]...) batchIdentifierHash := *(*[32]byte)(crypto.Keccak256(batchIdentifier)) - state, err := s.BatchesStateRetryable(nil, batchIdentifierHash, retry.EthCallRetryConfig()) + state, err := s.BatchesStateRetryable(nil, batchIdentifierHash, retry.EthCallRetryParams()) if err != nil { return nil, err } @@ -307,7 +307,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV2() (*servicemanag // getLatestNotRespondedTaskFromEthereum queries the blockchain for the latest not responded task using the FilterNewBatch method. 
func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV3() (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV3, error) { - latestBlock, err := s.BlockNumberRetryable(context.Background(), retry.EthCallRetryConfig()) + latestBlock, err := s.BlockNumberRetryable(context.Background(), retry.EthCallRetryParams()) if err != nil { return nil, err } @@ -320,7 +320,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV3() (*servicemanag fromBlock = latestBlock - BlockInterval } - logs, err := s.FilterBatchV3Retryable(&bind.FilterOpts{Start: fromBlock, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) + logs, err := s.FilterBatchV3Retryable(&bind.FilterOpts{Start: fromBlock, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) if err != nil { return nil, err } @@ -342,7 +342,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV3() (*servicemanag batchIdentifier := append(lastLog.BatchMerkleRoot[:], lastLog.SenderAddress[:]...) 
batchIdentifierHash := *(*[32]byte)(crypto.Keccak256(batchIdentifier)) - state, err := s.BatchesStateRetryable(nil, batchIdentifierHash, retry.EthCallRetryConfig()) + state, err := s.BatchesStateRetryable(nil, batchIdentifierHash, retry.EthCallRetryParams()) if err != nil { return nil, err } @@ -355,7 +355,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV3() (*servicemanag } func (s *AvsSubscriber) WaitForOneBlock(startBlock uint64) error { - currentBlock, err := s.BlockNumberRetryable(context.Background(), retry.EthCallRetryConfig()) + currentBlock, err := s.BlockNumberRetryable(context.Background(), retry.EthCallRetryParams()) if err != nil { return err } @@ -363,7 +363,7 @@ func (s *AvsSubscriber) WaitForOneBlock(startBlock uint64) error { if currentBlock <= startBlock { // should really be == but just in case // Subscribe to new head c := make(chan *types.Header) - sub, err := s.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryConfig()) + sub, err := s.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryParams()) if err != nil { return err } diff --git a/core/chainio/avs_writer.go b/core/chainio/avs_writer.go index c680eeb80..7285282cb 100644 --- a/core/chainio/avs_writer.go +++ b/core/chainio/avs_writer.go @@ -100,7 +100,7 @@ func NewAvsWriterFromConfig(baseConfig *config.BaseConfig, ecdsaConfig *config.E func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMerkleRoot [32]byte, senderAddress [20]byte, nonSignerStakesAndSignature servicemanager.IBLSSignatureCheckerNonSignerStakesAndSignature, gasBumpPercentage uint, gasBumpIncrementalPercentage uint, timeToWaitBeforeBump time.Duration, onGasPriceBumped func(*big.Int)) (*types.Receipt, error) { txOpts := *w.Signer.GetTxOpts() txOpts.NoSend = true // simulate the transaction - simTx, err := w.RespondToTaskV2Retryable(&txOpts, batchMerkleRoot, senderAddress, nonSignerStakesAndSignature, retry.ChainRetryConfig()) + simTx, err := 
w.RespondToTaskV2Retryable(&txOpts, batchMerkleRoot, senderAddress, nonSignerStakesAndSignature, retry.ChainRetryParams()) if err != nil { return nil, err } @@ -113,13 +113,13 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe i := 0 // Set Retry config for RespondToTaskV2 - respondToTaskV2Config := retry.EthCallRetryConfig() + respondToTaskV2Config := retry.EthCallRetryParams() respondToTaskV2Config.NumRetries = respondToTaskV2NumRetries respondToTaskV2Config.MaxInterval = respondToTaskV2MaxInterval respondToTaskV2Config.MaxElapsedTime = respondToTaskV2MaxElapsedTime // Set Retry config for WaitForTxRetryable - waitForTxConfig := retry.EthCallRetryConfig() + waitForTxConfig := retry.EthCallRetryParams() waitForTxConfig.MaxInterval = waitForTxMaxInterval waitForTxConfig.NumRetries = waitForTxNumRetries waitForTxConfig.MaxElapsedTime = timeToWaitBeforeBump @@ -129,7 +129,7 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe batchMerkleRootHashString := hex.EncodeToString(batchMerkleRoot[:]) respondToTaskV2Func := func() (*types.Receipt, error) { - gasPrice, err := utils.GetGasPriceRetryable(w.Client, w.ClientFallback, retry.EthCallRetryConfig()) + gasPrice, err := utils.GetGasPriceRetryable(w.Client, w.ClientFallback, retry.EthCallRetryParams()) if err != nil { return nil, err } @@ -164,7 +164,7 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe } } w.logger.Infof("Receipts for old transactions not found, will check if the batch state has been responded", "merkle root", batchMerkleRootHashString) - batchState, _ := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.ChainRetryConfig()) + batchState, _ := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.ChainRetryParams()) if batchState.Responded { w.logger.Infof("Batch state has been already responded", "merkle root", batchMerkleRootHashString) return nil, nil @@ -183,7 +183,7 @@ 
func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe } w.logger.Infof("Sending RespondToTask transaction with a gas price of %v", txOpts.GasPrice, "merkle root", batchMerkleRootHashString) - realTx, err := w.RespondToTaskV2Retryable(&txOpts, batchMerkleRoot, senderAddress, nonSignerStakesAndSignature, retry.ChainRetryConfig()) + realTx, err := w.RespondToTaskV2Retryable(&txOpts, batchMerkleRoot, senderAddress, nonSignerStakesAndSignature, retry.ChainRetryParams()) if err != nil { w.logger.Errorf("Respond to task transaction err, %v", err, "merkle root", batchMerkleRootHashString) return nil, err @@ -218,7 +218,7 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe // if the tx cost was higher, then it means the aggregator has paid the difference for the batcher (txCost - respondToTaskFeeLimit) and so metrics are updated accordingly. // otherwise nothing is done. func (w *AvsWriter) checkIfAggregatorHadToPaidForBatcher(tx *types.Transaction, batchIdentifierHash [32]byte) { - batchState, err := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.EthCallRetryConfig()) + batchState, err := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.EthCallRetryParams()) if err != nil { return } @@ -244,7 +244,7 @@ func (w *AvsWriter) checkAggAndBatcherHaveEnoughBalance(tx *types.Transaction, t txCost := new(big.Int).Mul(txGasAsBigInt, txGasPrice) w.logger.Info("Transaction cost", "cost", txCost) - batchState, err := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.EthCallRetryConfig()) + batchState, err := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.EthCallRetryParams()) if err != nil { w.logger.Error("Failed to get batch state", "error", err) w.logger.Info("Proceeding to check balances against transaction cost") @@ -272,7 +272,7 @@ func (w *AvsWriter) compareAggregatorBalance(amount *big.Int, aggregatorAddress ctx, cancel := 
context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - aggregatorBalance, err := w.BalanceAtRetryable(ctx, aggregatorAddress, nil, retry.EthCallRetryConfig()) + aggregatorBalance, err := w.BalanceAtRetryable(ctx, aggregatorAddress, nil, retry.EthCallRetryParams()) if err != nil { // Ignore and continue. w.logger.Error("failed to get aggregator balance: %v", err) @@ -287,7 +287,7 @@ func (w *AvsWriter) compareAggregatorBalance(amount *big.Int, aggregatorAddress func (w *AvsWriter) compareBatcherBalance(amount *big.Int, senderAddress [20]byte) error { // Get batcher balance - batcherBalance, err := w.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryConfig()) + batcherBalance, err := w.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryParams()) if err != nil { // Ignore and continue. w.logger.Error("Failed to get batcherBalance", "error", err) diff --git a/core/chainio/retryable.go b/core/chainio/retryable.go index 8df721491..bf4724e2f 100644 --- a/core/chainio/retryable.go +++ b/core/chainio/retryable.go @@ -22,7 +22,7 @@ Send a transaction to the AVS contract to respond to a task. - Retry times (3 retries): 12 sec (1 Blocks), 24 sec (2 Blocks), 48 sec (4 Blocks) - NOTE: Contract call reverts are not considered `PermanentError`'s as block reorg's may lead to contract call revert in which case the aggregator should retry. 
*/ -func (w *AvsWriter) RespondToTaskV2Retryable(opts *bind.TransactOpts, batchMerkleRoot [32]byte, senderAddress common.Address, nonSignerStakesAndSignature servicemanager.IBLSSignatureCheckerNonSignerStakesAndSignature, config *retry.RetryConfig) (*types.Transaction, error) { +func (w *AvsWriter) RespondToTaskV2Retryable(opts *bind.TransactOpts, batchMerkleRoot [32]byte, senderAddress common.Address, nonSignerStakesAndSignature servicemanager.IBLSSignatureCheckerNonSignerStakesAndSignature, config *retry.RetryParams) (*types.Transaction, error) { respondToTaskV2_func := func() (*types.Transaction, error) { // Try with main connection tx, err := w.AvsContractBindings.ServiceManager.RespondToTaskV2(opts, batchMerkleRoot, senderAddress, nonSignerStakesAndSignature) @@ -42,7 +42,7 @@ Get the state of a batch from the AVS contract. - All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec */ -func (w *AvsWriter) BatchesStateRetryable(opts *bind.CallOpts, arg0 [32]byte, config *retry.RetryConfig) (struct { +func (w *AvsWriter) BatchesStateRetryable(opts *bind.CallOpts, arg0 [32]byte, config *retry.RetryParams) (struct { TaskCreatedBlock uint32 Responded bool RespondToTaskFeeLimit *big.Int @@ -70,7 +70,7 @@ Get the balance of a batcher from the AVS contract. - All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec */ -func (w *AvsWriter) BatcherBalancesRetryable(opts *bind.CallOpts, senderAddress common.Address, config *retry.RetryConfig) (*big.Int, error) { +func (w *AvsWriter) BatcherBalancesRetryable(opts *bind.CallOpts, senderAddress common.Address, config *retry.RetryParams) (*big.Int, error) { batcherBalances_func := func() (*big.Int, error) { // Try with main connection batcherBalance, err := w.AvsContractBindings.ServiceManager.BatchersBalances(opts, senderAddress) @@ -91,7 +91,7 @@ TODO: it gets the balance from an Address, not necessarily an aggregator. 
The na - All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec. */ -func (w *AvsWriter) BalanceAtRetryable(ctx context.Context, aggregatorAddress common.Address, blockNumber *big.Int, config *retry.RetryConfig) (*big.Int, error) { +func (w *AvsWriter) BalanceAtRetryable(ctx context.Context, aggregatorAddress common.Address, blockNumber *big.Int, config *retry.RetryParams) (*big.Int, error) { balanceAt_func := func() (*big.Int, error) { // Try with main connection aggregatorBalance, err := w.Client.BalanceAt(ctx, aggregatorAddress, blockNumber) @@ -112,7 +112,7 @@ Get the latest block number from Ethereum - All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec. */ -func (s *AvsSubscriber) BlockNumberRetryable(ctx context.Context, config *retry.RetryConfig) (uint64, error) { +func (s *AvsSubscriber) BlockNumberRetryable(ctx context.Context, config *retry.RetryParams) (uint64, error) { latestBlock_func := func() (uint64, error) { // Try with main connection latestBlock, err := s.AvsContractBindings.ethClient.BlockNumber(ctx) @@ -131,7 +131,7 @@ Get NewBatchV2 logs from the AVS contract. - All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec. */ -func (s *AvsSubscriber) FilterBatchV2Retryable(opts *bind.FilterOpts, batchMerkleRoot [][32]byte, config *retry.RetryConfig) (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV2Iterator, error) { +func (s *AvsSubscriber) FilterBatchV2Retryable(opts *bind.FilterOpts, batchMerkleRoot [][32]byte, config *retry.RetryParams) (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV2Iterator, error) { filterNewBatchV2_func := func() (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV2Iterator, error) { return s.AvsContractBindings.ServiceManager.FilterNewBatchV2(opts, batchMerkleRoot) } @@ -144,7 +144,7 @@ Get NewBatchV3 logs from the AVS contract. 
- All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec. */ -func (s *AvsSubscriber) FilterBatchV3Retryable(opts *bind.FilterOpts, batchMerkleRoot [][32]byte, config *retry.RetryConfig) (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV3Iterator, error) { +func (s *AvsSubscriber) FilterBatchV3Retryable(opts *bind.FilterOpts, batchMerkleRoot [][32]byte, config *retry.RetryParams) (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV3Iterator, error) { filterNewBatchV2_func := func() (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV3Iterator, error) { return s.AvsContractBindings.ServiceManager.FilterNewBatchV3(opts, batchMerkleRoot) } @@ -157,7 +157,7 @@ Get the state of a batch from the AVS contract. - All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec */ -func (s *AvsSubscriber) BatchesStateRetryable(opts *bind.CallOpts, arg0 [32]byte, config *retry.RetryConfig) (struct { +func (s *AvsSubscriber) BatchesStateRetryable(opts *bind.CallOpts, arg0 [32]byte, config *retry.RetryParams) (struct { TaskCreatedBlock uint32 Responded bool RespondToTaskFeeLimit *big.Int @@ -179,7 +179,7 @@ Subscribe to new heads from the Ethereum node. - All errors are considered Transient Errors - Retry times (3 retries): 1 sec, 2 sec, 4 sec. 
*/ -func (s *AvsSubscriber) SubscribeNewHeadRetryable(ctx context.Context, c chan<- *types.Header, config *retry.RetryConfig) (ethereum.Subscription, error) { +func (s *AvsSubscriber) SubscribeNewHeadRetryable(ctx context.Context, c chan<- *types.Header, config *retry.RetryParams) (ethereum.Subscription, error) { subscribeNewHead_func := func() (ethereum.Subscription, error) { // Try with main connection sub, err := s.AvsContractBindings.ethClient.SubscribeNewHead(ctx, c) @@ -203,7 +203,7 @@ func SubscribeToNewTasksV2Retryable( serviceManager *servicemanager.ContractAlignedLayerServiceManager, newTaskCreatedChan chan *servicemanager.ContractAlignedLayerServiceManagerNewBatchV2, batchMerkleRoot [][32]byte, - config *retry.RetryConfig, + config *retry.RetryParams, ) (event.Subscription, error) { subscribe_func := func() (event.Subscription, error) { return serviceManager.WatchNewBatchV2(opts, newTaskCreatedChan, batchMerkleRoot) @@ -222,7 +222,7 @@ func SubscribeToNewTasksV3Retryable( serviceManager *servicemanager.ContractAlignedLayerServiceManager, newTaskCreatedChan chan *servicemanager.ContractAlignedLayerServiceManagerNewBatchV3, batchMerkleRoot [][32]byte, - config *retry.RetryConfig, + config *retry.RetryParams, ) (event.Subscription, error) { subscribe_func := func() (event.Subscription, error) { return serviceManager.WatchNewBatchV3(opts, newTaskCreatedChan, batchMerkleRoot) diff --git a/core/retry.go b/core/retry.go index ff9a367f6..e4f67c8ab 100644 --- a/core/retry.go +++ b/core/retry.go @@ -35,7 +35,7 @@ const ( ChainMaxInterval = 2 * time.Minute // Maximum interval for an individual retry. ) -type RetryConfig struct { +type RetryParams struct { InitialInterval time.Duration // Initial delay for retry interval. MaxInterval time.Duration // Maximum interval an individual retry may have. MaxElapsedTime time.Duration // Maximum time all retries may take. `0` corresponds to no limit on the time of the retries. 
@@ -44,8 +44,8 @@ type RetryConfig struct { NumRetries uint64 } -func EthCallRetryConfig() *RetryConfig { - return &RetryConfig{ +func EthCallRetryParams() *RetryParams { + return &RetryParams{ InitialInterval: EthCallInitialInterval, MaxInterval: EthCallMaxInterval, MaxElapsedTime: EthCallMaxElapsedTime, @@ -55,8 +55,8 @@ func EthCallRetryConfig() *RetryConfig { } } -func ChainRetryConfig() *RetryConfig { - return &RetryConfig{ +func ChainRetryParams() *RetryParams { + return &RetryParams{ InitialInterval: ChainInitialInterval, MaxInterval: ChainMaxInterval, MaxElapsedTime: EthCallMaxElapsedTime, @@ -124,7 +124,7 @@ Reference: https://github.com/cenkalti/backoff/blob/v4/exponential.go#L9 */ // Same as Retry only that the functionToRetry can return a value upon correct execution -func RetryWithData[T any](functionToRetry func() (T, error), config *RetryConfig) (T, error) { +func RetryWithData[T any](functionToRetry func() (T, error), config *RetryParams) (T, error) { f := func() (T, error) { var ( val T @@ -173,7 +173,7 @@ func RetryWithData[T any](functionToRetry func() (T, error), config *RetryConfig // from the configuration are reached, or until a `PermanentError` is returned. // The function to be retried should return `PermanentError` when the condition for stop retrying // is met. 
-func Retry(functionToRetry func() error, config *RetryConfig) error { +func Retry(functionToRetry func() error, config *RetryParams) error { f := func() error { var err error func() { diff --git a/core/retry_test.go b/core/retry_test.go index d379a8279..82123490e 100644 --- a/core/retry_test.go +++ b/core/retry_test.go @@ -42,7 +42,7 @@ func TestRetryWithData(t *testing.T) { x, err := DummyFunction(43) return &x, err } - config := &retry.RetryConfig{ + config := &retry.RetryParams{ InitialInterval: 1000, MaxInterval: 2, MaxElapsedTime: 3, @@ -61,7 +61,7 @@ func TestRetry(t *testing.T) { _, err := DummyFunction(43) return err } - config := &retry.RetryConfig{ + config := &retry.RetryParams{ InitialInterval: 1000, MaxInterval: 2, MaxElapsedTime: 3, @@ -169,7 +169,7 @@ func TestWaitForTransactionReceipt(t *testing.T) { } // Assert Call succeeds when Anvil running - _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.EthCallRetryConfig()) + _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.EthCallRetryParams()) assert.NotNil(t, err, "Error Waiting for Transaction with Anvil Running: %s\n", err) if !strings.Contains(err.Error(), "not found") { t.Errorf("WaitForTransactionReceipt Emitted incorrect error: %s\n", err) @@ -181,7 +181,7 @@ func TestWaitForTransactionReceipt(t *testing.T) { return } - _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.EthCallRetryConfig()) + _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.EthCallRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("WaitForTransactionReceipt Emitted non Transient error: %s\n", err) @@ -197,7 +197,7 @@ func TestWaitForTransactionReceipt(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.EthCallRetryConfig()) + _, err = 
utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.EthCallRetryParams()) assert.NotNil(t, err) if !strings.Contains(err.Error(), "not found") { t.Errorf("WaitForTransactionReceipt Emitted incorrect error: %s\n", err) @@ -310,7 +310,7 @@ func TestSubscribeToNewTasksV3(t *testing.T) { t.Errorf("Error setting up Avs Service Bindings: %s\n", err) } - _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryConfig()) + _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -318,7 +318,7 @@ func TestSubscribeToNewTasksV3(t *testing.T) { return } - _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryConfig()) + _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("SubscribeToNewTasksV3 Emitted non Transient error: %s\n", err) @@ -334,7 +334,7 @@ func TestSubscribeToNewTasksV3(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryConfig()) + _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -360,7 +360,7 @@ func TestSubscribeToNewTasksV2(t *testing.T) { t.Errorf("Error setting up Avs Service Bindings: %s\n", err) } - _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryConfig()) + _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryParams()) assert.Nil(t, 
err) if err := cmd.Process.Kill(); err != nil { @@ -368,7 +368,7 @@ func TestSubscribeToNewTasksV2(t *testing.T) { return } - _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryConfig()) + _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("SubscribeToNewTasksV2 Emitted non Transient error: %s\n", err) @@ -384,7 +384,7 @@ func TestSubscribeToNewTasksV2(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryConfig()) + _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -405,7 +405,7 @@ func TestBlockNumber(t *testing.T) { if err != nil { return } - _, err = sub.BlockNumberRetryable(context.Background(), retry.EthCallRetryConfig()) + _, err = sub.BlockNumberRetryable(context.Background(), retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -413,7 +413,7 @@ func TestBlockNumber(t *testing.T) { return } - _, err = sub.BlockNumberRetryable(context.Background(), retry.EthCallRetryConfig()) + _, err = sub.BlockNumberRetryable(context.Background(), retry.EthCallRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("BlockNumber Emitted non Transient error: %s\n", err) @@ -429,7 +429,7 @@ func TestBlockNumber(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = sub.BlockNumberRetryable(context.Background(), retry.EthCallRetryConfig()) + _, err = sub.BlockNumberRetryable(context.Background(), retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -449,7 +449,7 @@ 
func TestFilterBatchV2(t *testing.T) { if err != nil { return } - _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) + _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -457,7 +457,7 @@ func TestFilterBatchV2(t *testing.T) { return } - _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) + _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("FilterBatchV2 Emitted non Transient error: %s\n", err) @@ -473,7 +473,7 @@ func TestFilterBatchV2(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) + _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -493,7 +493,7 @@ func TestFilterBatchV3(t *testing.T) { if err != nil { return } - _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) + _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -501,7 +501,7 @@ func TestFilterBatchV3(t *testing.T) { return } - _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, 
Context: context.Background()}, nil, retry.EthCallRetryConfig()) + _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("FilerBatchV3 Emitted non Transient error: %s\n", err) @@ -517,7 +517,7 @@ func TestFilterBatchV3(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryConfig()) + _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -539,7 +539,7 @@ func TestBatchesStateSubscriber(t *testing.T) { } zero_bytes := [32]byte{} - _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.EthCallRetryConfig()) + _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -547,7 +547,7 @@ func TestBatchesStateSubscriber(t *testing.T) { return } - _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.EthCallRetryConfig()) + _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.EthCallRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("BatchesStateSubscriber Emitted non Transient error: %s\n", err) @@ -563,7 +563,7 @@ func TestBatchesStateSubscriber(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.EthCallRetryConfig()) + _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -585,7 +585,7 @@ func TestSubscribeNewHead(t *testing.T) { return } - _, err 
= avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryConfig()) + _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -593,7 +593,7 @@ func TestSubscribeNewHead(t *testing.T) { return } - _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryConfig()) + _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("SubscribeNewHead Emitted non Transient error: %s\n", err) @@ -609,7 +609,7 @@ func TestSubscribeNewHead(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryConfig()) + _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -662,7 +662,7 @@ func TestRespondToTaskV2(t *testing.T) { zero_bytes := [32]byte{} // NOTE: With zero bytes the tx reverts - _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.EthCallRetryConfig()) + _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.EthCallRetryParams()) assert.NotNil(t, err) if !strings.Contains(err.Error(), "execution reverted") { t.Errorf("RespondToTaskV2 did not emit the expected message: %q doesn't contain %q", err.Error(), "execution reverted: custom error 0x2396d34e:") @@ -672,7 +672,7 @@ func TestRespondToTaskV2(t *testing.T) { t.Errorf("Error killing process: %v\n", err) } - _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.EthCallRetryConfig()) + _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, 
aggregator_address, nonSignerStakesAndSignature, retry.EthCallRetryParams()) assert.NotNil(t, err) if _, ok := err.(*backoff.PermanentError); ok { t.Errorf("RespondToTaskV2 Emitted non-Transient error: %s\n", err) @@ -687,7 +687,7 @@ func TestRespondToTaskV2(t *testing.T) { } // NOTE: With zero bytes the tx reverts - _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.EthCallRetryConfig()) + _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.EthCallRetryParams()) assert.NotNil(t, err) if !strings.Contains(err.Error(), "execution reverted") { t.Errorf("RespondToTaskV2 did not emit the expected message: %q doesn't contain %q", err.Error(), "execution reverted: custom error 0x2396d34e:") @@ -715,7 +715,7 @@ func TestBatchesStateWriter(t *testing.T) { var bytes [32]byte num.FillBytes(bytes[:]) - _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.EthCallRetryConfig()) + _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -723,7 +723,8 @@ func TestBatchesStateWriter(t *testing.T) { return } - _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.EthCallRetryConfig()) + _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.EthCallRetryParams()) + assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("BatchesStateWriter Emitted non-Transient error: %s\n", err) @@ -739,7 +740,7 @@ func TestBatchesStateWriter(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.EthCallRetryConfig()) + _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -762,7 +763,7 @@ func TestBalanceAt(t *testing.T) 
{ aggregator_address := common.HexToAddress("0x0") blockHeight := big.NewInt(22) - _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.EthCallRetryConfig()) + _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -770,7 +771,7 @@ func TestBalanceAt(t *testing.T) { return } - _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.EthCallRetryConfig()) + _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.EthCallRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("BalanceAt Emitted non-Transient error: %s\n", err) @@ -786,7 +787,7 @@ func TestBalanceAt(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.EthCallRetryConfig()) + _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -808,7 +809,7 @@ func TestBatchersBalances(t *testing.T) { } senderAddress := common.HexToAddress("0x0") - _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryConfig()) + _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -816,7 +817,7 @@ func TestBatchersBalances(t *testing.T) { return } - _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryConfig()) + _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { 
t.Errorf("BatchersBalances Emitted non-Transient error: %s\n", err) @@ -832,7 +833,7 @@ func TestBatchersBalances(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryConfig()) + _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { diff --git a/core/utils/eth_client_utils.go b/core/utils/eth_client_utils.go index c0c633977..bfbcc676f 100644 --- a/core/utils/eth_client_utils.go +++ b/core/utils/eth_client_utils.go @@ -19,7 +19,7 @@ import ( // Setting a higher value will imply doing less retries across the waitTimeout, and so we might lose the receipt // All errors are considered Transient Errors // - Retry times: 0.5s, 1s, 2s, 2s, 2s, ... until it reaches waitTimeout -func WaitForTransactionReceiptRetryable(client eth.InstrumentedClient, fallbackClient eth.InstrumentedClient, txHash gethcommon.Hash, config *retry.RetryConfig) (*types.Receipt, error) { +func WaitForTransactionReceiptRetryable(client eth.InstrumentedClient, fallbackClient eth.InstrumentedClient, txHash gethcommon.Hash, config *retry.RetryParams) (*types.Receipt, error) { receipt_func := func() (*types.Receipt, error) { receipt, err := client.TransactionReceipt(context.Background(), txHash) if err != nil { @@ -86,7 +86,7 @@ Get the gas price from the client with retry logic. 
- All errors are considered Transient Errors - Retry times: 1 sec, 2 sec, 4 sec */ -func GetGasPriceRetryable(client eth.InstrumentedClient, fallbackClient eth.InstrumentedClient, config *retry.RetryConfig) (*big.Int, error) { +func GetGasPriceRetryable(client eth.InstrumentedClient, fallbackClient eth.InstrumentedClient, config *retry.RetryParams) (*big.Int, error) { respondToTaskV2_func := func() (*big.Int, error) { gasPrice, err := client.SuggestGasPrice(context.Background()) if err != nil { From 4b66f8db8e67aed6b32d5169a73e903afb9fa77d Mon Sep 17 00:00:00 2001 From: JuArce <52429267+JuArce@users.noreply.github.com> Date: Thu, 21 Nov 2024 18:57:53 -0300 Subject: [PATCH 09/12] fix: functions name --- aggregator/pkg/server.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aggregator/pkg/server.go b/aggregator/pkg/server.go index 02a72ff7c..b4059c4c6 100644 --- a/aggregator/pkg/server.go +++ b/aggregator/pkg/server.go @@ -54,7 +54,7 @@ func (agg *Aggregator) ProcessOperatorSignedTaskResponseV2(signedTaskResponse *t // If that's the case, we won't know about the task at this point // so we make GetTaskIndex retryable, waiting for some seconds, // before trying to fetch the task again from the map. - taskIndex, err := agg.GetTaskIndexRetryable(signedTaskResponse.BatchIdentifierHash, retry.EthCallRetryConfig()) + taskIndex, err := agg.GetTaskIndexRetryable(signedTaskResponse.BatchIdentifierHash, retry.EthCallRetryParams()) if err != nil { agg.logger.Warn("Task not found in the internal map, operator signature will be lost. Batch may not reach quorum") @@ -116,7 +116,7 @@ Checks Internal mapping for Signed Task Response, returns its TaskIndex. 
- Retry times (3 retries): 1 sec, 2 sec, 4 sec TODO: We should refactor the retry duration considering extending it to a larger time or number of retries, at least somewhere between 1 and 2 blocks */ -func (agg *Aggregator) GetTaskIndexRetryable(batchIdentifierHash [32]byte, config *retry.RetryConfig) (uint32, error) { +func (agg *Aggregator) GetTaskIndexRetryable(batchIdentifierHash [32]byte, config *retry.RetryParams) (uint32, error) { getTaskIndex_func := func() (uint32, error) { agg.taskMutex.Lock() taskIndex, ok := agg.batchesIdxByIdentifierHash[batchIdentifierHash] From 6357a72d0d572e9e7cecb15ed42c0758bcce8d38 Mon Sep 17 00:00:00 2001 From: PatStiles Date: Thu, 21 Nov 2024 19:19:34 -0300 Subject: [PATCH 10/12] Params names + make specific Params config for WaitForTransactionReceipt + RespondToTaskV2 --- aggregator/pkg/server.go | 2 +- core/chainio/avs_subscriber.go | 36 +++++++-------- core/chainio/avs_writer.go | 36 +++++---------- core/retry.go | 70 ++++++++++++++++++++--------- core/retry_test.go | 80 +++++++++++++++++----------------- 5 files changed, 119 insertions(+), 105 deletions(-) diff --git a/aggregator/pkg/server.go b/aggregator/pkg/server.go index b4059c4c6..4ec6488d0 100644 --- a/aggregator/pkg/server.go +++ b/aggregator/pkg/server.go @@ -54,7 +54,7 @@ func (agg *Aggregator) ProcessOperatorSignedTaskResponseV2(signedTaskResponse *t // If that's the case, we won't know about the task at this point // so we make GetTaskIndex retryable, waiting for some seconds, // before trying to fetch the task again from the map. - taskIndex, err := agg.GetTaskIndexRetryable(signedTaskResponse.BatchIdentifierHash, retry.EthCallRetryParams()) + taskIndex, err := agg.GetTaskIndexRetryable(signedTaskResponse.BatchIdentifierHash, retry.NetworkRetryParams()) if err != nil { agg.logger.Warn("Task not found in the internal map, operator signature will be lost. 
Batch may not reach quorum") diff --git a/core/chainio/avs_subscriber.go b/core/chainio/avs_subscriber.go index 10175bdc5..c40802058 100644 --- a/core/chainio/avs_subscriber.go +++ b/core/chainio/avs_subscriber.go @@ -66,15 +66,15 @@ func (s *AvsSubscriber) SubscribeToNewTasksV2(newTaskCreatedChan chan *servicema internalChannel := make(chan *servicemanager.ContractAlignedLayerServiceManagerNewBatchV2) // Subscribe to new tasks - sub, err := SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryParams()) + sub, err := SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.NetworkRetryParams()) if err != nil { - s.logger.Error("Primary failed to subscribe to new AlignedLayer V2 tasks after %d retries", retry.EthCallNumRetries, "err", err) + s.logger.Error("Primary failed to subscribe to new AlignedLayer V2 tasks after %d retries", retry.NetworkNumRetries, "err", err) return nil, err } - subFallback, err := SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryParams()) + subFallback, err := SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.NetworkRetryParams()) if err != nil { - s.logger.Error("Fallback failed to subscribe to new AlignedLayer V2 tasks after %d retries", retry.EthCallNumRetries, "err", err) + s.logger.Error("Fallback failed to subscribe to new AlignedLayer V2 tasks after %d retries", retry.NetworkNumRetries, "err", err) return nil, err } s.logger.Info("Subscribed to new AlignedLayer V2 tasks") @@ -114,14 +114,14 @@ func (s *AvsSubscriber) SubscribeToNewTasksV2(newTaskCreatedChan chan *servicema case err := <-sub.Err(): s.logger.Warn("Error in new task subscription", "err", err) sub.Unsubscribe() - sub, err = SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, 
s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryParams()) + sub, err = SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.NetworkRetryParams()) if err != nil { errorChannel <- err } case err := <-subFallback.Err(): s.logger.Warn("Error in fallback new task subscription", "err", err) subFallback.Unsubscribe() - subFallback, err = SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryParams()) + subFallback, err = SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.NetworkRetryParams()) if err != nil { errorChannel <- err } @@ -137,13 +137,13 @@ func (s *AvsSubscriber) SubscribeToNewTasksV3(newTaskCreatedChan chan *servicema internalChannel := make(chan *servicemanager.ContractAlignedLayerServiceManagerNewBatchV3) // Subscribe to new tasks - sub, err := SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryParams()) + sub, err := SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.NetworkRetryParams()) if err != nil { s.logger.Error("Primary failed to subscribe to new AlignedLayer V3 tasks after %d retries", MaxRetries, "err", err) return nil, err } - subFallback, err := SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryParams()) + subFallback, err := SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.NetworkRetryParams()) if err != nil { s.logger.Error("Fallback failed to subscribe to new AlignedLayer V3 tasks after %d retries", MaxRetries, "err", err) return nil, err @@ -185,14 +185,14 @@ func (s *AvsSubscriber) 
SubscribeToNewTasksV3(newTaskCreatedChan chan *servicema case err := <-sub.Err(): s.logger.Warn("Error in new task subscription", "err", err) sub.Unsubscribe() - sub, err = SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.EthCallRetryParams()) + sub, err = SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManager, internalChannel, nil, retry.NetworkRetryParams()) if err != nil { errorChannel <- err } case err := <-subFallback.Err(): s.logger.Warn("Error in fallback new task subscription", "err", err) subFallback.Unsubscribe() - subFallback, err = SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.EthCallRetryParams()) + subFallback, err = SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.AvsContractBindings.ServiceManagerFallback, internalChannel, nil, retry.NetworkRetryParams()) if err != nil { errorChannel <- err } @@ -258,7 +258,7 @@ func (s *AvsSubscriber) processNewBatchV3(batch *servicemanager.ContractAlignedL // getLatestNotRespondedTaskFromEthereum queries the blockchain for the latest not responded task using the FilterNewBatch method. 
func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV2() (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV2, error) { - latestBlock, err := s.BlockNumberRetryable(context.Background(), retry.EthCallRetryParams()) + latestBlock, err := s.BlockNumberRetryable(context.Background(), retry.NetworkRetryParams()) if err != nil { return nil, err } @@ -271,7 +271,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV2() (*servicemanag fromBlock = latestBlock - BlockInterval } - logs, err := s.FilterBatchV2Retryable(&bind.FilterOpts{Start: fromBlock, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) + logs, err := s.FilterBatchV2Retryable(&bind.FilterOpts{Start: fromBlock, End: nil, Context: context.Background()}, nil, retry.NetworkRetryParams()) if err != nil { return nil, err } @@ -293,7 +293,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV2() (*servicemanag batchIdentifier := append(lastLog.BatchMerkleRoot[:], lastLog.SenderAddress[:]...) batchIdentifierHash := *(*[32]byte)(crypto.Keccak256(batchIdentifier)) - state, err := s.BatchesStateRetryable(nil, batchIdentifierHash, retry.EthCallRetryParams()) + state, err := s.BatchesStateRetryable(nil, batchIdentifierHash, retry.NetworkRetryParams()) if err != nil { return nil, err } @@ -307,7 +307,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV2() (*servicemanag // getLatestNotRespondedTaskFromEthereum queries the blockchain for the latest not responded task using the FilterNewBatch method. 
func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV3() (*servicemanager.ContractAlignedLayerServiceManagerNewBatchV3, error) { - latestBlock, err := s.BlockNumberRetryable(context.Background(), retry.EthCallRetryParams()) + latestBlock, err := s.BlockNumberRetryable(context.Background(), retry.NetworkRetryParams()) if err != nil { return nil, err } @@ -320,7 +320,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV3() (*servicemanag fromBlock = latestBlock - BlockInterval } - logs, err := s.FilterBatchV3Retryable(&bind.FilterOpts{Start: fromBlock, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) + logs, err := s.FilterBatchV3Retryable(&bind.FilterOpts{Start: fromBlock, End: nil, Context: context.Background()}, nil, retry.NetworkRetryParams()) if err != nil { return nil, err } @@ -342,7 +342,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV3() (*servicemanag batchIdentifier := append(lastLog.BatchMerkleRoot[:], lastLog.SenderAddress[:]...) 
batchIdentifierHash := *(*[32]byte)(crypto.Keccak256(batchIdentifier)) - state, err := s.BatchesStateRetryable(nil, batchIdentifierHash, retry.EthCallRetryParams()) + state, err := s.BatchesStateRetryable(nil, batchIdentifierHash, retry.NetworkRetryParams()) if err != nil { return nil, err } @@ -355,7 +355,7 @@ func (s *AvsSubscriber) getLatestNotRespondedTaskFromEthereumV3() (*servicemanag } func (s *AvsSubscriber) WaitForOneBlock(startBlock uint64) error { - currentBlock, err := s.BlockNumberRetryable(context.Background(), retry.EthCallRetryParams()) + currentBlock, err := s.BlockNumberRetryable(context.Background(), retry.NetworkRetryParams()) if err != nil { return err } @@ -363,7 +363,7 @@ func (s *AvsSubscriber) WaitForOneBlock(startBlock uint64) error { if currentBlock <= startBlock { // should really be == but just in case // Subscribe to new head c := make(chan *types.Header) - sub, err := s.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryParams()) + sub, err := s.SubscribeNewHeadRetryable(context.Background(), c, retry.NetworkRetryParams()) if err != nil { return err } diff --git a/core/chainio/avs_writer.go b/core/chainio/avs_writer.go index 7285282cb..bfcf87f07 100644 --- a/core/chainio/avs_writer.go +++ b/core/chainio/avs_writer.go @@ -22,14 +22,6 @@ import ( "github.com/yetanotherco/aligned_layer/metrics" ) -const ( - waitForTxMaxInterval = 2 * time.Second - waitForTxNumRetries = 0 - respondToTaskV2NumRetries uint64 = 0 - respondToTaskV2MaxInterval = time.Millisecond * 500 - respondToTaskV2MaxElapsedTime = 0 -) - type AvsWriter struct { *avsregistry.ChainWriter AvsContractBindings *AvsServiceBindings @@ -100,7 +92,7 @@ func NewAvsWriterFromConfig(baseConfig *config.BaseConfig, ecdsaConfig *config.E func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMerkleRoot [32]byte, senderAddress [20]byte, nonSignerStakesAndSignature servicemanager.IBLSSignatureCheckerNonSignerStakesAndSignature, gasBumpPercentage 
uint, gasBumpIncrementalPercentage uint, timeToWaitBeforeBump time.Duration, onGasPriceBumped func(*big.Int)) (*types.Receipt, error) { txOpts := *w.Signer.GetTxOpts() txOpts.NoSend = true // simulate the transaction - simTx, err := w.RespondToTaskV2Retryable(&txOpts, batchMerkleRoot, senderAddress, nonSignerStakesAndSignature, retry.ChainRetryParams()) + simTx, err := w.RespondToTaskV2Retryable(&txOpts, batchMerkleRoot, senderAddress, nonSignerStakesAndSignature, retry.SendToChainRetryParams()) if err != nil { return nil, err } @@ -112,16 +104,8 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe txOpts.NoSend = false i := 0 - // Set Retry config for RespondToTaskV2 - respondToTaskV2Config := retry.EthCallRetryParams() - respondToTaskV2Config.NumRetries = respondToTaskV2NumRetries - respondToTaskV2Config.MaxInterval = respondToTaskV2MaxInterval - respondToTaskV2Config.MaxElapsedTime = respondToTaskV2MaxElapsedTime - // Set Retry config for WaitForTxRetryable - waitForTxConfig := retry.EthCallRetryParams() - waitForTxConfig.MaxInterval = waitForTxMaxInterval - waitForTxConfig.NumRetries = waitForTxNumRetries + waitForTxConfig := retry.WaitForTxRetryParams() waitForTxConfig.MaxElapsedTime = timeToWaitBeforeBump var sentTxs []*types.Transaction @@ -129,7 +113,7 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe batchMerkleRootHashString := hex.EncodeToString(batchMerkleRoot[:]) respondToTaskV2Func := func() (*types.Receipt, error) { - gasPrice, err := utils.GetGasPriceRetryable(w.Client, w.ClientFallback, retry.EthCallRetryParams()) + gasPrice, err := utils.GetGasPriceRetryable(w.Client, w.ClientFallback, retry.NetworkRetryParams()) if err != nil { return nil, err } @@ -164,7 +148,7 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe } } w.logger.Infof("Receipts for old transactions not found, will check if the batch state has been responded", "merkle root", 
batchMerkleRootHashString) - batchState, _ := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.ChainRetryParams()) + batchState, _ := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.SendToChainRetryParams()) if batchState.Responded { w.logger.Infof("Batch state has been already responded", "merkle root", batchMerkleRootHashString) return nil, nil @@ -183,7 +167,7 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe } w.logger.Infof("Sending RespondToTask transaction with a gas price of %v", txOpts.GasPrice, "merkle root", batchMerkleRootHashString) - realTx, err := w.RespondToTaskV2Retryable(&txOpts, batchMerkleRoot, senderAddress, nonSignerStakesAndSignature, retry.ChainRetryParams()) + realTx, err := w.RespondToTaskV2Retryable(&txOpts, batchMerkleRoot, senderAddress, nonSignerStakesAndSignature, retry.SendToChainRetryParams()) if err != nil { w.logger.Errorf("Respond to task transaction err, %v", err, "merkle root", batchMerkleRootHashString) return nil, err @@ -211,14 +195,14 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe // This just retries the bump of a fee in case of a timeout // The wait is done before on WaitForTransactionReceiptRetryable, and all the functions are retriable, // so this retry doesn't need to wait more time - return retry.RetryWithData(respondToTaskV2Func, respondToTaskV2Config) + return retry.RetryWithData(respondToTaskV2Func, retry.RespondToTaskV2()) } // Calculates the transaction cost from the receipt and compares it with the batcher respondToTaskFeeLimit // if the tx cost was higher, then it means the aggregator has paid the difference for the batcher (txCost - respondToTaskFeeLimit) and so metrics are updated accordingly. // otherwise nothing is done. 
func (w *AvsWriter) checkIfAggregatorHadToPaidForBatcher(tx *types.Transaction, batchIdentifierHash [32]byte) { - batchState, err := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.EthCallRetryParams()) + batchState, err := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.NetworkRetryParams()) if err != nil { return } @@ -244,7 +228,7 @@ func (w *AvsWriter) checkAggAndBatcherHaveEnoughBalance(tx *types.Transaction, t txCost := new(big.Int).Mul(txGasAsBigInt, txGasPrice) w.logger.Info("Transaction cost", "cost", txCost) - batchState, err := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.EthCallRetryParams()) + batchState, err := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.NetworkRetryParams()) if err != nil { w.logger.Error("Failed to get batch state", "error", err) w.logger.Info("Proceeding to check balances against transaction cost") @@ -272,7 +256,7 @@ func (w *AvsWriter) compareAggregatorBalance(amount *big.Int, aggregatorAddress ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - aggregatorBalance, err := w.BalanceAtRetryable(ctx, aggregatorAddress, nil, retry.EthCallRetryParams()) + aggregatorBalance, err := w.BalanceAtRetryable(ctx, aggregatorAddress, nil, retry.NetworkRetryParams()) if err != nil { // Ignore and continue. w.logger.Error("failed to get aggregator balance: %v", err) @@ -287,7 +271,7 @@ func (w *AvsWriter) compareAggregatorBalance(amount *big.Int, aggregatorAddress func (w *AvsWriter) compareBatcherBalance(amount *big.Int, senderAddress [20]byte) error { // Get batcher balance - batcherBalance, err := w.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryParams()) + batcherBalance, err := w.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.NetworkRetryParams()) if err != nil { // Ignore and continue. 
w.logger.Error("Failed to get batcherBalance", "error", err) diff --git a/core/retry.go b/core/retry.go index e4f67c8ab..9c79600d4 100644 --- a/core/retry.go +++ b/core/retry.go @@ -25,14 +25,22 @@ func (e PermanentError) Is(err error) bool { } const ( - EthCallInitialInterval = 1 * time.Second // Initial delay for retry interval. - EthCallMaxInterval = 60 * time.Second // Maximum interval an individual retry may have. - EthCallMaxElapsedTime = 0 * time.Second // Maximum time all retries may take. `0` corresponds to no limit on the time of the retries. - EthCallRandomizationFactor float64 = 0 // Randomization (Jitter) factor used to map retry interval to a range of values around the computed interval. In precise terms (random value in range [1 - randomizationfactor, 1 + randomizationfactor]). NOTE: This is set to 0 as we do not use jitter in Aligned. - EthCallMultiplier float64 = 2 // Multiplier factor computed exponential retry interval is scaled by. - EthCallNumRetries uint64 = 3 // Total number of retries attempted. - ChainInitialInterval = 12 * time.Second // Initial delay for retry interval for contract calls. Corresponds to 1 ethereum block. - ChainMaxInterval = 2 * time.Minute // Maximum interval for an individual retry. + NetworkInitialInterval = 1 * time.Second // Initial delay for retry interval. + NetworkMaxInterval = 60 * time.Second // Maximum interval an individual retry may have. + NetworkMaxElapsedTime = 0 * time.Second // Maximum time all retries may take. `0` corresponds to no limit on the time of the retries. + NetworkRandomizationFactor float64 = 0 // Randomization (Jitter) factor used to map retry interval to a range of values around the computed interval. In precise terms (random value in range [1 - randomizationfactor, 1 + randomizationfactor]). NOTE: This is set to 0 as we do not use jitter in Aligned. + NetworkMultiplier float64 = 2 // Multiplier factor computed exponential retry interval is scaled by. 
+ NetworkNumRetries uint64 = 3 // Total number of retries attempted. + // Retry Params for Sending Tx to Chain + ChainInitialInterval = 12 * time.Second // Initial delay for retry interval for contract calls. Corresponds to 1 ethereum block. + ChainMaxInterval = 2 * time.Minute // Maximum interval for an individual retry. + // Retry Params for WaitForTransactionReceipt in the Fee Bump + WaitForTxMaxInterval = 2 * time.Second + WaitForTxNumRetries = 0 + // Retry Parameters for RespondToTaskV2 in the Fee Bump + RespondToTaskV2MaxInterval = time.Millisecond * 500 + RespondToTaskV2MaxElapsedTime = 0 + RespondToTaskV2NumRetries uint64 = 0 ) type RetryParams struct { @@ -44,25 +52,47 @@ type RetryParams struct { NumRetries uint64 } -func EthCallRetryParams() *RetryParams { +func NetworkRetryParams() *RetryParams { return &RetryParams{ - InitialInterval: EthCallInitialInterval, - MaxInterval: EthCallMaxInterval, - MaxElapsedTime: EthCallMaxElapsedTime, - RandomizationFactor: EthCallRandomizationFactor, - Multiplier: EthCallMultiplier, - NumRetries: EthCallNumRetries, + InitialInterval: NetworkInitialInterval, + MaxInterval: NetworkMaxInterval, + MaxElapsedTime: NetworkMaxElapsedTime, + RandomizationFactor: NetworkRandomizationFactor, + Multiplier: NetworkMultiplier, + NumRetries: NetworkNumRetries, } } -func ChainRetryParams() *RetryParams { +func SendToChainRetryParams() *RetryParams { return &RetryParams{ InitialInterval: ChainInitialInterval, MaxInterval: ChainMaxInterval, - MaxElapsedTime: EthCallMaxElapsedTime, - RandomizationFactor: EthCallRandomizationFactor, - Multiplier: EthCallMultiplier, - NumRetries: EthCallNumRetries, + MaxElapsedTime: NetworkMaxElapsedTime, + RandomizationFactor: NetworkRandomizationFactor, + Multiplier: NetworkMultiplier, + NumRetries: NetworkNumRetries, + } +} + +func RespondToTaskV2() *RetryParams { + return &RetryParams{ + InitialInterval: ChainInitialInterval, + MaxInterval: RespondToTaskV2MaxInterval, + MaxElapsedTime: 
RespondToTaskV2MaxElapsedTime, + RandomizationFactor: NetworkRandomizationFactor, + Multiplier: NetworkMultiplier, + NumRetries: RespondToTaskV2NumRetries, + } +} + +func WaitForTxRetryParams() *RetryParams { + return &RetryParams{ + InitialInterval: NetworkInitialInterval, + MaxInterval: WaitForTxMaxInterval, + MaxElapsedTime: NetworkMaxElapsedTime, + RandomizationFactor: NetworkRandomizationFactor, + Multiplier: NetworkMultiplier, + NumRetries: WaitForTxNumRetries, } } diff --git a/core/retry_test.go b/core/retry_test.go index 82123490e..8e9fcdefb 100644 --- a/core/retry_test.go +++ b/core/retry_test.go @@ -47,8 +47,8 @@ func TestRetryWithData(t *testing.T) { MaxInterval: 2, MaxElapsedTime: 3, RandomizationFactor: 0, - Multiplier: retry.EthCallMultiplier, - NumRetries: retry.EthCallNumRetries, + Multiplier: retry.NetworkMultiplier, + NumRetries: retry.NetworkNumRetries, } _, err := retry.RetryWithData(function, config) if err != nil { @@ -66,8 +66,8 @@ func TestRetry(t *testing.T) { MaxInterval: 2, MaxElapsedTime: 3, RandomizationFactor: 0, - Multiplier: retry.EthCallMultiplier, - NumRetries: retry.EthCallNumRetries, + Multiplier: retry.NetworkMultiplier, + NumRetries: retry.NetworkNumRetries, } err := retry.Retry(function, config) if err != nil { @@ -169,7 +169,7 @@ func TestWaitForTransactionReceipt(t *testing.T) { } // Assert Call succeeds when Anvil running - _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.EthCallRetryParams()) + _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.NetworkRetryParams()) assert.NotNil(t, err, "Error Waiting for Transaction with Anvil Running: %s\n", err) if !strings.Contains(err.Error(), "not found") { t.Errorf("WaitForTransactionReceipt Emitted incorrect error: %s\n", err) @@ -181,7 +181,7 @@ func TestWaitForTransactionReceipt(t *testing.T) { return } - _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.EthCallRetryParams()) + _, err = 
utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.NetworkRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("WaitForTransactionReceipt Emitted non Transient error: %s\n", err) @@ -197,7 +197,7 @@ func TestWaitForTransactionReceipt(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.EthCallRetryParams()) + _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.NetworkRetryParams()) assert.NotNil(t, err) if !strings.Contains(err.Error(), "not found") { t.Errorf("WaitForTransactionReceipt Emitted incorrect error: %s\n", err) @@ -310,7 +310,7 @@ func TestSubscribeToNewTasksV3(t *testing.T) { t.Errorf("Error setting up Avs Service Bindings: %s\n", err) } - _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryParams()) + _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -318,7 +318,7 @@ func TestSubscribeToNewTasksV3(t *testing.T) { return } - _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryParams()) + _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.NetworkRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("SubscribeToNewTasksV3 Emitted non Transient error: %s\n", err) @@ -334,7 +334,7 @@ func TestSubscribeToNewTasksV3(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryParams()) + _, err = chainio.SubscribeToNewTasksV3Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, 
retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -360,7 +360,7 @@ func TestSubscribeToNewTasksV2(t *testing.T) { t.Errorf("Error setting up Avs Service Bindings: %s\n", err) } - _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryParams()) + _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -368,7 +368,7 @@ func TestSubscribeToNewTasksV2(t *testing.T) { return } - _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryParams()) + _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.NetworkRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("SubscribeToNewTasksV2 Emitted non Transient error: %s\n", err) @@ -384,7 +384,7 @@ func TestSubscribeToNewTasksV2(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.EthCallRetryParams()) + _, err = chainio.SubscribeToNewTasksV2Retryable(&bind.WatchOpts{}, s.ServiceManager, channel, nil, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -405,7 +405,7 @@ func TestBlockNumber(t *testing.T) { if err != nil { return } - _, err = sub.BlockNumberRetryable(context.Background(), retry.EthCallRetryParams()) + _, err = sub.BlockNumberRetryable(context.Background(), retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -413,7 +413,7 @@ func TestBlockNumber(t *testing.T) { return } - _, err = sub.BlockNumberRetryable(context.Background(), retry.EthCallRetryParams()) + _, err = sub.BlockNumberRetryable(context.Background(), 
retry.NetworkRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("BlockNumber Emitted non Transient error: %s\n", err) @@ -429,7 +429,7 @@ func TestBlockNumber(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = sub.BlockNumberRetryable(context.Background(), retry.EthCallRetryParams()) + _, err = sub.BlockNumberRetryable(context.Background(), retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -449,7 +449,7 @@ func TestFilterBatchV2(t *testing.T) { if err != nil { return } - _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) + _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -457,7 +457,7 @@ func TestFilterBatchV2(t *testing.T) { return } - _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) + _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.NetworkRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("FilterBatchV2 Emitted non Transient error: %s\n", err) @@ -473,7 +473,7 @@ func TestFilterBatchV2(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) + _, err = avsSubscriber.FilterBatchV2Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -493,7 +493,7 @@ func TestFilterBatchV3(t *testing.T) { if err != nil { return } - 
_, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) + _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -501,7 +501,7 @@ func TestFilterBatchV3(t *testing.T) { return } - _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) + _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.NetworkRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("FilerBatchV3 Emitted non Transient error: %s\n", err) @@ -517,7 +517,7 @@ func TestFilterBatchV3(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.EthCallRetryParams()) + _, err = avsSubscriber.FilterBatchV3Retryable(&bind.FilterOpts{Start: 0, End: nil, Context: context.Background()}, nil, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -539,7 +539,7 @@ func TestBatchesStateSubscriber(t *testing.T) { } zero_bytes := [32]byte{} - _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.EthCallRetryParams()) + _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -547,7 +547,7 @@ func TestBatchesStateSubscriber(t *testing.T) { return } - _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.EthCallRetryParams()) + _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.NetworkRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { 
t.Errorf("BatchesStateSubscriber Emitted non Transient error: %s\n", err) @@ -563,7 +563,7 @@ func TestBatchesStateSubscriber(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.EthCallRetryParams()) + _, err = avsSubscriber.BatchesStateRetryable(nil, zero_bytes, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -585,7 +585,7 @@ func TestSubscribeNewHead(t *testing.T) { return } - _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryParams()) + _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -593,7 +593,7 @@ func TestSubscribeNewHead(t *testing.T) { return } - _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryParams()) + _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.NetworkRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("SubscribeNewHead Emitted non Transient error: %s\n", err) @@ -609,7 +609,7 @@ func TestSubscribeNewHead(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.EthCallRetryParams()) + _, err = avsSubscriber.SubscribeNewHeadRetryable(context.Background(), c, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -662,7 +662,7 @@ func TestRespondToTaskV2(t *testing.T) { zero_bytes := [32]byte{} // NOTE: With zero bytes the tx reverts - _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.EthCallRetryParams()) + _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.NetworkRetryParams()) assert.NotNil(t, err) if 
!strings.Contains(err.Error(), "execution reverted") { t.Errorf("RespondToTaskV2 did not emit the expected message: %q doesn't contain %q", err.Error(), "execution reverted: custom error 0x2396d34e:") @@ -672,7 +672,7 @@ func TestRespondToTaskV2(t *testing.T) { t.Errorf("Error killing process: %v\n", err) } - _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.EthCallRetryParams()) + _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.NetworkRetryParams()) assert.NotNil(t, err) if _, ok := err.(*backoff.PermanentError); ok { t.Errorf("RespondToTaskV2 Emitted non-Transient error: %s\n", err) @@ -687,7 +687,7 @@ func TestRespondToTaskV2(t *testing.T) { } // NOTE: With zero bytes the tx reverts - _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.EthCallRetryParams()) + _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.NetworkRetryParams()) assert.NotNil(t, err) if !strings.Contains(err.Error(), "execution reverted") { t.Errorf("RespondToTaskV2 did not emit the expected message: %q doesn't contain %q", err.Error(), "execution reverted: custom error 0x2396d34e:") @@ -715,7 +715,7 @@ func TestBatchesStateWriter(t *testing.T) { var bytes [32]byte num.FillBytes(bytes[:]) - _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.EthCallRetryParams()) + _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -723,7 +723,7 @@ func TestBatchesStateWriter(t *testing.T) { return } - _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.EthCallRetryParams()) + _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.NetworkRetryParams()) assert.NotNil(t, err) if _, ok := 
err.(retry.PermanentError); ok { @@ -740,7 +740,7 @@ func TestBatchesStateWriter(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.EthCallRetryParams()) + _, err = avsWriter.BatchesStateRetryable(&bind.CallOpts{}, bytes, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -763,7 +763,7 @@ func TestBalanceAt(t *testing.T) { aggregator_address := common.HexToAddress("0x0") blockHeight := big.NewInt(22) - _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.EthCallRetryParams()) + _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -771,7 +771,7 @@ func TestBalanceAt(t *testing.T) { return } - _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.EthCallRetryParams()) + _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.NetworkRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("BalanceAt Emitted non-Transient error: %s\n", err) @@ -787,7 +787,7 @@ func TestBalanceAt(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.EthCallRetryParams()) + _, err = avsWriter.BalanceAtRetryable(context.Background(), aggregator_address, blockHeight, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -809,7 +809,7 @@ func TestBatchersBalances(t *testing.T) { } senderAddress := common.HexToAddress("0x0") - _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryParams()) + _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, 
retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { @@ -817,7 +817,7 @@ func TestBatchersBalances(t *testing.T) { return } - _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryParams()) + _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.NetworkRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("BatchersBalances Emitted non-Transient error: %s\n", err) @@ -833,7 +833,7 @@ func TestBatchersBalances(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.EthCallRetryParams()) + _, err = avsWriter.BatcherBalancesRetryable(&bind.CallOpts{}, senderAddress, retry.NetworkRetryParams()) assert.Nil(t, err) if err := cmd.Process.Kill(); err != nil { From feafc6f5c39255a6bebb7501c4d6757ffaa3409a Mon Sep 17 00:00:00 2001 From: JuArce <52429267+JuArce@users.noreply.github.com> Date: Thu, 21 Nov 2024 20:13:32 -0300 Subject: [PATCH 11/12] fix: receive maxElapsedTime in WaitForTxRetryParams as parameter --- core/chainio/avs_writer.go | 6 +----- core/retry.go | 20 +++++++++++++------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/core/chainio/avs_writer.go b/core/chainio/avs_writer.go index bfcf87f07..bb88eea24 100644 --- a/core/chainio/avs_writer.go +++ b/core/chainio/avs_writer.go @@ -104,10 +104,6 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe txOpts.NoSend = false i := 0 - // Set Retry config for WaitForTxRetryable - waitForTxConfig := retry.WaitForTxRetryParams() - waitForTxConfig.MaxElapsedTime = timeToWaitBeforeBump - var sentTxs []*types.Transaction batchMerkleRootHashString := hex.EncodeToString(batchMerkleRoot[:]) @@ -175,7 +171,7 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe sentTxs = append(sentTxs, realTx) w.logger.Infof("Transaction 
sent, waiting for receipt", "merkle root", batchMerkleRootHashString) - receipt, err := utils.WaitForTransactionReceiptRetryable(w.Client, w.ClientFallback, realTx.Hash(), waitForTxConfig) + receipt, err := utils.WaitForTransactionReceiptRetryable(w.Client, w.ClientFallback, realTx.Hash(), retry.WaitForTxRetryParams(timeToWaitBeforeBump)) if receipt != nil { w.checkIfAggregatorHadToPaidForBatcher(realTx, batchIdentifierHash) return receipt, nil diff --git a/core/retry.go b/core/retry.go index 9c79600d4..fb39e8d8d 100644 --- a/core/retry.go +++ b/core/retry.go @@ -31,16 +31,19 @@ const ( NetworkRandomizationFactor float64 = 0 // Randomization (Jitter) factor used to map retry interval to a range of values around the computed interval. In precise terms (random value in range [1 - randomizationfactor, 1 + randomizationfactor]). NOTE: This is set to 0 as we do not use jitter in Aligned. NetworkMultiplier float64 = 2 // Multiplier factor computed exponential retry interval is scaled by. NetworkNumRetries uint64 = 3 // Total number of retries attempted. + // Retry Params for Sending Tx to Chain ChainInitialInterval = 12 * time.Second // Initial delay for retry interval for contract calls. Corresponds to 1 ethereum block. ChainMaxInterval = 2 * time.Minute // Maximum interval for an individual retry. + // Retry Params for WaitForTransactionReceipt in the Fee Bump - WaitForTxMaxInterval = 2 * time.Second - WaitForTxNumRetries = 0 + WaitForTxMaxInterval = 2 * time.Second // Maximum interval for an individual retry. + WaitForTxNumRetries = 0 // Total number of retries attempted. If 0, retries indefinitely until maxElapsedTime is reached. + // Retry Parameters for RespondToTaskV2 in the Fee Bump - RespondToTaskV2MaxInterval = time.Millisecond * 500 - RespondToTaskV2MaxElapsedTime = 0 - RespondToTaskV2NumRetries uint64 = 0 + RespondToTaskV2MaxInterval = time.Millisecond * 500 // Maximum interval for an individual retry. 
+ RespondToTaskV2MaxElapsedTime = 0 // Maximum time all retries may take. `0` corresponds to no limit on the time of the retries. + RespondToTaskV2NumRetries uint64 = 0 // Total number of retries attempted. If 0, retries indefinitely until maxElapsedTime is reached. ) type RetryParams struct { @@ -85,11 +88,14 @@ func RespondToTaskV2() *RetryParams { } } -func WaitForTxRetryParams() *RetryParams { +// WaitForTxRetryParams returns the retry parameters for waiting for a transaction to be included in a block. +// maxElapsedTime is received as parameter to allow for a custom timeout +// These parameters are used for the bumping fees logic. +func WaitForTxRetryParams(maxElapsedTime time.Duration) *RetryParams { return &RetryParams{ InitialInterval: NetworkInitialInterval, MaxInterval: WaitForTxMaxInterval, - MaxElapsedTime: NetworkMaxElapsedTime, + MaxElapsedTime: maxElapsedTime, RandomizationFactor: NetworkRandomizationFactor, Multiplier: NetworkMultiplier, NumRetries: WaitForTxNumRetries, From 7ada6f980596ba04c8051b942ca06385f9336624 Mon Sep 17 00:00:00 2001 From: PatStiles Date: Thu, 21 Nov 2024 20:22:18 -0300 Subject: [PATCH 12/12] params nits --- core/chainio/avs_writer.go | 2 +- core/retry_test.go | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/core/chainio/avs_writer.go b/core/chainio/avs_writer.go index bb88eea24..d712f9013 100644 --- a/core/chainio/avs_writer.go +++ b/core/chainio/avs_writer.go @@ -144,7 +144,7 @@ func (w *AvsWriter) SendAggregatedResponse(batchIdentifierHash [32]byte, batchMe } } w.logger.Infof("Receipts for old transactions not found, will check if the batch state has been responded", "merkle root", batchMerkleRootHashString) - batchState, _ := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.SendToChainRetryParams()) + batchState, _ := w.BatchesStateRetryable(&bind.CallOpts{}, batchIdentifierHash, retry.NetworkRetryParams()) if batchState.Responded { w.logger.Infof("Batch state has been 
already responded", "merkle root", batchMerkleRootHashString) return nil, nil diff --git a/core/retry_test.go b/core/retry_test.go index 8e9fcdefb..9b2f416b0 100644 --- a/core/retry_test.go +++ b/core/retry_test.go @@ -169,7 +169,7 @@ func TestWaitForTransactionReceipt(t *testing.T) { } // Assert Call succeeds when Anvil running - _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.NetworkRetryParams()) + _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.SendToChainRetryParams()) assert.NotNil(t, err, "Error Waiting for Transaction with Anvil Running: %s\n", err) if !strings.Contains(err.Error(), "not found") { t.Errorf("WaitForTransactionReceipt Emitted incorrect error: %s\n", err) @@ -181,7 +181,7 @@ func TestWaitForTransactionReceipt(t *testing.T) { return } - _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.NetworkRetryParams()) + _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.SendToChainRetryParams()) assert.NotNil(t, err) if _, ok := err.(retry.PermanentError); ok { t.Errorf("WaitForTransactionReceipt Emitted non Transient error: %s\n", err) @@ -197,7 +197,7 @@ func TestWaitForTransactionReceipt(t *testing.T) { t.Errorf("Error setting up Anvil: %s\n", err) } - _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.NetworkRetryParams()) + _, err = utils.WaitForTransactionReceiptRetryable(*client, *client, hash, retry.SendToChainRetryParams()) assert.NotNil(t, err) if !strings.Contains(err.Error(), "not found") { t.Errorf("WaitForTransactionReceipt Emitted incorrect error: %s\n", err) @@ -662,7 +662,7 @@ func TestRespondToTaskV2(t *testing.T) { zero_bytes := [32]byte{} // NOTE: With zero bytes the tx reverts - _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.NetworkRetryParams()) + _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, 
aggregator_address, nonSignerStakesAndSignature, retry.SendToChainRetryParams()) assert.NotNil(t, err) if !strings.Contains(err.Error(), "execution reverted") { t.Errorf("RespondToTaskV2 did not emit the expected message: %q doesn't contain %q", err.Error(), "execution reverted: custom error 0x2396d34e:") @@ -672,7 +672,7 @@ func TestRespondToTaskV2(t *testing.T) { t.Errorf("Error killing process: %v\n", err) } - _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.NetworkRetryParams()) + _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.SendToChainRetryParams()) assert.NotNil(t, err) if _, ok := err.(*backoff.PermanentError); ok { t.Errorf("RespondToTaskV2 Emitted non-Transient error: %s\n", err) @@ -687,7 +687,7 @@ func TestRespondToTaskV2(t *testing.T) { } // NOTE: With zero bytes the tx reverts - _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.NetworkRetryParams()) + _, err = w.RespondToTaskV2Retryable(&txOpts, zero_bytes, aggregator_address, nonSignerStakesAndSignature, retry.SendToChainRetryParams()) assert.NotNil(t, err) if !strings.Contains(err.Error(), "execution reverted") { t.Errorf("RespondToTaskV2 did not emit the expected message: %q doesn't contain %q", err.Error(), "execution reverted: custom error 0x2396d34e:")