From f985d16fde229470c7e83e5e3e43e648e31ea843 Mon Sep 17 00:00:00 2001 From: "app-token-issuer-infra-releng[bot]" <120227048+app-token-issuer-infra-releng[bot]@users.noreply.github.com> Date: Thu, 7 Nov 2024 07:24:15 -0700 Subject: [PATCH 01/12] [automated] bump solana image to v2.0.14 (#917) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- integration-tests/testconfig/default.toml | 2 +- scripts/install-solana-ci.sh | 2 +- scripts/setup-localnet/localnet.sh | 2 +- solana.nix | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/integration-tests/testconfig/default.toml b/integration-tests/testconfig/default.toml index 0cab23983..dd1f96a99 100644 --- a/integration-tests/testconfig/default.toml +++ b/integration-tests/testconfig/default.toml @@ -38,7 +38,7 @@ inside_k8 = false network = "localnet" user = "default" stateful_db = false -devnet_image = "anzaxyz/agave:v1.18.26" +devnet_image = "anzaxyz/agave:v2.0.14" [OCR2] node_count = 6 diff --git a/scripts/install-solana-ci.sh b/scripts/install-solana-ci.sh index 141f5930e..b347ef17d 100755 --- a/scripts/install-solana-ci.sh +++ b/scripts/install-solana-ci.sh @@ -2,5 +2,5 @@ set -euxo pipefail -sh -c "$(curl -sSfL https://release.anza.xyz/v1.18.26/install)" +sh -c "$(curl -sSfL https://release.anza.xyz/v2.0.14/install)" echo "PATH=$HOME/.local/share/solana/install/active_release/bin:$PATH" >> $GITHUB_ENV diff --git a/scripts/setup-localnet/localnet.sh b/scripts/setup-localnet/localnet.sh index c0e6cd679..40f602f2c 100755 --- a/scripts/setup-localnet/localnet.sh +++ b/scripts/setup-localnet/localnet.sh @@ -6,7 +6,7 @@ cpu_struct="linux"; # Clean up first bash "$(dirname -- "$0";)/localnet.down.sh" -container_version=v1.18.26 +container_version=v2.0.14 container_name="chainlink-solana.test-validator" echo "Starting $container_name@$container_version" diff --git a/solana.nix b/solana.nix index 1cef2a074..db346aa0b 100644 --- a/solana.nix +++ b/solana.nix 
@@ -5,7 +5,7 @@ # Solana integration let - version = "v1.18.26"; + version = "v2.0.14"; getBinDerivation = { name, @@ -37,14 +37,14 @@ let name = "solana-cli-x86_64-linux"; filename = "solana-release-x86_64-unknown-linux-gnu.tar.bz2"; ### BEGIN_LINUX_SHA256 ### - sha256 = "sha256-XMKpm9aVz+ZzrZrKDA1yKX7DbKgtF5IshlZyJzIj32U="; + sha256 = "sha256-FLV9c0xfrlHyNEfc10pEEofjR1D7ihyOp+2N3RnjYrc="; ### END_LINUX_SHA256 ### }; aarch64-apple-darwin = getBinDerivation { name = "solana-cli-aarch64-apple-darwin"; filename = "solana-release-aarch64-apple-darwin.tar.bz2"; ### BEGIN_DARWIN_SHA256 ### - sha256 = "sha256-PPR4d5DZq5sIo50/3G6foeotIPnnRW601BGILs9ag2k="; + sha256 = "sha256-Xcgf0NqMQfgz33cvD0Z+pxvHCKgA17ZsjAHD/4Tgjbg="; ### END_DARWIN_SHA256 ### }; }; From 1adeccbec4e7347bb9d2a313a5f527ce3a10fdc3 Mon Sep 17 00:00:00 2001 From: "app-token-issuer-infra-releng[bot]" <120227048+app-token-issuer-infra-releng[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 09:44:42 +0000 Subject: [PATCH 02/12] [automated] bump solana image to v2.0.15 (#918) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- integration-tests/testconfig/default.toml | 2 +- scripts/install-solana-ci.sh | 2 +- scripts/setup-localnet/localnet.sh | 2 +- solana.nix | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/integration-tests/testconfig/default.toml b/integration-tests/testconfig/default.toml index dd1f96a99..e8cc0a535 100644 --- a/integration-tests/testconfig/default.toml +++ b/integration-tests/testconfig/default.toml @@ -38,7 +38,7 @@ inside_k8 = false network = "localnet" user = "default" stateful_db = false -devnet_image = "anzaxyz/agave:v2.0.14" +devnet_image = "anzaxyz/agave:v2.0.15" [OCR2] node_count = 6 diff --git a/scripts/install-solana-ci.sh b/scripts/install-solana-ci.sh index b347ef17d..8f62e30f9 100755 --- a/scripts/install-solana-ci.sh +++ b/scripts/install-solana-ci.sh @@ -2,5 +2,5 @@ set -euxo pipefail -sh -c "$(curl 
-sSfL https://release.anza.xyz/v2.0.14/install)" +sh -c "$(curl -sSfL https://release.anza.xyz/v2.0.15/install)" echo "PATH=$HOME/.local/share/solana/install/active_release/bin:$PATH" >> $GITHUB_ENV diff --git a/scripts/setup-localnet/localnet.sh b/scripts/setup-localnet/localnet.sh index 40f602f2c..31a005ea1 100755 --- a/scripts/setup-localnet/localnet.sh +++ b/scripts/setup-localnet/localnet.sh @@ -6,7 +6,7 @@ cpu_struct="linux"; # Clean up first bash "$(dirname -- "$0";)/localnet.down.sh" -container_version=v2.0.14 +container_version=v2.0.15 container_name="chainlink-solana.test-validator" echo "Starting $container_name@$container_version" diff --git a/solana.nix b/solana.nix index db346aa0b..401e015d7 100644 --- a/solana.nix +++ b/solana.nix @@ -5,7 +5,7 @@ # Solana integration let - version = "v2.0.14"; + version = "v2.0.15"; getBinDerivation = { name, @@ -37,14 +37,14 @@ let name = "solana-cli-x86_64-linux"; filename = "solana-release-x86_64-unknown-linux-gnu.tar.bz2"; ### BEGIN_LINUX_SHA256 ### - sha256 = "sha256-FLV9c0xfrlHyNEfc10pEEofjR1D7ihyOp+2N3RnjYrc="; + sha256 = "sha256-Hd8qhNExur6CSHF7S1ZzRSGMZrJW9FHU3JzVnShvkLI="; ### END_LINUX_SHA256 ### }; aarch64-apple-darwin = getBinDerivation { name = "solana-cli-aarch64-apple-darwin"; filename = "solana-release-aarch64-apple-darwin.tar.bz2"; ### BEGIN_DARWIN_SHA256 ### - sha256 = "sha256-Xcgf0NqMQfgz33cvD0Z+pxvHCKgA17ZsjAHD/4Tgjbg="; + sha256 = "sha256-BglUcvkGx+D0Has9/BqE1WWQ8PNfdOlc75OM5/jFn7E="; ### END_DARWIN_SHA256 ### }; }; From fe731a431aaea1e3789a44e870c4e0ec1f351047 Mon Sep 17 00:00:00 2001 From: Aaron Lu <50029043+aalu1418@users.noreply.github.com> Date: Fri, 8 Nov 2024 07:52:02 -0700 Subject: [PATCH 03/12] stale PR workflow (#920) --- .github/workflows/stale.yml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 .github/workflows/stale.yml diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 000000000..b2ed7ff36 --- 
/dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,26 @@ +# Workflow is triggered daily midnight UTC +# A PR with more than 60 days of inactivity will be marked as stale +# A PR that's stale for more than 7 days will be automatically closed +# Issues are exempt from auto marking as stale but issues with manually added 'stale' label are eligible for auto closure after 7 days. +# PRs with assignees are exempt from auto stale marking, it's the responsibility of the assignee to get the PR progressed either with review/merge or closure. +name: Manage stale Issues and PRs + +on: + schedule: + - cron: "0 0 * * *" # Will be triggered every day at midnight UTC + +jobs: + stale: + + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + + steps: + - uses: actions/stale@v9.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + exempt-all-pr-assignees: true + stale-pr-message: 'This PR is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 7 days.' + days-before-issue-stale: -1 # disables marking issues as stale automatically. Issues can still be marked as stale manually, in which the closure policy applies. 
From efd6780f69301c8f3f6599cbf3512e4f25270033 Mon Sep 17 00:00:00 2001 From: pablolagreca Date: Tue, 12 Nov 2024 11:52:41 -0300 Subject: [PATCH 04/12] BCFR-966 - Update Solana to support disable chain component tests (#878) * BCFR-967 - add code to support disabling tests * updating go.mod * updating chainlink version so it compiles * fixing gomodtidy failure --- go.mod | 4 ++-- go.sum | 8 ++++---- integration-tests/go.mod | 10 +++++----- integration-tests/go.sum | 16 ++++++++-------- pkg/solana/chainreader/chain_reader_test.go | 1 + 5 files changed, 20 insertions(+), 19 deletions(-) diff --git a/go.mod b/go.mod index 3b5c96887..85c6898ee 100644 --- a/go.mod +++ b/go.mod @@ -13,11 +13,11 @@ require ( github.com/gagliardetto/utilz v0.1.1 github.com/go-viper/mapstructure/v2 v2.1.0 github.com/google/uuid v1.6.0 - github.com/hashicorp/go-plugin v1.6.2-0.20240829161738-06afb6d7ae99 + github.com/hashicorp/go-plugin v1.6.2 github.com/jpillora/backoff v1.0.0 github.com/pelletier/go-toml/v2 v2.2.0 github.com/prometheus/client_golang v1.17.0 - github.com/smartcontractkit/chainlink-common v0.3.1-0.20241023204219-86c89e29937d + github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6 github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12 github.com/stretchr/testify v1.9.0 go.uber.org/zap v1.27.0 diff --git a/go.sum b/go.sum index cc25014f7..e73004db8 100644 --- a/go.sum +++ b/go.sum @@ -249,8 +249,8 @@ github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVH github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-plugin v1.6.2-0.20240829161738-06afb6d7ae99 h1:OSQYEsRT3tRttZkk6zyC3aAaliwd7Loi/KgXgXxGtwA= -github.com/hashicorp/go-plugin 
v1.6.2-0.20240829161738-06afb6d7ae99/go.mod h1:CkgLQ5CZqNmdL9U9JzM532t8ZiYQ35+pj3b1FD37R0Q= +github.com/hashicorp/go-plugin v1.6.2 h1:zdGAEd0V1lCaU0u+MxWQhtSDQmahpkwOun8U8EiRVog= +github.com/hashicorp/go-plugin v1.6.2/go.mod h1:CkgLQ5CZqNmdL9U9JzM532t8ZiYQ35+pj3b1FD37R0Q= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -435,8 +435,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartcontractkit/chainlink-common v0.3.1-0.20241023204219-86c89e29937d h1:34F6OuNyPwCwBXBG8I+s6BbngHlVNOtDKWMOZ9iXOpY= -github.com/smartcontractkit/chainlink-common v0.3.1-0.20241023204219-86c89e29937d/go.mod h1:TQ9/KKXZ9vr8QAlUquqGpSvDCpR+DtABKPXZY4CiRns= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6 h1:yJNBWCdNL/X8+wEs3TGTBe9gssMmw5FTFxxrlo+0mVo= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 h1:12ijqMM9tvYVEm+nR826WsrNi6zCKpwBhuApq127wHs= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7/go.mod h1:FX7/bVdoep147QQhsOPkYsPEXhGZjeYx6lBSaSXtZOA= github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12 h1:NzZGjaqez21I3DU7objl3xExTH4fxYvzTqar8DC6360= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index c5d204753..bbe5fade1 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -16,12 
+16,12 @@ require ( github.com/lib/pq v1.10.9 github.com/pelletier/go-toml/v2 v2.2.3 github.com/rs/zerolog v1.33.0 - github.com/smartcontractkit/chainlink-common v0.3.1-0.20241025132045-cfad02139595 - github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241024132041-a3eb2e31b4c4 + github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6 + github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241104202120-39cabce465f6 github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.13 github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.5 github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20241028185036-c645d6db311d - github.com/smartcontractkit/chainlink/v2 v2.14.0-mercury-20240807.0.20241028185036-c645d6db311d + github.com/smartcontractkit/chainlink/v2 v2.14.0-mercury-20240807.0.20241111141533-bb0b7e51595c github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12 github.com/stretchr/testify v1.9.0 github.com/testcontainers/testcontainers-go v0.34.0 @@ -256,7 +256,7 @@ require ( github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.6.2-0.20240829161738-06afb6d7ae99 // indirect + github.com/hashicorp/go-plugin v1.6.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-sockaddr v1.0.6 // indirect @@ -386,7 +386,7 @@ require ( github.com/slack-go/slack v0.15.0 // indirect github.com/smartcontractkit/chain-selectors v1.0.27 // indirect github.com/smartcontractkit/chainlink-automation v1.0.5-0.20241009152924-78acf196c332 // indirect - github.com/smartcontractkit/chainlink-ccip v0.0.0-20241025085158-0f6dce5d1fdb // indirect + github.com/smartcontractkit/chainlink-ccip v0.0.0-20241106140121-4c9ee21ab422 // indirect github.com/smartcontractkit/chainlink-cosmos 
v0.5.2-0.20241017133723-5277829bd53f // indirect github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e // indirect github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect diff --git a/integration-tests/go.sum b/integration-tests/go.sum index 4de60a0b8..23ab321c5 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -846,8 +846,8 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.6.2-0.20240829161738-06afb6d7ae99 h1:OSQYEsRT3tRttZkk6zyC3aAaliwd7Loi/KgXgXxGtwA= -github.com/hashicorp/go-plugin v1.6.2-0.20240829161738-06afb6d7ae99/go.mod h1:CkgLQ5CZqNmdL9U9JzM532t8ZiYQ35+pj3b1FD37R0Q= +github.com/hashicorp/go-plugin v1.6.2 h1:zdGAEd0V1lCaU0u+MxWQhtSDQmahpkwOun8U8EiRVog= +github.com/hashicorp/go-plugin v1.6.2/go.mod h1:CkgLQ5CZqNmdL9U9JzM532t8ZiYQ35+pj3b1FD37R0Q= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= @@ -1384,10 +1384,10 @@ github.com/smartcontractkit/chain-selectors v1.0.27 h1:VE/ftX9Aae4gnw67yR1raKi+3 github.com/smartcontractkit/chain-selectors v1.0.27/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation v1.0.5-0.20241009152924-78acf196c332 h1:PA8owz52v9CPdHMYUxHAqfHgo+QgZqP6kOBgioJeRno= github.com/smartcontractkit/chainlink-automation v1.0.5-0.20241009152924-78acf196c332/go.mod h1:74ly9zfnQ9EwBtHZH46sIAbxQdOnX56fFjjvSQvn53k= 
-github.com/smartcontractkit/chainlink-ccip v0.0.0-20241025085158-0f6dce5d1fdb h1:LfcX2Dl59DdxAj49NnbiVJPM0oJVDE7dr+SO+Yz4qUE= -github.com/smartcontractkit/chainlink-ccip v0.0.0-20241025085158-0f6dce5d1fdb/go.mod h1:4adKaHNaxFsRvV/lYfqtbsWyyvIPUMLR0FdOJN/ljis= -github.com/smartcontractkit/chainlink-common v0.3.1-0.20241025132045-cfad02139595 h1:H6i0LEvXB0se/63E3jE9N0/7TugOYLpK4e6TT6a0omc= -github.com/smartcontractkit/chainlink-common v0.3.1-0.20241025132045-cfad02139595/go.mod h1:TQ9/KKXZ9vr8QAlUquqGpSvDCpR+DtABKPXZY4CiRns= +github.com/smartcontractkit/chainlink-ccip v0.0.0-20241106140121-4c9ee21ab422 h1:VfH/AW5NtTmroY9zz6OYCPFbFTqpMyJ2ubgT9ahYf3U= +github.com/smartcontractkit/chainlink-ccip v0.0.0-20241106140121-4c9ee21ab422/go.mod h1:4adKaHNaxFsRvV/lYfqtbsWyyvIPUMLR0FdOJN/ljis= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6 h1:yJNBWCdNL/X8+wEs3TGTBe9gssMmw5FTFxxrlo+0mVo= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f h1:BwrIaQIx5Iy6eT+DfLhFfK2XqjxRm74mVdlX8gbu4dw= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f/go.mod h1:wHtwSR3F1CQSJJZDQKuqaqFYnvkT+kMyget7dl8Clvo= github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e h1:JiETqdNM0bktAUGMc62COwXIaw3rR3M77Me6bBLG0Fg= @@ -1408,8 +1408,8 @@ github.com/smartcontractkit/chainlink-testing-framework/wasp v1.50.2 h1:7bCdbTUW github.com/smartcontractkit/chainlink-testing-framework/wasp v1.50.2/go.mod h1:MltlNu3jcXm/DyLN98I5TFNtu/o1NNAcaPAFKMXWk70= github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20241028185036-c645d6db311d h1:U8LSq81dnqGCfhJEa6l+xigkE1wm1Gj6b37Rb7QNwko= github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20241028185036-c645d6db311d/go.mod h1:FxtPCnzp9lKiktx3HQNtBoKVqiFO9+7NreFCUEFRtEk= 
-github.com/smartcontractkit/chainlink/v2 v2.14.0-mercury-20240807.0.20241028185036-c645d6db311d h1:pLO8e8x6bf/IBo0JYynKQECwE+B6bAyM4IqFBnxaaT8= -github.com/smartcontractkit/chainlink/v2 v2.14.0-mercury-20240807.0.20241028185036-c645d6db311d/go.mod h1:KfQ6n4zawQmCdBaU41UyFLv0K3EXhZMH1QkDorFFRK0= +github.com/smartcontractkit/chainlink/v2 v2.14.0-mercury-20240807.0.20241111141533-bb0b7e51595c h1:AtweV5rcoBgzKRAvqdGe402b4HUm+Gt8GahGkOJBRkE= +github.com/smartcontractkit/chainlink/v2 v2.14.0-mercury-20240807.0.20241111141533-bb0b7e51595c/go.mod h1:ShJheYm7UbH/JMb+H4d2ivwCTYf744A4EBPo88Nwzek= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 h1:12ijqMM9tvYVEm+nR826WsrNi6zCKpwBhuApq127wHs= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7/go.mod h1:FX7/bVdoep147QQhsOPkYsPEXhGZjeYx6lBSaSXtZOA= github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12 h1:NzZGjaqez21I3DU7objl3xExTH4fxYvzTqar8DC6360= diff --git a/pkg/solana/chainreader/chain_reader_test.go b/pkg/solana/chainreader/chain_reader_test.go index 6c83b0f91..7a1255c07 100644 --- a/pkg/solana/chainreader/chain_reader_test.go +++ b/pkg/solana/chainreader/chain_reader_test.go @@ -417,6 +417,7 @@ func (_m *mockedRPCClient) SetForAddress(pk ag_solana.PublicKey, bts []byte, err } type chainReaderInterfaceTester struct { + TestSelectionSupport conf config.ChainReader address []string reader *wrappedTestChainReader From 65ae13752669cfe8786512e180c690815d6d9ec6 Mon Sep 17 00:00:00 2001 From: amit-momin <108959691+amit-momin@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:39:49 -0600 Subject: [PATCH 05/12] Updated Solana TXM in-memory storage layer to track transactions states (#909) * Updated the in-memory storage to use state maps to better track transactions across their lifecycle * Removed all tx map from in-memory storage * Moved retention timeout logic into OnFinalized and OnError methods * Updated internal tests and fixed linting * Added check for same 
state transition calls on transactions * Updated logs and fixed chain test * Added new internal TXM tests and moved tx ID generation to Enqueue * Updated broadcast log and fixed confirm timeout logic * Fixed linting * Updated internal tests to validate reap mechanism * Updated comment * Updated error messages * Fixed tests * Fixed internal tests and linting * Reverted predefined error and updated error logs * Fixed chain test * Updated keystore Accounts mock * Added errors to state change methods and updated logs * Encapsulated in-memory storage locking in separate methods * Fixed tests and linting * Added tests for add signature and get tx state * Fixed linting --- pkg/solana/chain.go | 2 +- pkg/solana/chain_test.go | 11 +- pkg/solana/config/config.go | 8 +- pkg/solana/config/mocks/config.go | 18 + pkg/solana/config/toml.go | 6 + pkg/solana/relay.go | 2 +- pkg/solana/transmitter.go | 2 +- pkg/solana/transmitter_test.go | 2 +- pkg/solana/txm/pendingtx.go | 581 +++++++++++++---- pkg/solana/txm/pendingtx_test.go | 949 +++++++++++++++++++++++++++- pkg/solana/txm/prom.go | 4 + pkg/solana/txm/txm.go | 242 ++++--- pkg/solana/txm/txm_internal_test.go | 656 ++++++++++++++----- pkg/solana/txm/txm_load_test.go | 12 +- pkg/solana/txm/txm_race_test.go | 60 +- pkg/solana/txm/utils.go | 54 +- pkg/solana/txm/utils_test.go | 10 +- 17 files changed, 2170 insertions(+), 449 deletions(-) diff --git a/pkg/solana/chain.go b/pkg/solana/chain.go index c47e1cf1b..55b199912 100644 --- a/pkg/solana/chain.go +++ b/pkg/solana/chain.go @@ -574,7 +574,7 @@ func (c *chain) sendTx(ctx context.Context, from, to string, amount *big.Int, ba } chainTxm := c.TxManager() - err = chainTxm.Enqueue(ctx, "", tx, + err = chainTxm.Enqueue(ctx, "", tx, nil, txm.SetComputeUnitLimit(500), // reduce from default 200K limit - should only take 450 compute units // no fee bumping and no additional fee - makes validating balance accurate txm.SetComputeUnitPriceMax(0), diff --git a/pkg/solana/chain_test.go 
b/pkg/solana/chain_test.go index b705860c9..b5e9adaf8 100644 --- a/pkg/solana/chain_test.go +++ b/pkg/solana/chain_test.go @@ -287,11 +287,11 @@ func TestChain_Transact(t *testing.T) { require.NoError(t, c.txm.Start(ctx)) require.NoError(t, c.Transact(ctx, sender.PublicKey().String(), receiver.PublicKey().String(), amount, true)) - tests.AssertLogEventually(t, logs, "tx state: confirmed") + tests.AssertLogEventually(t, logs, "marking transaction as confirmed") tests.AssertLogEventually(t, logs, "stopped tx retry") require.NoError(t, c.txm.Close()) - filteredLogs := logs.FilterMessage("tx state: confirmed").All() + filteredLogs := logs.FilterMessage("marking transaction as confirmed").All() require.Len(t, filteredLogs, 1) sig, ok := filteredLogs[0].ContextMap()["signature"] require.True(t, ok) @@ -515,6 +515,7 @@ func TestSolanaChain_MultiNode_Txm(t *testing.T) { return sig[:] }, nil) mkey.On("Sign", mock.Anything, pubKeyReceiver.String(), mock.Anything).Return([]byte{}, config.KeyNotFoundError{ID: pubKeyReceiver.String(), KeyType: "Solana"}) + mkey.On("Accounts", mock.Anything).Return([]string{pubKey.String()}, nil).Maybe() testChain, err := newChain("localnet", cfg, mkey, logger.Test(t)) require.NoError(t, err) @@ -556,7 +557,7 @@ func TestSolanaChain_MultiNode_Txm(t *testing.T) { } // Send funds twice, along with an invalid transaction - require.NoError(t, testChain.txm.Enqueue(tests.Context(t), "test_success", createTx(pubKey, pubKey, pubKeyReceiver, solana.LAMPORTS_PER_SOL))) + require.NoError(t, testChain.txm.Enqueue(tests.Context(t), "test_success", createTx(pubKey, pubKey, pubKeyReceiver, solana.LAMPORTS_PER_SOL), nil)) // Wait for new block hash currentBh, err := selectedClient.LatestBlockhash(tests.Context(t)) @@ -577,8 +578,8 @@ NewBlockHash: } } - require.NoError(t, testChain.txm.Enqueue(tests.Context(t), "test_success_2", createTx(pubKey, pubKey, pubKeyReceiver, solana.LAMPORTS_PER_SOL))) - require.Error(t, testChain.txm.Enqueue(tests.Context(t), 
"test_invalidSigner", createTx(pubKeyReceiver, pubKey, pubKeyReceiver, solana.LAMPORTS_PER_SOL))) // cannot sign tx before enqueuing + require.NoError(t, testChain.txm.Enqueue(tests.Context(t), "test_success_2", createTx(pubKey, pubKey, pubKeyReceiver, solana.LAMPORTS_PER_SOL), nil)) + require.Error(t, testChain.txm.Enqueue(tests.Context(t), "test_invalidSigner", createTx(pubKeyReceiver, pubKey, pubKeyReceiver, solana.LAMPORTS_PER_SOL), nil)) // cannot sign tx before enqueuing // wait for all txes to finish ctx, cancel := context.WithCancel(tests.Context(t)) diff --git a/pkg/solana/config/config.go b/pkg/solana/config/config.go index 08d86d631..28d7ac5fb 100644 --- a/pkg/solana/config/config.go +++ b/pkg/solana/config/config.go @@ -17,7 +17,8 @@ var defaultConfigSet = Chain{ OCR2CacheTTL: config.MustNewDuration(time.Minute), // stale cache deadline TxTimeout: config.MustNewDuration(time.Minute), // timeout for send tx method in client TxRetryTimeout: config.MustNewDuration(10 * time.Second), // duration for tx rebroadcasting to RPC node - TxConfirmTimeout: config.MustNewDuration(30 * time.Second), // duration before discarding tx as unconfirmed + TxConfirmTimeout: config.MustNewDuration(30 * time.Second), // duration before discarding tx as unconfirmed. Set to 0 to disable discarding tx. + TxRetentionTimeout: config.MustNewDuration(0 * time.Second), // duration to retain transactions after being marked as finalized or errored. Set to 0 to immediately drop transactions. SkipPreflight: ptr(true), // to enable or disable preflight checks Commitment: ptr(string(rpc.CommitmentConfirmed)), MaxRetries: ptr(int64(0)), // max number of retries (default = 0). 
when config.MaxRetries < 0), interpreted as MaxRetries = nil and rpc node will do a reasonable number of retries @@ -43,6 +44,7 @@ type Config interface { TxTimeout() time.Duration TxRetryTimeout() time.Duration TxConfirmTimeout() time.Duration + TxRetentionTimeout() time.Duration SkipPreflight() bool Commitment() rpc.CommitmentType MaxRetries() *uint @@ -67,6 +69,7 @@ type Chain struct { TxTimeout *config.Duration TxRetryTimeout *config.Duration TxConfirmTimeout *config.Duration + TxRetentionTimeout *config.Duration SkipPreflight *bool Commitment *string MaxRetries *int64 @@ -103,6 +106,9 @@ func (c *Chain) SetDefaults() { if c.TxConfirmTimeout == nil { c.TxConfirmTimeout = defaultConfigSet.TxConfirmTimeout } + if c.TxRetentionTimeout == nil { + c.TxRetentionTimeout = defaultConfigSet.TxRetentionTimeout + } if c.SkipPreflight == nil { c.SkipPreflight = defaultConfigSet.SkipPreflight } diff --git a/pkg/solana/config/mocks/config.go b/pkg/solana/config/mocks/config.go index 4d5685b33..feef5c3c6 100644 --- a/pkg/solana/config/mocks/config.go +++ b/pkg/solana/config/mocks/config.go @@ -322,6 +322,24 @@ func (_m *Config) TxConfirmTimeout() time.Duration { return r0 } +// TxRetentionTimeout provides a mock function with given fields: +func (_m *Config) TxRetentionTimeout() time.Duration { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TxRetentionTimeout") + } + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + // TxRetryTimeout provides a mock function with given fields: func (_m *Config) TxRetryTimeout() time.Duration { ret := _m.Called() diff --git a/pkg/solana/config/toml.go b/pkg/solana/config/toml.go index 5f8f770eb..6e9eadc5d 100644 --- a/pkg/solana/config/toml.go +++ b/pkg/solana/config/toml.go @@ -155,6 +155,9 @@ func setFromChain(c, f *Chain) { if f.TxConfirmTimeout != nil { c.TxConfirmTimeout = f.TxConfirmTimeout } + 
if f.TxRetentionTimeout != nil { + c.TxRetentionTimeout = f.TxRetentionTimeout + } if f.SkipPreflight != nil { c.SkipPreflight = f.SkipPreflight } @@ -238,6 +241,9 @@ func (c *TOMLConfig) TxConfirmTimeout() time.Duration { return c.Chain.TxConfirmTimeout.Duration() } +func (c *TOMLConfig) TxRetentionTimeout() time.Duration { + return c.Chain.TxRetentionTimeout.Duration() +} func (c *TOMLConfig) SkipPreflight() bool { return *c.Chain.SkipPreflight } diff --git a/pkg/solana/relay.go b/pkg/solana/relay.go index 6edd11b4f..8266293ef 100644 --- a/pkg/solana/relay.go +++ b/pkg/solana/relay.go @@ -24,7 +24,7 @@ import ( var _ TxManager = (*txm.Txm)(nil) type TxManager interface { - Enqueue(ctx context.Context, accountID string, msg *solana.Transaction, txCfgs ...txm.SetTxConfig) error + Enqueue(ctx context.Context, accountID string, tx *solana.Transaction, txID *string, txCfgs ...txm.SetTxConfig) error } var _ relaytypes.Relayer = &Relayer{} //nolint:staticcheck diff --git a/pkg/solana/transmitter.go b/pkg/solana/transmitter.go index 4a3731921..a488730d0 100644 --- a/pkg/solana/transmitter.go +++ b/pkg/solana/transmitter.go @@ -84,7 +84,7 @@ func (c *Transmitter) Transmit( // pass transmit payload to tx manager queue c.lggr.Debugf("Queuing transmit tx: state (%s) + transmissions (%s)", c.stateID.String(), c.transmissionsID.String()) - if err = c.txManager.Enqueue(ctx, c.stateID.String(), tx); err != nil { + if err = c.txManager.Enqueue(ctx, c.stateID.String(), tx, nil); err != nil { return fmt.Errorf("error on Transmit.txManager.Enqueue: %w", err) } return nil diff --git a/pkg/solana/transmitter_test.go b/pkg/solana/transmitter_test.go index 66dd8658c..6aef6c921 100644 --- a/pkg/solana/transmitter_test.go +++ b/pkg/solana/transmitter_test.go @@ -26,7 +26,7 @@ type verifyTxSize struct { s *solana.PrivateKey } -func (txm verifyTxSize) Enqueue(_ context.Context, _ string, tx *solana.Transaction, _ ...txm.SetTxConfig) error { +func (txm verifyTxSize) Enqueue(_ 
context.Context, _ string, tx *solana.Transaction, txID *string, _ ...txm.SetTxConfig) error { // additional components that transaction manager adds to the transaction require.NoError(txm.t, fees.SetComputeUnitPrice(tx, 0)) require.NoError(txm.t, fees.SetComputeUnitLimit(tx, 0)) diff --git a/pkg/solana/txm/pendingtx.go b/pkg/solana/txm/pendingtx.go index 4bf06c653..b2c3c98ed 100644 --- a/pkg/solana/txm/pendingtx.go +++ b/pkg/solana/txm/pendingtx.go @@ -3,132 +3,204 @@ package txm import ( "context" "errors" + "fmt" "sync" "time" "github.com/gagliardetto/solana-go" - "github.com/google/uuid" "golang.org/x/exp/maps" ) +var ( + ErrAlreadyInExpectedState = errors.New("transaction already in expected state") + ErrSigAlreadyExists = errors.New("signature already exists") + ErrIDAlreadyExists = errors.New("id already exists") + ErrSigDoesNotExist = errors.New("signature does not exist") + ErrTransactionNotFound = errors.New("transaction not found for id") +) + type PendingTxContext interface { - New(sig solana.Signature, cancel context.CancelFunc) (uuid.UUID, error) - Add(id uuid.UUID, sig solana.Signature) error - Remove(sig solana.Signature) uuid.UUID + // New adds a new tranasction in Broadcasted state to the storage + New(msg pendingTx, sig solana.Signature, cancel context.CancelFunc) error + // AddSignature adds a new signature for an existing transaction ID + AddSignature(id string, sig solana.Signature) error + // Remove removes transaction and related signatures from storage if not in finalized or errored state + Remove(sig solana.Signature) (string, error) + // ListAll returns all of the signatures being tracked for all transactions not yet finalized or errored ListAll() []solana.Signature - Expired(sig solana.Signature, lifespan time.Duration) bool - // state change hooks - OnSuccess(sig solana.Signature) uuid.UUID - OnError(sig solana.Signature, errType int) uuid.UUID // match err type using enum + // Expired returns whether or not confirmation timeout amount 
of time has passed since creation + Expired(sig solana.Signature, confirmationTimeout time.Duration) bool + // OnProcessed marks transactions as Processed + OnProcessed(sig solana.Signature) (string, error) + // OnConfirmed marks transaction as Confirmed and moves it from broadcast map to confirmed map + OnConfirmed(sig solana.Signature) (string, error) + // OnFinalized marks transaction as Finalized, moves it from the broadcasted or confirmed map to finalized map, removes signatures from signature map to stop confirmation checks + OnFinalized(sig solana.Signature, retentionTimeout time.Duration) (string, error) + // OnError marks transaction as errored, matches err type using enum, moves it from the broadcasted or confirmed map to finalized/errored map, removes signatures from signature map to stop confirmation checks + OnError(sig solana.Signature, retentionTimeout time.Duration, errType int) (string, error) + // GetTxState returns the transaction state for the provided ID if it exists + GetTxState(id string) (TxState, error) + // TrimFinalizedErroredTxs removes transactions that have reached their retention time + TrimFinalizedErroredTxs() +} + +type pendingTx struct { + tx solana.Transaction + cfg TxConfig + signatures []solana.Signature + id string + createTs time.Time + retentionTs time.Time + state TxState } var _ PendingTxContext = &pendingTxContext{} type pendingTxContext struct { - cancelBy map[uuid.UUID]context.CancelFunc - timestamp map[uuid.UUID]time.Time - sigToID map[solana.Signature]uuid.UUID - idToSigs map[uuid.UUID][]solana.Signature - lock sync.RWMutex + cancelBy map[string]context.CancelFunc + sigToID map[solana.Signature]string + + broadcastedTxs map[string]pendingTx // transactions that require retry and bumping i.e broadcasted, processed + confirmedTxs map[string]pendingTx // transactions that require monitoring for re-org + finalizedErroredTxs map[string]pendingTx // finalized and errored transactions held onto for status + + lock 
sync.RWMutex } func newPendingTxContext() *pendingTxContext { return &pendingTxContext{ - cancelBy: map[uuid.UUID]context.CancelFunc{}, - timestamp: map[uuid.UUID]time.Time{}, - sigToID: map[solana.Signature]uuid.UUID{}, - idToSigs: map[uuid.UUID][]solana.Signature{}, + cancelBy: map[string]context.CancelFunc{}, + sigToID: map[solana.Signature]string{}, + + broadcastedTxs: map[string]pendingTx{}, + confirmedTxs: map[string]pendingTx{}, + finalizedErroredTxs: map[string]pendingTx{}, } } -func (c *pendingTxContext) New(sig solana.Signature, cancel context.CancelFunc) (uuid.UUID, error) { - // validate signature does not exist - c.lock.RLock() - if _, exists := c.sigToID[sig]; exists { - c.lock.RUnlock() - return uuid.UUID{}, errors.New("signature already exists") +func (c *pendingTxContext) New(tx pendingTx, sig solana.Signature, cancel context.CancelFunc) error { + err := c.withReadLock(func() error { + // validate signature does not exist + if _, exists := c.sigToID[sig]; exists { + return ErrSigAlreadyExists + } + // validate id does not exist + if _, exists := c.broadcastedTxs[tx.id]; exists { + return ErrIDAlreadyExists + } + return nil + }) + if err != nil { + return err } - c.lock.RUnlock() - // upgrade to write lock if sig does not exist - c.lock.Lock() - defer c.lock.Unlock() - if _, exists := c.sigToID[sig]; exists { - return uuid.UUID{}, errors.New("signature already exists") - } - // save cancel func - id := uuid.New() - c.cancelBy[id] = cancel - c.timestamp[id] = time.Now() - c.sigToID[sig] = id - c.idToSigs[id] = []solana.Signature{sig} - return id, nil + // upgrade to write lock if sig or id do not exist + _, err = c.withWriteLock(func() (string, error) { + if _, exists := c.sigToID[sig]; exists { + return "", ErrSigAlreadyExists + } + if _, exists := c.broadcastedTxs[tx.id]; exists { + return "", ErrIDAlreadyExists + } + // save cancel func + c.cancelBy[tx.id] = cancel + c.sigToID[sig] = tx.id + // add signature to tx + tx.signatures = 
append(tx.signatures, sig) + tx.createTs = time.Now() + tx.state = Broadcasted + // save to the broadcasted map since transaction was just broadcasted + c.broadcastedTxs[tx.id] = tx + return "", nil + }) + return err } -func (c *pendingTxContext) Add(id uuid.UUID, sig solana.Signature) error { - // already exists - c.lock.RLock() - if _, exists := c.sigToID[sig]; exists { - c.lock.RUnlock() - return errors.New("signature already exists") - } - if _, exists := c.idToSigs[id]; !exists { - c.lock.RUnlock() - return errors.New("id does not exist") +func (c *pendingTxContext) AddSignature(id string, sig solana.Signature) error { + err := c.withReadLock(func() error { + // signature already exists + if _, exists := c.sigToID[sig]; exists { + return ErrSigAlreadyExists + } + // new signatures should only be added for broadcasted transactions + // otherwise, the transaction has transitioned states and no longer needs new signatures to track + if _, exists := c.broadcastedTxs[id]; !exists { + return ErrTransactionNotFound + } + return nil + }) + if err != nil { + return err } - c.lock.RUnlock() // upgrade to write lock if sig does not exist - c.lock.Lock() - defer c.lock.Unlock() - if _, exists := c.sigToID[sig]; exists { - return errors.New("signature already exists") - } - if _, exists := c.idToSigs[id]; !exists { - return errors.New("id does not exist - tx likely confirmed by other signature") - } - // save signature - c.sigToID[sig] = id - c.idToSigs[id] = append(c.idToSigs[id], sig) - return nil + _, err = c.withWriteLock(func() (string, error) { + if _, exists := c.sigToID[sig]; exists { + return "", ErrSigAlreadyExists + } + if _, exists := c.broadcastedTxs[id]; !exists { + return "", ErrTransactionNotFound + } + c.sigToID[sig] = id + tx := c.broadcastedTxs[id] + // save new signature + tx.signatures = append(tx.signatures, sig) + // save updated tx to broadcasted map + c.broadcastedTxs[id] = tx + return "", nil + }) + return err } -// returns the id if removed 
(otherwise returns 0-id) -func (c *pendingTxContext) Remove(sig solana.Signature) (id uuid.UUID) { - // check if already cancelled - c.lock.RLock() - id, sigExists := c.sigToID[sig] - if !sigExists { - c.lock.RUnlock() - return id - } - if _, idExists := c.idToSigs[id]; !idExists { - c.lock.RUnlock() - return id +// returns the id if removed (otherwise returns empty string) +// removes transactions from any state except finalized and errored +func (c *pendingTxContext) Remove(sig solana.Signature) (id string, err error) { + err = c.withReadLock(func() error { + // check if already removed + id, sigExists := c.sigToID[sig] + if !sigExists { + return ErrSigDoesNotExist + } + _, broadcastedIDExists := c.broadcastedTxs[id] + _, confirmedIDExists := c.confirmedTxs[id] + // transcation does not exist in tx maps + if !broadcastedIDExists && !confirmedIDExists { + return ErrTransactionNotFound + } + return nil + }) + if err != nil { + return "", err } - c.lock.RUnlock() // upgrade to write lock if sig does not exist - c.lock.Lock() - defer c.lock.Unlock() - id, sigExists = c.sigToID[sig] - if !sigExists { - return id - } - sigs, idExists := c.idToSigs[id] - if !idExists { - return id - } + return c.withWriteLock(func() (string, error) { + id, sigExists := c.sigToID[sig] + if !sigExists { + return id, ErrSigDoesNotExist + } + var tx pendingTx + if tempTx, exists := c.broadcastedTxs[id]; exists { + tx = tempTx + delete(c.broadcastedTxs, id) + } + if tempTx, exists := c.confirmedTxs[id]; exists { + tx = tempTx + delete(c.confirmedTxs, id) + } - // call cancel func + remove from map - c.cancelBy[id]() // cancel context - delete(c.cancelBy, id) - delete(c.timestamp, id) - delete(c.idToSigs, id) - for _, s := range sigs { - delete(c.sigToID, s) - } - return id + // call cancel func + remove from map + if cancel, exists := c.cancelBy[id]; exists { + cancel() // cancel context + delete(c.cancelBy, id) + } + + // remove all signatures associated with transaction from sig map + for 
_, s := range tx.signatures { + delete(c.sigToID, s) + } + return id, nil + }) } func (c *pendingTxContext) ListAll() []solana.Signature { @@ -138,28 +210,283 @@ func (c *pendingTxContext) ListAll() []solana.Signature { } // Expired returns if the timeout for trying to confirm a signature has been reached -func (c *pendingTxContext) Expired(sig solana.Signature, lifespan time.Duration) bool { +func (c *pendingTxContext) Expired(sig solana.Signature, confirmationTimeout time.Duration) bool { c.lock.RLock() defer c.lock.RUnlock() + // confirmationTimeout set to 0 disables the expiration check + if confirmationTimeout == 0 { + return false + } id, exists := c.sigToID[sig] if !exists { return false // return expired = false if timestamp does not exist (likely cleaned up by something else previously) } + if tx, exists := c.broadcastedTxs[id]; exists { + return time.Since(tx.createTs) > confirmationTimeout + } + if tx, exists := c.confirmedTxs[id]; exists { + return time.Since(tx.createTs) > confirmationTimeout + } + return false // return expired = false if tx does not exist (likely cleaned up by something else previously) +} - timestamp, exists := c.timestamp[id] - if !exists { - return false // return expired = false if timestamp does not exist (likely cleaned up by something else previously) +func (c *pendingTxContext) OnProcessed(sig solana.Signature) (string, error) { + err := c.withReadLock(func() error { + // validate if sig exists + id, sigExists := c.sigToID[sig] + if !sigExists { + return ErrSigDoesNotExist + } + // Transactions should only move to processed from broadcasted + tx, exists := c.broadcastedTxs[id] + if !exists { + return ErrTransactionNotFound + } + // Check if tranasction already in processed state + if tx.state == Processed { + return ErrAlreadyInExpectedState + } + return nil + }) + if err != nil { + return "", err } - return time.Since(timestamp) > lifespan + // upgrade to write lock if sig and id exist + return c.withWriteLock(func() 
(string, error) { + id, sigExists := c.sigToID[sig] + if !sigExists { + return id, ErrSigDoesNotExist + } + tx, exists := c.broadcastedTxs[id] + if !exists { + return id, ErrTransactionNotFound + } + tx = c.broadcastedTxs[id] + // update tx state to Processed + tx.state = Processed + // save updated tx back to the broadcasted map + c.broadcastedTxs[id] = tx + return id, nil + }) } -func (c *pendingTxContext) OnSuccess(sig solana.Signature) uuid.UUID { - return c.Remove(sig) +func (c *pendingTxContext) OnConfirmed(sig solana.Signature) (string, error) { + err := c.withReadLock(func() error { + // validate if sig exists + id, sigExists := c.sigToID[sig] + if !sigExists { + return ErrSigDoesNotExist + } + // Check if transaction already in confirmed state + if tx, exists := c.confirmedTxs[id]; exists && tx.state == Confirmed { + return ErrAlreadyInExpectedState + } + // Transactions should only move to confirmed from broadcasted/processed + if _, exists := c.broadcastedTxs[id]; !exists { + return ErrTransactionNotFound + } + return nil + }) + if err != nil { + return "", err + } + + // upgrade to write lock if id exists + return c.withWriteLock(func() (string, error) { + id, sigExists := c.sigToID[sig] + if !sigExists { + return id, ErrSigDoesNotExist + } + if _, exists := c.broadcastedTxs[id]; !exists { + return id, ErrTransactionNotFound + } + // call cancel func + remove from map to stop the retry/bumping cycle for this transaction + if cancel, exists := c.cancelBy[id]; exists { + cancel() // cancel context + delete(c.cancelBy, id) + } + tx := c.broadcastedTxs[id] + // update tx state to Confirmed + tx.state = Confirmed + // move tx to confirmed map + c.confirmedTxs[id] = tx + // remove tx from broadcasted map + delete(c.broadcastedTxs, id) + return id, nil + }) +} + +func (c *pendingTxContext) OnFinalized(sig solana.Signature, retentionTimeout time.Duration) (string, error) { + err := c.withReadLock(func() error { + id, sigExists := c.sigToID[sig] + if !sigExists 
{ + return ErrSigDoesNotExist + } + // Allow transactions to transition from broadcasted, processed, or confirmed state in case there are delays between status checks + _, broadcastedExists := c.broadcastedTxs[id] + _, confirmedExists := c.confirmedTxs[id] + if !broadcastedExists && !confirmedExists { + return ErrTransactionNotFound + } + return nil + }) + if err != nil { + return "", err + } + + // upgrade to write lock if id exists + return c.withWriteLock(func() (string, error) { + id, exists := c.sigToID[sig] + if !exists { + return id, ErrSigDoesNotExist + } + var tx, tempTx pendingTx + var broadcastedExists, confirmedExists bool + if tempTx, broadcastedExists = c.broadcastedTxs[id]; broadcastedExists { + tx = tempTx + } + if tempTx, confirmedExists = c.confirmedTxs[id]; confirmedExists { + tx = tempTx + } + if !broadcastedExists && !confirmedExists { + return id, ErrTransactionNotFound + } + // call cancel func + remove from map to stop the retry/bumping cycle for this transaction + // cancel is expected to be called and removed when tx is confirmed but checked here too in case state is skipped + if cancel, exists := c.cancelBy[id]; exists { + cancel() // cancel context + delete(c.cancelBy, id) + } + // delete from broadcasted map, if exists + delete(c.broadcastedTxs, id) + // delete from confirmed map, if exists + delete(c.confirmedTxs, id) + // remove all related signatures from the sigToID map to skip picking up this tx in the confirmation logic + for _, s := range tx.signatures { + delete(c.sigToID, s) + } + // if retention duration is set to 0, delete transaction from storage + // otherwise, move to finalized map + if retentionTimeout == 0 { + return id, nil + } + // set the timestamp till which the tx should be retained in storage + tx.retentionTs = time.Now().Add(retentionTimeout) + // update tx state to Finalized + tx.state = Finalized + // move transaction from confirmed to finalized map + c.finalizedErroredTxs[id] = tx + return id, nil + }) +} + 
+func (c *pendingTxContext) OnError(sig solana.Signature, retentionTimeout time.Duration, _ int) (string, error) { + err := c.withReadLock(func() error { + id, sigExists := c.sigToID[sig] + if !sigExists { + return ErrSigDoesNotExist + } + // transaction can transition from any non-finalized state + var broadcastedExists, confirmedExists bool + _, broadcastedExists = c.broadcastedTxs[id] + _, confirmedExists = c.confirmedTxs[id] + // transcation does not exist in any tx maps + if !broadcastedExists && !confirmedExists { + return ErrTransactionNotFound + } + return nil + }) + if err != nil { + return "", err + } + + // upgrade to write lock if sig exists + return c.withWriteLock(func() (string, error) { + id, exists := c.sigToID[sig] + if !exists { + return "", ErrSigDoesNotExist + } + var tx, tempTx pendingTx + var broadcastedExists, confirmedExists bool + if tempTx, broadcastedExists = c.broadcastedTxs[id]; broadcastedExists { + tx = tempTx + } + if tempTx, confirmedExists = c.confirmedTxs[id]; confirmedExists { + tx = tempTx + } + // transcation does not exist in any non-finalized maps + if !broadcastedExists && !confirmedExists { + return "", ErrTransactionNotFound + } + // call cancel func + remove from map + if cancel, exists := c.cancelBy[id]; exists { + cancel() // cancel context + delete(c.cancelBy, id) + } + // delete from broadcasted map, if exists + delete(c.broadcastedTxs, id) + // delete from confirmed map, if exists + delete(c.confirmedTxs, id) + // remove all related signatures from the sigToID map to skip picking up this tx in the confirmation logic + for _, s := range tx.signatures { + delete(c.sigToID, s) + } + // if retention duration is set to 0, delete transaction from storage + // otherwise, move to finalized map + if retentionTimeout == 0 { + return id, nil + } + // set the timestamp till which the tx should be retained in storage + tx.retentionTs = time.Now().Add(retentionTimeout) + // update tx state to Errored + tx.state = Errored + // 
move transaction from broadcasted to error map + c.finalizedErroredTxs[id] = tx + return id, nil + }) +} + +func (c *pendingTxContext) GetTxState(id string) (TxState, error) { + c.lock.RLock() + defer c.lock.RUnlock() + if tx, exists := c.broadcastedTxs[id]; exists { + return tx.state, nil + } + if tx, exists := c.confirmedTxs[id]; exists { + return tx.state, nil + } + if tx, exists := c.finalizedErroredTxs[id]; exists { + return tx.state, nil + } + return NotFound, fmt.Errorf("failed to find transaction for id: %s", id) } -func (c *pendingTxContext) OnError(sig solana.Signature, _ int) uuid.UUID { - return c.Remove(sig) +// TrimFinalizedErroredTxs deletes transactions from the finalized/errored map and the allTxs map after the retention period has passed +func (c *pendingTxContext) TrimFinalizedErroredTxs() { + c.lock.Lock() + defer c.lock.Unlock() + expiredIDs := make([]string, 0, len(c.finalizedErroredTxs)) + for id, tx := range c.finalizedErroredTxs { + if time.Now().After(tx.retentionTs) { + expiredIDs = append(expiredIDs, id) + } + } + for _, id := range expiredIDs { + delete(c.finalizedErroredTxs, id) + } +} + +func (c *pendingTxContext) withReadLock(fn func() error) error { + c.lock.RLock() + defer c.lock.RUnlock() + return fn() +} + +func (c *pendingTxContext) withWriteLock(fn func() (string, error)) (string, error) { + c.lock.Lock() + defer c.lock.Unlock() + return fn() } var _ PendingTxContext = &pendingTxContextWithProm{} @@ -184,15 +511,27 @@ func newPendingTxContextWithProm(id string) *pendingTxContextWithProm { } } -func (c *pendingTxContextWithProm) New(sig solana.Signature, cancel context.CancelFunc) (uuid.UUID, error) { - return c.pendingTx.New(sig, cancel) +func (c *pendingTxContextWithProm) New(msg pendingTx, sig solana.Signature, cancel context.CancelFunc) error { + return c.pendingTx.New(msg, sig, cancel) +} + +func (c *pendingTxContextWithProm) AddSignature(id string, sig solana.Signature) error { + return c.pendingTx.AddSignature(id, sig) +} 
+ +func (c *pendingTxContextWithProm) OnProcessed(sig solana.Signature) (string, error) { + return c.pendingTx.OnProcessed(sig) } -func (c *pendingTxContextWithProm) Add(id uuid.UUID, sig solana.Signature) error { - return c.pendingTx.Add(id, sig) +func (c *pendingTxContextWithProm) OnConfirmed(sig solana.Signature) (string, error) { + id, err := c.pendingTx.OnConfirmed(sig) // empty ID indicates already previously removed + if id != "" && err == nil { // increment if tx was not removed + promSolTxmSuccessTxs.WithLabelValues(c.chainID).Add(1) + } + return id, err } -func (c *pendingTxContextWithProm) Remove(sig solana.Signature) uuid.UUID { +func (c *pendingTxContextWithProm) Remove(sig solana.Signature) (string, error) { return c.pendingTx.Remove(sig) } @@ -206,25 +545,25 @@ func (c *pendingTxContextWithProm) Expired(sig solana.Signature, lifespan time.D return c.pendingTx.Expired(sig, lifespan) } -// Success - tx included in block and confirmed -func (c *pendingTxContextWithProm) OnSuccess(sig solana.Signature) uuid.UUID { - id := c.pendingTx.OnSuccess(sig) // empty ID indicates already previously removed - if id != uuid.Nil { // increment if tx was not removed - promSolTxmSuccessTxs.WithLabelValues(c.chainID).Add(1) +// Success - tx finalized +func (c *pendingTxContextWithProm) OnFinalized(sig solana.Signature, retentionTimeout time.Duration) (string, error) { + id, err := c.pendingTx.OnFinalized(sig, retentionTimeout) // empty ID indicates already previously removed + if id != "" && err == nil { // increment if tx was not removed + promSolTxmFinalizedTxs.WithLabelValues(c.chainID).Add(1) } - return id + return id, err } -func (c *pendingTxContextWithProm) OnError(sig solana.Signature, errType int) uuid.UUID { +func (c *pendingTxContextWithProm) OnError(sig solana.Signature, retentionTimeout time.Duration, errType int) (string, error) { // special RPC rejects transaction (signature will not be valid) if errType == TxFailReject { 
promSolTxmRejectTxs.WithLabelValues(c.chainID).Add(1) promSolTxmErrorTxs.WithLabelValues(c.chainID).Add(1) - return uuid.Nil + return "", nil } - id := c.pendingTx.OnError(sig, errType) // empty ID indicates already removed - if id != uuid.Nil { + id, err := c.pendingTx.OnError(sig, retentionTimeout, errType) // err indicates transaction not found so may already be removed + if err == nil { switch errType { case TxFailRevert: promSolTxmRevertTxs.WithLabelValues(c.chainID).Add(1) @@ -246,5 +585,13 @@ func (c *pendingTxContextWithProm) OnError(sig solana.Signature, errType int) uu promSolTxmErrorTxs.WithLabelValues(c.chainID).Add(1) } - return id + return id, err +} + +func (c *pendingTxContextWithProm) GetTxState(id string) (TxState, error) { + return c.pendingTx.GetTxState(id) +} + +func (c *pendingTxContextWithProm) TrimFinalizedErroredTxs() { + c.pendingTx.TrimFinalizedErroredTxs() } diff --git a/pkg/solana/txm/pendingtx_test.go b/pkg/solana/txm/pendingtx_test.go index 5639bff59..b1212ca21 100644 --- a/pkg/solana/txm/pendingtx_test.go +++ b/pkg/solana/txm/pendingtx_test.go @@ -15,15 +15,13 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" ) -func TestPendingTxContext(t *testing.T) { +func TestPendingTxContext_add_remove_multiple(t *testing.T) { var wg sync.WaitGroup ctx := tests.Context(t) - newProcess := func(i int) (solana.Signature, context.CancelFunc) { + newProcess := func() (solana.Signature, context.CancelFunc) { // make random signature - sig := make([]byte, 64) - _, err := rand.Read(sig) - require.NoError(t, err) + sig := randomSignature(t) // start subprocess to wait for context processCtx, cancel := context.WithCancel(ctx) @@ -32,22 +30,23 @@ func TestPendingTxContext(t *testing.T) { <-processCtx.Done() wg.Done() }() - return solana.SignatureFromBytes(sig), cancel + return sig, cancel } // init inflight txs map + store some signatures and cancelFunc txs := newPendingTxContext() - ids := map[solana.Signature]uuid.UUID{} + ids := 
map[solana.Signature]string{} n := 5 for i := 0; i < n; i++ { - sig, cancel := newProcess(i) - id, err := txs.New(sig, cancel) + sig, cancel := newProcess() + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) assert.NoError(t, err) - ids[sig] = id + ids[sig] = msg.id } // cannot add signature for non existent ID - require.Error(t, txs.Add(uuid.New(), solana.Signature{})) + require.Error(t, txs.AddSignature(uuid.New().String(), solana.Signature{})) // return list of signatures list := txs.ListAll() @@ -55,28 +54,845 @@ func TestPendingTxContext(t *testing.T) { // stop all sub processes for i := 0; i < len(list); i++ { - id := txs.Remove(list[i]) + id, err := txs.Remove(list[i]) + assert.NoError(t, err) assert.Equal(t, n-i-1, len(txs.ListAll())) assert.Equal(t, ids[list[i]], id) // second remove should not return valid id - already removed - assert.Equal(t, uuid.Nil, txs.Remove(list[i])) + id, err = txs.Remove(list[i]) + require.Error(t, err) + assert.Equal(t, "", id) } wg.Wait() } +func TestPendingTxContext_new(t *testing.T) { + t.Parallel() + _, cancel := context.WithCancel(tests.Context(t)) + sig := randomSignature(t) + txs := newPendingTxContext() + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Check it exists in signature map + id, exists := txs.sigToID[sig] + require.True(t, exists) + require.Equal(t, msg.id, id) + + // Check it exists in broadcasted map + tx, exists := txs.broadcastedTxs[msg.id] + require.True(t, exists) + require.Len(t, tx.signatures, 1) + require.Equal(t, sig, tx.signatures[0]) + + // Check status is Broadcasted + require.Equal(t, Broadcasted, tx.state) + + // Check it does not exist in confirmed map + tx, exists = txs.confirmedTxs[msg.id] + require.False(t, exists) + + // Check it does not exist in finalized map + tx, exists = txs.finalizedErroredTxs[msg.id] + require.False(t, exists) +} + +func 
TestPendingTxContext_add_signature(t *testing.T) { + t.Parallel() + _, cancel := context.WithCancel(tests.Context(t)) + txs := newPendingTxContext() + + t.Run("successfully add signature to transaction", func(t *testing.T) { + sig1 := randomSignature(t) + sig2 := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig1, cancel) + require.NoError(t, err) + + err = txs.AddSignature(msg.id, sig2) + require.NoError(t, err) + + // Check signature map + id, exists := txs.sigToID[sig1] + require.True(t, exists) + require.Equal(t, msg.id, id) + id, exists = txs.sigToID[sig2] + require.True(t, exists) + require.Equal(t, msg.id, id) + + // Check broadcasted map + tx, exists := txs.broadcastedTxs[msg.id] + require.True(t, exists) + require.Len(t, tx.signatures, 2) + require.Equal(t, sig1, tx.signatures[0]) + require.Equal(t, sig2, tx.signatures[1]) + + // Check confirmed map + tx, exists = txs.confirmedTxs[msg.id] + require.False(t, exists) + + // Check finalized map + tx, exists = txs.finalizedErroredTxs[msg.id] + require.False(t, exists) + }) + + t.Run("fails to add duplicate signature", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + err = txs.AddSignature(msg.id, sig) + require.ErrorIs(t, err, ErrSigAlreadyExists) + }) + + t.Run("fails to add signature for missing transaction", func(t *testing.T) { + sig1 := randomSignature(t) + sig2 := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig1, cancel) + require.NoError(t, err) + + err = txs.AddSignature("bad id", sig2) + require.ErrorIs(t, err, ErrTransactionNotFound) + }) + + t.Run("fails to add signature for confirmed transaction", func(t *testing.T) { + sig1 := randomSignature(t) + sig2 := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: 
uuid.NewString()} + err := txs.New(msg, sig1, cancel) + require.NoError(t, err) + + // Transition to processed state + id, err := txs.OnProcessed(sig1) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition to confirmed state + id, err = txs.OnConfirmed(sig1) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + err = txs.AddSignature(msg.id, sig2) + require.ErrorIs(t, err, ErrTransactionNotFound) + }) +} + +func TestPendingTxContext_on_broadcasted_processed(t *testing.T) { + t.Parallel() + _, cancel := context.WithCancel(tests.Context(t)) + txs := newPendingTxContext() + retentionTimeout := 5 * time.Second + + t.Run("successfully transition transaction from broadcasted to processed state", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Transition to processed state + id, err := txs.OnProcessed(sig) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Check it exists in signature map + id, exists := txs.sigToID[sig] + require.True(t, exists) + require.Equal(t, msg.id, id) + + // Check it exists in broadcasted map + tx, exists := txs.broadcastedTxs[msg.id] + require.True(t, exists) + require.Len(t, tx.signatures, 1) + require.Equal(t, sig, tx.signatures[0]) + + // Check status is Processed + require.Equal(t, Processed, tx.state) + + // Check it does not exist in confirmed map + tx, exists = txs.confirmedTxs[msg.id] + require.False(t, exists) + + // Check it does not exist in finalized map + tx, exists = txs.finalizedErroredTxs[msg.id] + require.False(t, exists) + }) + + t.Run("fails to transition transaction from confirmed to processed state", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Transition to processed state + id, err := txs.OnProcessed(sig) 
+ require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition to confirmed state + id, err = txs.OnConfirmed(sig) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition back to processed state + _, err = txs.OnProcessed(sig) + require.Error(t, err) + }) + + t.Run("fails to transition transaction from finalized to processed state", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Transition to processed state + id, err := txs.OnProcessed(sig) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition to confirmed state + id, err = txs.OnConfirmed(sig) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition to finalized state + id, err = txs.OnFinalized(sig, retentionTimeout) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition back to processed state + _, err = txs.OnProcessed(sig) + require.Error(t, err) + }) + + t.Run("fails to transition transaction from errored to processed state", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Transition to errored state + id, err := txs.OnError(sig, retentionTimeout, 0) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition back to processed state + _, err = txs.OnProcessed(sig) + require.Error(t, err) + }) + + t.Run("predefined error if transaction already in processed state", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Transition to processed state + id, err := txs.OnProcessed(sig) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // No error if OnProcessed called again + _, err = 
txs.OnProcessed(sig) + require.ErrorIs(t, err, ErrAlreadyInExpectedState) + }) +} + +func TestPendingTxContext_on_confirmed(t *testing.T) { + t.Parallel() + _, cancel := context.WithCancel(tests.Context(t)) + txs := newPendingTxContext() + retentionTimeout := 5 * time.Second + + t.Run("successfully transition transaction from broadcasted/processed to confirmed state", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Transition to processed state + id, err := txs.OnProcessed(sig) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition to confirmed state + id, err = txs.OnConfirmed(sig) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Check it exists in signature map + id, exists := txs.sigToID[sig] + require.True(t, exists) + require.Equal(t, msg.id, id) + + // Check it does not exist in broadcasted map + _, exists = txs.broadcastedTxs[msg.id] + require.False(t, exists) + + // Check it exists in confirmed map + tx, exists := txs.confirmedTxs[msg.id] + require.True(t, exists) + require.Len(t, tx.signatures, 1) + require.Equal(t, sig, tx.signatures[0]) + + // Check status is Confirmed + require.Equal(t, Confirmed, tx.state) + + // Check it does not exist in finalized map + tx, exists = txs.finalizedErroredTxs[msg.id] + require.False(t, exists) + }) + + t.Run("fails to transition transaction from finalized to confirmed state", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Transition to processed state + id, err := txs.OnProcessed(sig) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition to confirmed state + id, err = txs.OnConfirmed(sig) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition to finalized state + id, 
err = txs.OnFinalized(sig, retentionTimeout) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition back to processed state + _, err = txs.OnConfirmed(sig) + require.Error(t, err) + }) + + t.Run("fails to transition transaction from errored to confirmed state", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Transition to errored state + id, err := txs.OnError(sig, retentionTimeout, 0) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition back to confirmed state + _, err = txs.OnConfirmed(sig) + require.Error(t, err) + }) + + t.Run("predefined error if transaction already in confirmed state", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Transition to processed state + id, err := txs.OnProcessed(sig) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition to confirmed state + id, err = txs.OnConfirmed(sig) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // No error if OnConfirmed called again + _, err = txs.OnConfirmed(sig) + require.ErrorIs(t, err, ErrAlreadyInExpectedState) + }) +} + +func TestPendingTxContext_on_finalized(t *testing.T) { + t.Parallel() + _, cancel := context.WithCancel(tests.Context(t)) + txs := newPendingTxContext() + retentionTimeout := 5 * time.Second + + t.Run("successfully transition transaction from broadcasted/processed to finalized state", func(t *testing.T) { + sig1 := randomSignature(t) + sig2 := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig1, cancel) + require.NoError(t, err) + + // Add second signature + err = txs.AddSignature(msg.id, sig2) + require.NoError(t, err) + + // Transition to finalized state + id, err := 
txs.OnFinalized(sig1, retentionTimeout) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Check it does not exist in broadcasted map + _, exists := txs.broadcastedTxs[msg.id] + require.False(t, exists) + + // Check it does not exist in confirmed map + _, exists = txs.confirmedTxs[msg.id] + require.False(t, exists) + + // Check it exists in finalized map + tx, exists := txs.finalizedErroredTxs[msg.id] + require.True(t, exists) + require.Len(t, tx.signatures, 2) + require.Equal(t, sig1, tx.signatures[0]) + require.Equal(t, sig2, tx.signatures[1]) + + // Check status is Finalized + require.Equal(t, Finalized, tx.state) + + // Check sigs do no exist in signature map + _, exists = txs.sigToID[sig1] + require.False(t, exists) + _, exists = txs.sigToID[sig2] + require.False(t, exists) + }) + + t.Run("successfully transition transaction from confirmed to finalized state", func(t *testing.T) { + sig1 := randomSignature(t) + sig2 := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig1, cancel) + require.NoError(t, err) + + // Add second signature + err = txs.AddSignature(msg.id, sig2) + require.NoError(t, err) + + // Transition to processed state + id, err := txs.OnProcessed(sig1) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition to confirmed state + id, err = txs.OnConfirmed(sig1) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition to finalized state + id, err = txs.OnFinalized(sig1, retentionTimeout) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Check it does not exist in broadcasted map + _, exists := txs.broadcastedTxs[msg.id] + require.False(t, exists) + + // Check it does not exist in confirmed map + _, exists = txs.confirmedTxs[msg.id] + require.False(t, exists) + + // Check it exists in finalized map + tx, exists := txs.finalizedErroredTxs[msg.id] + require.True(t, exists) + require.Len(t, tx.signatures, 2) + require.Equal(t, 
sig1, tx.signatures[0]) + require.Equal(t, sig2, tx.signatures[1]) + + // Check status is Finalized + require.Equal(t, Finalized, tx.state) + + // Check sigs do no exist in signature map + _, exists = txs.sigToID[sig1] + require.False(t, exists) + _, exists = txs.sigToID[sig2] + require.False(t, exists) + }) + + t.Run("successfully delete transaction when finalized with 0 retention timeout", func(t *testing.T) { + sig1 := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig1, cancel) + require.NoError(t, err) + + // Transition to processed state + id, err := txs.OnProcessed(sig1) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition to confirmed state + id, err = txs.OnConfirmed(sig1) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition to finalized state + id, err = txs.OnFinalized(sig1, 0*time.Second) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Check it does not exist in broadcasted map + _, exists := txs.broadcastedTxs[msg.id] + require.False(t, exists) + + // Check it does not exist in confirmed map + _, exists = txs.confirmedTxs[msg.id] + require.False(t, exists) + + // Check it does not exist in finalized map + _, exists = txs.finalizedErroredTxs[msg.id] + require.False(t, exists) + + // Check sigs do no exist in signature map + _, exists = txs.sigToID[sig1] + require.False(t, exists) + }) + + t.Run("fails to transition transaction from errored to finalized state", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Transition to errored state + id, err := txs.OnError(sig, retentionTimeout, 0) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition back to confirmed state + _, err = txs.OnFinalized(sig, retentionTimeout) + require.Error(t, err) + }) +} + +func 
TestPendingTxContext_on_error(t *testing.T) { + t.Parallel() + _, cancel := context.WithCancel(tests.Context(t)) + txs := newPendingTxContext() + retentionTimeout := 5 * time.Second + + t.Run("successfully transition transaction from broadcasted/processed to errored state", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Transition to errored state + id, err := txs.OnError(sig, retentionTimeout, 0) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Check it does not exist in broadcasted map + _, exists := txs.broadcastedTxs[msg.id] + require.False(t, exists) + + // Check it does not exist in confirmed map + _, exists = txs.confirmedTxs[msg.id] + require.False(t, exists) + + // Check it exists in errored map + tx, exists := txs.finalizedErroredTxs[msg.id] + require.True(t, exists) + require.Len(t, tx.signatures, 1) + require.Equal(t, sig, tx.signatures[0]) + + // Check status is Finalized + require.Equal(t, Errored, tx.state) + + // Check sigs do no exist in signature map + _, exists = txs.sigToID[sig] + require.False(t, exists) + }) + + t.Run("successfully transitions transaction from confirmed to errored state", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Transition to errored state + id, err := txs.OnConfirmed(sig) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition to errored state + id, err = txs.OnError(sig, retentionTimeout, 0) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Check it does not exist in broadcasted map + _, exists := txs.broadcastedTxs[msg.id] + require.False(t, exists) + + // Check it does not exist in confirmed map + _, exists = txs.confirmedTxs[msg.id] + require.False(t, exists) + + // Check it exists in errored 
map + tx, exists := txs.finalizedErroredTxs[msg.id] + require.True(t, exists) + require.Len(t, tx.signatures, 1) + require.Equal(t, sig, tx.signatures[0]) + + // Check status is Finalized + require.Equal(t, Errored, tx.state) + + // Check sigs do no exist in signature map + _, exists = txs.sigToID[sig] + require.False(t, exists) + }) + + t.Run("successfully delete transaction when errored with 0 retention timeout", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Transition to errored state + id, err := txs.OnConfirmed(sig) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition to errored state + id, err = txs.OnError(sig, 0*time.Second, 0) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Check it does not exist in broadcasted map + _, exists := txs.broadcastedTxs[msg.id] + require.False(t, exists) + + // Check it does not exist in confirmed map + _, exists = txs.confirmedTxs[msg.id] + require.False(t, exists) + + // Check it exists in errored map + _, exists = txs.finalizedErroredTxs[msg.id] + require.False(t, exists) + + // Check sigs do no exist in signature map + _, exists = txs.sigToID[sig] + require.False(t, exists) + }) + + t.Run("fails to transition transaction from finalized to errored state", func(t *testing.T) { + sig := randomSignature(t) + + // Create new transaction + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) + require.NoError(t, err) + + // Transition to confirmed state + id, err := txs.OnFinalized(sig, retentionTimeout) + require.NoError(t, err) + require.Equal(t, msg.id, id) + + // Transition back to confirmed state + id, err = txs.OnError(sig, retentionTimeout, 0) + require.Error(t, err) + require.Equal(t, "", id) + }) +} + +func TestPendingTxContext_remove(t *testing.T) { + t.Parallel() + _, cancel := context.WithCancel(tests.Context(t)) + + 
txs := newPendingTxContext() + retentionTimeout := 5 * time.Second + + broadcastedSig1 := randomSignature(t) + broadcastedSig2 := randomSignature(t) + processedSig := randomSignature(t) + confirmedSig := randomSignature(t) + finalizedSig := randomSignature(t) + erroredSig := randomSignature(t) + + // Create new broadcasted transaction with extra sig + broadcastedMsg := pendingTx{id: uuid.NewString()} + err := txs.New(broadcastedMsg, broadcastedSig1, cancel) + require.NoError(t, err) + err = txs.AddSignature(broadcastedMsg.id, broadcastedSig2) + require.NoError(t, err) + + // Create new processed transaction + processedMsg := pendingTx{id: uuid.NewString()} + err = txs.New(processedMsg, processedSig, cancel) + require.NoError(t, err) + id, err := txs.OnProcessed(processedSig) + require.NoError(t, err) + require.Equal(t, processedMsg.id, id) + + // Create new confirmed transaction + confirmedMsg := pendingTx{id: uuid.NewString()} + err = txs.New(confirmedMsg, confirmedSig, cancel) + require.NoError(t, err) + id, err = txs.OnConfirmed(confirmedSig) + require.NoError(t, err) + require.Equal(t, confirmedMsg.id, id) + + // Create new finalized transaction + finalizedMsg := pendingTx{id: uuid.NewString()} + err = txs.New(finalizedMsg, finalizedSig, cancel) + require.NoError(t, err) + id, err = txs.OnFinalized(finalizedSig, retentionTimeout) + require.NoError(t, err) + require.Equal(t, finalizedMsg.id, id) + + // Create new errored transaction + erroredMsg := pendingTx{id: uuid.NewString()} + err = txs.New(erroredMsg, erroredSig, cancel) + require.NoError(t, err) + id, err = txs.OnError(erroredSig, retentionTimeout, 0) + require.NoError(t, err) + require.Equal(t, erroredMsg.id, id) + + // Remove broadcasted transaction + id, err = txs.Remove(broadcastedSig1) + require.NoError(t, err) + require.Equal(t, broadcastedMsg.id, id) + // Check removed from broadcasted map + _, exists := txs.broadcastedTxs[broadcastedMsg.id] + require.False(t, exists) + // Check all signatures 
removed from sig map + _, exists = txs.sigToID[broadcastedSig1] + require.False(t, exists) + _, exists = txs.sigToID[broadcastedSig2] + require.False(t, exists) + + // Remove processed transaction + id, err = txs.Remove(processedSig) + require.NoError(t, err) + require.Equal(t, processedMsg.id, id) + // Check removed from broadcasted map + _, exists = txs.broadcastedTxs[processedMsg.id] + require.False(t, exists) + // Check all signatures removed from sig map + _, exists = txs.sigToID[processedSig] + require.False(t, exists) + + // Remove confirmed transaction + id, err = txs.Remove(confirmedSig) + require.NoError(t, err) + require.Equal(t, confirmedMsg.id, id) + // Check removed from confirmed map + _, exists = txs.confirmedTxs[confirmedMsg.id] + require.False(t, exists) + // Check all signatures removed from sig map + _, exists = txs.sigToID[confirmedSig] + require.False(t, exists) + + // Check remove cannot be called on finalized transaction + id, err = txs.Remove(finalizedSig) + require.Error(t, err) + require.Equal(t, "", id) + + // Check remove cannot be called on errored transaction + id, err = txs.Remove(erroredSig) + require.Error(t, err) + require.Equal(t, "", id) + + // Check sig list is empty after all removals + require.Empty(t, txs.ListAll()) +} +func TestPendingTxContext_trim_finalized_errored_txs(t *testing.T) { + t.Parallel() + txs := newPendingTxContext() + + // Create new finalized transaction with retention ts in the past and add to map + finalizedMsg1 := pendingTx{id: uuid.NewString(), retentionTs: time.Now().Add(-2 * time.Second)} + txs.finalizedErroredTxs[finalizedMsg1.id] = finalizedMsg1 + + // Create new finalized transaction with retention ts in the future and add to map + finalizedMsg2 := pendingTx{id: uuid.NewString(), retentionTs: time.Now().Add(1 * time.Second)} + txs.finalizedErroredTxs[finalizedMsg2.id] = finalizedMsg2 + + // Create new finalized transaction with retention ts in the past and add to map + erroredMsg := pendingTx{id: 
uuid.NewString(), retentionTs: time.Now().Add(-2 * time.Second)} + txs.finalizedErroredTxs[erroredMsg.id] = erroredMsg + + // Delete finalized/errored transactions that have passed the retention period + txs.TrimFinalizedErroredTxs() + + // Check finalized message past retention is deleted + _, exists := txs.finalizedErroredTxs[finalizedMsg1.id] + require.False(t, exists) + + // Check errored message past retention is deleted + _, exists = txs.finalizedErroredTxs[erroredMsg.id] + require.False(t, exists) + + // Check finalized message within retention period still exists + msg, exists := txs.finalizedErroredTxs[finalizedMsg2.id] + require.True(t, exists) + require.Equal(t, finalizedMsg2.id, msg.id) +} + func TestPendingTxContext_expired(t *testing.T) { + t.Parallel() _, cancel := context.WithCancel(tests.Context(t)) sig := solana.Signature{} txs := newPendingTxContext() - id, err := txs.New(sig, cancel) + msg := pendingTx{id: uuid.NewString()} + err := txs.New(msg, sig, cancel) assert.NoError(t, err) - assert.True(t, txs.Expired(sig, 0*time.Second)) // expired for 0s lifetime + msg, exists := txs.broadcastedTxs[msg.id] + require.True(t, exists) + + // Set createTs to 10 seconds ago + msg.createTs = time.Now().Add(-10 * time.Second) + txs.broadcastedTxs[msg.id] = msg + + assert.False(t, txs.Expired(sig, 0*time.Second)) // false if timeout 0 + assert.True(t, txs.Expired(sig, 5*time.Second)) // expired for 5s lifetime assert.False(t, txs.Expired(sig, 60*time.Second)) // not expired for 60s lifetime - assert.Equal(t, id, txs.Remove(sig)) + id, err := txs.Remove(sig) + assert.NoError(t, err) + assert.Equal(t, msg.id, id) assert.False(t, txs.Expired(sig, 60*time.Second)) // no longer exists, should return false } @@ -88,11 +904,11 @@ func TestPendingTxContext_race(t *testing.T) { var err [2]error go func() { - _, err[0] = txCtx.New(solana.Signature{}, func() {}) + err[0] = txCtx.New(pendingTx{id: uuid.NewString()}, solana.Signature{}, func() {}) wg.Done() }() go func() { 
- _, err[1] = txCtx.New(solana.Signature{}, func() {}) + err[1] = txCtx.New(pendingTx{id: uuid.NewString()}, solana.Signature{}, func() {}) wg.Done() }() @@ -100,20 +916,21 @@ func TestPendingTxContext_race(t *testing.T) { assert.True(t, (err[0] != nil && err[1] == nil) || (err[0] == nil && err[1] != nil), "one and only one 'add' should have errored") }) - t.Run("add", func(t *testing.T) { + t.Run("add signature", func(t *testing.T) { txCtx := newPendingTxContext() - id, createErr := txCtx.New(solana.Signature{}, func() {}) + msg := pendingTx{id: uuid.NewString()} + createErr := txCtx.New(msg, solana.Signature{}, func() {}) require.NoError(t, createErr) var wg sync.WaitGroup wg.Add(2) var err [2]error go func() { - err[0] = txCtx.Add(id, solana.Signature{1}) + err[0] = txCtx.AddSignature(msg.id, solana.Signature{1}) wg.Done() }() go func() { - err[1] = txCtx.Add(id, solana.Signature{1}) + err[1] = txCtx.AddSignature(msg.id, solana.Signature{1}) wg.Done() }() @@ -123,20 +940,102 @@ func TestPendingTxContext_race(t *testing.T) { t.Run("remove", func(t *testing.T) { txCtx := newPendingTxContext() - _, err := txCtx.New(solana.Signature{}, func() {}) + msg := pendingTx{id: uuid.NewString()} + err := txCtx.New(msg, solana.Signature{}, func() {}) require.NoError(t, err) var wg sync.WaitGroup wg.Add(2) go func() { - assert.NotPanics(t, func() { txCtx.Remove(solana.Signature{}) }) + assert.NotPanics(t, func() { txCtx.Remove(solana.Signature{}) }) //nolint // no need to check error wg.Done() }() go func() { - assert.NotPanics(t, func() { txCtx.Remove(solana.Signature{}) }) + assert.NotPanics(t, func() { txCtx.Remove(solana.Signature{}) }) //nolint // no need to check error wg.Done() }() wg.Wait() }) } + +func TestGetTxState(t *testing.T) { + t.Parallel() + _, cancel := context.WithCancel(tests.Context(t)) + txs := newPendingTxContext() + retentionTimeout := 5 * time.Second + + broadcastedSig := randomSignature(t) + processedSig := randomSignature(t) + confirmedSig := 
randomSignature(t) + finalizedSig := randomSignature(t) + erroredSig := randomSignature(t) + + // Create new broadcasted transaction with extra sig + broadcastedMsg := pendingTx{id: uuid.NewString()} + err := txs.New(broadcastedMsg, broadcastedSig, cancel) + require.NoError(t, err) + + var state TxState + // Create new processed transaction + processedMsg := pendingTx{id: uuid.NewString()} + err = txs.New(processedMsg, processedSig, cancel) + require.NoError(t, err) + id, err := txs.OnProcessed(processedSig) + require.NoError(t, err) + require.Equal(t, processedMsg.id, id) + // Check Processed state is returned + state, err = txs.GetTxState(processedMsg.id) + require.NoError(t, err) + require.Equal(t, Processed, state) + + // Create new confirmed transaction + confirmedMsg := pendingTx{id: uuid.NewString()} + err = txs.New(confirmedMsg, confirmedSig, cancel) + require.NoError(t, err) + id, err = txs.OnConfirmed(confirmedSig) + require.NoError(t, err) + require.Equal(t, confirmedMsg.id, id) + // Check Confirmed state is returned + state, err = txs.GetTxState(confirmedMsg.id) + require.NoError(t, err) + require.Equal(t, Confirmed, state) + + // Create new finalized transaction + finalizedMsg := pendingTx{id: uuid.NewString()} + err = txs.New(finalizedMsg, finalizedSig, cancel) + require.NoError(t, err) + id, err = txs.OnFinalized(finalizedSig, retentionTimeout) + require.NoError(t, err) + require.Equal(t, finalizedMsg.id, id) + // Check Finalized state is returned + state, err = txs.GetTxState(finalizedMsg.id) + require.NoError(t, err) + require.Equal(t, Finalized, state) + + // Create new errored transaction + erroredMsg := pendingTx{id: uuid.NewString()} + err = txs.New(erroredMsg, erroredSig, cancel) + require.NoError(t, err) + id, err = txs.OnError(erroredSig, retentionTimeout, 0) + require.NoError(t, err) + require.Equal(t, erroredMsg.id, id) + // Check Errored state is returned + state, err = txs.GetTxState(erroredMsg.id) + require.NoError(t, err) + 
require.Equal(t, Errored, state) + + // Check NotFound state is returned if unknown id provided + state, err = txs.GetTxState("unknown id") + require.Error(t, err) + require.Equal(t, NotFound, state) +} + +func randomSignature(t *testing.T) solana.Signature { + // make random signature + sig := make([]byte, 64) + _, err := rand.Read(sig) + require.NoError(t, err) + + return solana.SignatureFromBytes(sig) +} diff --git a/pkg/solana/txm/prom.go b/pkg/solana/txm/prom.go index 59c74b687..dcc686be8 100644 --- a/pkg/solana/txm/prom.go +++ b/pkg/solana/txm/prom.go @@ -11,6 +11,10 @@ var ( Name: "solana_txm_tx_success", Help: "Number of transactions that are included and successfully executed on chain", }, []string{"chainID"}) + promSolTxmFinalizedTxs = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "solana_txm_tx_finalized", + Help: "Number of transactions that are finalized on chain", + }, []string{"chainID"}) // inflight transactions promSolTxmPendingTxs = promauto.NewGaugeVec(prometheus.GaugeOpts{ diff --git a/pkg/solana/txm/txm.go b/pkg/solana/txm/txm.go index 7cd09cf5e..2a99a6c44 100644 --- a/pkg/solana/txm/txm.go +++ b/pkg/solana/txm/txm.go @@ -17,6 +17,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/loop" "github.com/smartcontractkit/chainlink-common/pkg/services" + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" "github.com/smartcontractkit/chainlink-common/pkg/utils" bigmath "github.com/smartcontractkit/chainlink-common/pkg/utils/big_math" @@ -28,9 +29,10 @@ import ( const ( MaxQueueLen = 1000 - MaxRetryTimeMs = 250 // max tx retry time (exponential retry will taper to retry every 0.25s) - MaxSigsToConfirm = 256 // max number of signatures in GetSignatureStatus call - EstimateComputeUnitLimitBuffer = 10 // percent buffer added on top of estimated compute unit limits to account for any variance + MaxRetryTimeMs = 250 // max tx retry time (exponential retry will 
taper to retry every 0.25s) + MaxSigsToConfirm = 256 // max number of signatures in GetSignatureStatus call + EstimateComputeUnitLimitBuffer = 10 // percent buffer added on top of estimated compute unit limits to account for any variance + TxReapInterval = 10 * time.Second // interval of time between reaping transactions that have met the retention threshold ) var _ services.Service = (*Txm)(nil) @@ -75,13 +77,6 @@ type TxConfig struct { ComputeUnitLimit uint32 // compute unit limit } -type pendingTx struct { - tx *solanaGo.Transaction - cfg TxConfig - signature solanaGo.Signature - id uuid.UUID -} - // NewTxm creates a txm. Uses simulation so should only be used to send txes to trusted contracts i.e. OCR. func NewTxm(chainID string, client internal.Loader[client.ReaderWriter], sendTx func(ctx context.Context, tx *solanaGo.Transaction) (solanaGo.Signature, error), @@ -136,6 +131,12 @@ func (txm *Txm) Start(ctx context.Context) error { go txm.run() go txm.confirm() go txm.simulate() + // Start reaping loop only if TxRetentionTimeout > 0 + // Otherwise, transactions are dropped immediately after finalization so the loop is not required + if txm.cfg.TxRetentionTimeout() > 0 { + txm.done.Add(1) // waitgroup: reaper + go txm.reap() + } return nil }) @@ -150,7 +151,7 @@ func (txm *Txm) run() { select { case msg := <-txm.chSend: // process tx (pass tx copy) - tx, id, sig, err := txm.sendWithRetry(ctx, *msg.tx, msg.cfg) + tx, id, sig, err := txm.sendWithRetry(ctx, msg) if err != nil { txm.lggr.Errorw("failed to send transaction", "error", err) txm.client.Reset() // clear client if tx fails immediately (potentially bad RPC) @@ -158,13 +159,13 @@ func (txm *Txm) run() { } // send tx + signature to simulation queue - msg.tx = &tx - msg.signature = sig + msg.tx = tx + msg.signatures = append(msg.signatures, sig) msg.id = id select { case txm.chSim <- msg: default: - txm.lggr.Warnw("failed to enqeue tx for simulation", "queueFull", len(txm.chSend) == MaxQueueLen, "tx", msg) + 
txm.lggr.Warnw("failed to enqueue tx for simulation", "queueFull", len(txm.chSend) == MaxQueueLen, "tx", msg) } txm.lggr.Debugw("transaction sent", "signature", sig.String(), "id", id) @@ -174,29 +175,31 @@ func (txm *Txm) run() { } } -func (txm *Txm) sendWithRetry(ctx context.Context, baseTx solanaGo.Transaction, txcfg TxConfig) (solanaGo.Transaction, uuid.UUID, solanaGo.Signature, error) { +func (txm *Txm) sendWithRetry(ctx context.Context, msg pendingTx) (solanaGo.Transaction, string, solanaGo.Signature, error) { // get key // fee payer account is index 0 account // https://github.com/gagliardetto/solana-go/blob/main/transaction.go#L252 - key := baseTx.Message.AccountKeys[0].String() + key := msg.tx.Message.AccountKeys[0].String() // base compute unit price should only be calculated once // prevent underlying base changing when bumping (could occur with RPC based estimation) getFee := func(count int) fees.ComputeUnitPrice { fee := fees.CalculateFee( - txcfg.BaseComputeUnitPrice, - txcfg.ComputeUnitPriceMax, - txcfg.ComputeUnitPriceMin, + msg.cfg.BaseComputeUnitPrice, + msg.cfg.ComputeUnitPriceMax, + msg.cfg.ComputeUnitPriceMin, uint(count), //nolint:gosec // reasonable number of bumps should never cause overflow ) return fees.ComputeUnitPrice(fee) } + baseTx := msg.tx + // add compute unit limit instruction - static for the transaction // skip if compute unit limit = 0 (otherwise would always fail) - if txcfg.ComputeUnitLimit != 0 { - if computeUnitLimitErr := fees.SetComputeUnitLimit(&baseTx, fees.ComputeUnitLimit(txcfg.ComputeUnitLimit)); computeUnitLimitErr != nil { - return solanaGo.Transaction{}, uuid.Nil, solanaGo.Signature{}, fmt.Errorf("failed to add compute unit limit instruction: %w", computeUnitLimitErr) + if msg.cfg.ComputeUnitLimit != 0 { + if computeUnitLimitErr := fees.SetComputeUnitLimit(&baseTx, fees.ComputeUnitLimit(msg.cfg.ComputeUnitLimit)); computeUnitLimitErr != nil { + return solanaGo.Transaction{}, "", solanaGo.Signature{}, 
fmt.Errorf("failed to add compute unit limit instruction: %w", computeUnitLimitErr) } } @@ -227,35 +230,35 @@ func (txm *Txm) sendWithRetry(ctx context.Context, baseTx solanaGo.Transaction, initTx, initBuildErr := buildTx(ctx, baseTx, 0) if initBuildErr != nil { - return solanaGo.Transaction{}, uuid.Nil, solanaGo.Signature{}, initBuildErr + return solanaGo.Transaction{}, "", solanaGo.Signature{}, initBuildErr } // create timeout context - ctx, cancel := context.WithTimeout(ctx, txcfg.Timeout) + ctx, cancel := context.WithTimeout(ctx, msg.cfg.Timeout) // send initial tx (do not retry and exit early if fails) sig, initSendErr := txm.sendTx(ctx, &initTx) if initSendErr != nil { - cancel() // cancel context when exiting early - txm.txs.OnError(sig, TxFailReject) // increment failed metric - return solanaGo.Transaction{}, uuid.Nil, solanaGo.Signature{}, fmt.Errorf("tx failed initial transmit: %w", initSendErr) + cancel() // cancel context when exiting early + txm.txs.OnError(sig, txm.cfg.TxRetentionTimeout(), TxFailReject) //nolint // no need to check error since only incrementing metric here + return solanaGo.Transaction{}, "", solanaGo.Signature{}, fmt.Errorf("tx failed initial transmit: %w", initSendErr) } // store tx signature + cancel function - id, initStoreErr := txm.txs.New(sig, cancel) + initStoreErr := txm.txs.New(msg, sig, cancel) if initStoreErr != nil { cancel() // cancel context when exiting early - return solanaGo.Transaction{}, uuid.Nil, solanaGo.Signature{}, fmt.Errorf("failed to save tx signature (%s) to inflight txs: %w", sig, initStoreErr) + return solanaGo.Transaction{}, "", solanaGo.Signature{}, fmt.Errorf("failed to save tx signature (%s) to inflight txs: %w", sig, initStoreErr) } // used for tracking rebroadcasting only in SendWithRetry var sigs signatureList sigs.Allocate() if initSetErr := sigs.Set(0, sig); initSetErr != nil { - return solanaGo.Transaction{}, uuid.Nil, solanaGo.Signature{}, fmt.Errorf("failed to save initial signature in 
signature list: %w", initSetErr) + return solanaGo.Transaction{}, "", solanaGo.Signature{}, fmt.Errorf("failed to save initial signature in signature list: %w", initSetErr) } - txm.lggr.Debugw("tx initial broadcast", "id", id, "signature", sig) + txm.lggr.Debugw("tx initial broadcast", "id", msg.id, "fee", getFee(0), "signature", sig) txm.done.Add(1) // retry with exponential backoff @@ -274,12 +277,12 @@ func (txm *Txm) sendWithRetry(ctx context.Context, baseTx solanaGo.Transaction, case <-ctx.Done(): // stop sending tx after retry tx ctx times out (does not stop confirmation polling for tx) wg.Wait() - txm.lggr.Debugw("stopped tx retry", "id", id, "signatures", sigs.List(), "err", context.Cause(ctx)) + txm.lggr.Debugw("stopped tx retry", "id", msg.id, "signatures", sigs.List(), "err", context.Cause(ctx)) return case <-tick: var shouldBump bool // bump if period > 0 and past time - if txcfg.FeeBumpPeriod != 0 && time.Since(bumpTime) > txcfg.FeeBumpPeriod { + if msg.cfg.FeeBumpPeriod != 0 && time.Since(bumpTime) > msg.cfg.FeeBumpPeriod { bumpCount++ bumpTime = time.Now() shouldBump = true @@ -290,7 +293,7 @@ func (txm *Txm) sendWithRetry(ctx context.Context, baseTx solanaGo.Transaction, var retryBuildErr error currentTx, retryBuildErr = buildTx(ctx, baseTx, bumpCount) if retryBuildErr != nil { - txm.lggr.Errorw("failed to build bumped retry tx", "error", retryBuildErr, "id", id) + txm.lggr.Errorw("failed to build bumped retry tx", "error", retryBuildErr, "id", msg.id) return // exit func if cannot build tx for retrying } ind := sigs.Allocate() @@ -309,24 +312,24 @@ func (txm *Txm) sendWithRetry(ctx context.Context, baseTx solanaGo.Transaction, // this could occur if endpoint goes down or if ctx cancelled if retrySendErr != nil { if strings.Contains(retrySendErr.Error(), "context canceled") || strings.Contains(retrySendErr.Error(), "context deadline exceeded") { - txm.lggr.Debugw("ctx error on send retry transaction", "error", retrySendErr, "signatures", 
sigs.List(), "id", id) + txm.lggr.Debugw("ctx error on send retry transaction", "error", retrySendErr, "signatures", sigs.List(), "id", msg.id) } else { - txm.lggr.Warnw("failed to send retry transaction", "error", retrySendErr, "signatures", sigs.List(), "id", id) + txm.lggr.Warnw("failed to send retry transaction", "error", retrySendErr, "signatures", sigs.List(), "id", msg.id) } return } // save new signature if fee bumped if bump { - if retryStoreErr := txm.txs.Add(id, retrySig); retryStoreErr != nil { - txm.lggr.Warnw("error in adding retry transaction", "error", retryStoreErr, "id", id) + if retryStoreErr := txm.txs.AddSignature(msg.id, retrySig); retryStoreErr != nil { + txm.lggr.Warnw("error in adding retry transaction", "error", retryStoreErr, "id", msg.id) return } if setErr := sigs.Set(count, retrySig); setErr != nil { // this should never happen txm.lggr.Errorw("INVARIANT VIOLATION", "error", setErr) } - txm.lggr.Debugw("tx rebroadcast with bumped fee", "id", id, "fee", getFee(count), "signatures", sigs.List()) + txm.lggr.Debugw("tx rebroadcast with bumped fee", "id", msg.id, "fee", getFee(count), "signatures", sigs.List()) } // prevent locking on waitgroup when ctx is closed @@ -358,7 +361,7 @@ func (txm *Txm) sendWithRetry(ctx context.Context, baseTx solanaGo.Transaction, }(ctx, baseTx, initTx) // return signed tx, id, signature for use in simulation - return initTx, id, sig, nil + return initTx, msg.id, sig, nil } // goroutine that polls to confirm implementation @@ -415,45 +418,67 @@ func (txm *Txm) confirm() { // check confirm timeout exceeded if txm.txs.Expired(s[i], txm.cfg.TxConfirmTimeout()) { - id := txm.txs.OnError(s[i], TxFailDrop) - txm.lggr.Infow("failed to find transaction within confirm timeout", "id", id, "signature", s[i], "timeoutSeconds", txm.cfg.TxConfirmTimeout()) + id, err := txm.txs.OnError(s[i], txm.cfg.TxRetentionTimeout(), TxFailDrop) + if err != nil { + txm.lggr.Infow("failed to mark transaction as errored", "id", id, 
"signature", s[i], "timeoutSeconds", txm.cfg.TxConfirmTimeout(), "error", err) + } else { + txm.lggr.Infow("failed to find transaction within confirm timeout", "id", id, "signature", s[i], "timeoutSeconds", txm.cfg.TxConfirmTimeout()) + } } continue } // if signature has an error, end polling if res[i].Err != nil { - id := txm.txs.OnError(s[i], TxFailRevert) - txm.lggr.Debugw("tx state: failed", - "id", id, - "signature", s[i], - "error", res[i].Err, - "status", res[i].ConfirmationStatus, - ) + id, err := txm.txs.OnError(s[i], txm.cfg.TxRetentionTimeout(), TxFailRevert) + if err != nil { + txm.lggr.Infow("failed to mark transaction as errored", "id", id, "signature", s[i], "error", err) + } else { + txm.lggr.Debugw("tx state: failed", "id", id, "signature", s[i], "error", res[i].Err, "status", res[i].ConfirmationStatus) + } continue } - // if signature is processed, keep polling + // if signature is processed, keep polling for confirmed or finalized status if res[i].ConfirmationStatus == rpc.ConfirmationStatusProcessed { - txm.lggr.Debugw("tx state: processed", - "signature", s[i], - ) + // update transaction state in local memory + id, err := txm.txs.OnProcessed(s[i]) + if err != nil && !errors.Is(err, ErrAlreadyInExpectedState) { + txm.lggr.Errorw("failed to mark transaction as processed", "signature", s[i], "error", err) + } else if err == nil { + txm.lggr.Debugw("marking transaction as processed", "id", id, "signature", s[i]) + } + // check confirm timeout exceeded if TxConfirmTimeout set + if txm.cfg.TxConfirmTimeout() != 0*time.Second && txm.txs.Expired(s[i], txm.cfg.TxConfirmTimeout()) { + id, err := txm.txs.OnError(s[i], txm.cfg.TxRetentionTimeout(), TxFailDrop) + if err != nil { + txm.lggr.Infow("failed to mark transaction as errored", "id", id, "signature", s[i], "timeoutSeconds", txm.cfg.TxConfirmTimeout(), "error", err) + } else { + txm.lggr.Debugw("tx failed to move beyond 'processed' within confirm timeout", "id", id, "signature", s[i], 
"timeoutSeconds", txm.cfg.TxConfirmTimeout()) + } + } + continue + } - // check confirm timeout exceeded - if txm.txs.Expired(s[i], txm.cfg.TxConfirmTimeout()) { - id := txm.txs.OnError(s[i], TxFailDrop) - txm.lggr.Debugw("tx failed to move beyond 'processed' within confirm timeout", "id", id, "signature", s[i], "timeoutSeconds", txm.cfg.TxConfirmTimeout()) + // if signature is confirmed, keep polling for finalized status + if res[i].ConfirmationStatus == rpc.ConfirmationStatusConfirmed { + id, err := txm.txs.OnConfirmed(s[i]) + if err != nil && !errors.Is(err, ErrAlreadyInExpectedState) { + txm.lggr.Errorw("failed to mark transaction as confirmed", "id", id, "signature", s[i], "error", err) + } else if err == nil { + txm.lggr.Debugw("marking transaction as confirmed", "id", id, "signature", s[i]) } continue } - // if signature is confirmed/finalized, end polling - if res[i].ConfirmationStatus == rpc.ConfirmationStatusConfirmed || res[i].ConfirmationStatus == rpc.ConfirmationStatusFinalized { - id := txm.txs.OnSuccess(s[i]) - txm.lggr.Debugw(fmt.Sprintf("tx state: %s", res[i].ConfirmationStatus), - "id", id, - "signature", s[i], - ) + // if signature is finalized, end polling + if res[i].ConfirmationStatus == rpc.ConfirmationStatusFinalized { + id, err := txm.txs.OnFinalized(s[i], txm.cfg.TxRetentionTimeout()) + if err != nil { + txm.lggr.Errorw("failed to mark transaction as finalized", "id", id, "signature", s[i], "error", err) + } else { + txm.lggr.Debugw("marking transaction as finalized", "id", id, "signature", s[i]) + } continue } } @@ -497,8 +522,11 @@ func (txm *Txm) simulate() { case <-ctx.Done(): return case msg := <-txm.chSim: - res, err := txm.simulateTx(ctx, msg.tx) + res, err := txm.simulateTx(ctx, &msg.tx) if err != nil { + // this error can occur if endpoint goes down or if invalid signature (invalid signature should occur further upstream in sendWithRetry) + // allow retry to continue in case temporary endpoint failure (if still invalid, 
confirmation or timeout will cleanup) + txm.lggr.Debugw("failed to simulate tx", "id", msg.id, "signatures", msg.signatures, "error", err) continue } @@ -507,13 +535,35 @@ func (txm *Txm) simulate() { continue } - txm.processSimulationError(msg.id, msg.signature, res) + // Transaction has to have a signature if simulation succeeded but added check for belt and braces approach + if len(msg.signatures) > 0 { + txm.processSimulationError(msg.id, msg.signatures[0], res) + } + } + } +} + +// reap is a goroutine that periodically checks whether finalized and errored transactions have reached +// their retention threshold and purges them from the in-memory storage if they have +func (txm *Txm) reap() { + defer txm.done.Done() + ctx, cancel := txm.chStop.NewCtx() + defer cancel() + + tick := time.After(0) + for { + select { + case <-ctx.Done(): + return + case <-tick: + txm.txs.TrimFinalizedErroredTxs() } + tick = time.After(utils.WithJitter(TxReapInterval)) } } -// Enqueue enqueue a msg destined for the solana chain. -func (txm *Txm) Enqueue(ctx context.Context, accountID string, tx *solanaGo.Transaction, txCfgs ...SetTxConfig) error { +// Enqueue enqueues a msg destined for the solana chain. 
+func (txm *Txm) Enqueue(ctx context.Context, accountID string, tx *solanaGo.Transaction, txID *string, txCfgs ...SetTxConfig) error { if err := txm.Ready(); err != nil { return fmt.Errorf("error in soltxm.Enqueue: %w", err) } @@ -552,9 +602,15 @@ func (txm *Txm) Enqueue(ctx context.Context, accountID string, tx *solanaGo.Tran } } + // Use transaction ID provided by caller if set + id := uuid.New().String() + if txID != nil && *txID != "" { + id = *txID + } msg := pendingTx{ - tx: tx, + tx: *tx, cfg: cfg, + id: id, } select { @@ -566,6 +622,27 @@ func (txm *Txm) Enqueue(ctx context.Context, accountID string, tx *solanaGo.Tran return nil } +// GetTransactionStatus translates internal TXM transaction statuses to chainlink common statuses +func (txm *Txm) GetTransactionStatus(ctx context.Context, transactionID string) (commontypes.TransactionStatus, error) { + state, err := txm.txs.GetTxState(transactionID) + if err != nil { + return commontypes.Unknown, fmt.Errorf("failed to find transaction with id %s: %w", transactionID, err) + } + + switch state { + case Broadcasted: + return commontypes.Pending, nil + case Processed, Confirmed: + return commontypes.Unconfirmed, nil + case Finalized: + return commontypes.Finalized, nil + case Errored: + return commontypes.Failed, nil + default: + return commontypes.Unknown, fmt.Errorf("found unknown transaction state: %s", state.String()) + } +} + // EstimateComputeUnitLimit estimates the compute unit limit needed for a transaction. // It simulates the provided transaction to determine the used compute and applies a buffer to it. 
func (txm *Txm) EstimateComputeUnitLimit(ctx context.Context, tx *solanaGo.Transaction) (uint32, error) { @@ -580,7 +657,7 @@ func (txm *Txm) EstimateComputeUnitLimit(ctx context.Context, tx *solanaGo.Trans if len(tx.Signatures) > 0 { sig = tx.Signatures[0] } - txm.processSimulationError(uuid.Nil, sig, res) + txm.processSimulationError("", sig, res) return 0, fmt.Errorf("simulated tx returned error: %v", res.Err) } @@ -623,27 +700,38 @@ func (txm *Txm) simulateTx(ctx context.Context, tx *solanaGo.Transaction) (res * } // processSimulationError parses and handles relevant errors found in simulation results -func (txm *Txm) processSimulationError(id uuid.UUID, sig solanaGo.Signature, res *rpc.SimulateTransactionResult) { +func (txm *Txm) processSimulationError(id string, sig solanaGo.Signature, res *rpc.SimulateTransactionResult) { if res.Err != nil { // handle various errors // https://github.com/solana-labs/solana/blob/master/sdk/src/transaction/error.rs errStr := fmt.Sprintf("%v", res.Err) // convert to string to handle various interfaces + logValues := []interface{}{ + "id", id, + "signature", sig, + "result", res, + } switch { // blockhash not found when simulating, occurs when network bank has not seen the given blockhash or tx is too old // let confirmation process clean up case strings.Contains(errStr, "BlockhashNotFound"): - txm.lggr.Debugw("simulate: BlockhashNotFound", "id", id, "signature", sig, "result", res) + txm.lggr.Debugw("simulate: BlockhashNotFound", logValues...) 
// transaction will encounter execution error/revert, mark as reverted to remove from confirmation + retry case strings.Contains(errStr, "InstructionError"): - txm.txs.OnError(sig, TxFailSimRevert) // cancel retry - txm.lggr.Debugw("simulate: InstructionError", "id", id, "signature", sig, "result", res) + _, err := txm.txs.OnError(sig, txm.cfg.TxRetentionTimeout(), TxFailSimRevert) // cancel retry + if err != nil { + logValues = append(logValues, "stateTransitionErr", err) + } + txm.lggr.Debugw("simulate: InstructionError", logValues...) // transaction is already processed in the chain, letting txm confirmation handle case strings.Contains(errStr, "AlreadyProcessed"): - txm.lggr.Debugw("simulate: AlreadyProcessed", "id", id, "signature", sig, "result", res) + txm.lggr.Debugw("simulate: AlreadyProcessed", logValues...) // unrecognized errors (indicates more concerning failures) default: - txm.txs.OnError(sig, TxFailSimOther) // cancel retry - txm.lggr.Errorw("simulate: unrecognized error", "id", id, "signature", sig, "result", res) + _, err := txm.txs.OnError(sig, txm.cfg.TxRetentionTimeout(), TxFailSimOther) // cancel retry + if err != nil { + logValues = append(logValues, "stateTransitionErr", err) + } + txm.lggr.Errorw("simulate: unrecognized error", logValues...) 
} } } diff --git a/pkg/solana/txm/txm_internal_test.go b/pkg/solana/txm/txm_internal_test.go index 802dc93b2..d246220a7 100644 --- a/pkg/solana/txm/txm_internal_test.go +++ b/pkg/solana/txm/txm_internal_test.go @@ -5,7 +5,7 @@ package txm import ( "context" "errors" - "math/rand" + "math/big" "sync" "testing" "time" @@ -27,23 +27,26 @@ import ( relayconfig "github.com/smartcontractkit/chainlink-common/pkg/config" "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/types" "github.com/smartcontractkit/chainlink-common/pkg/utils" + bigmath "github.com/smartcontractkit/chainlink-common/pkg/utils/big_math" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" ) type soltxmProm struct { - id string - success, error, revert, reject, drop, simRevert, simOther float64 + id string + confirmed, error, revert, reject, drop, simRevert, simOther, finalized float64 } func (p soltxmProm) assertEqual(t *testing.T) { - assert.Equal(t, p.success, testutil.ToFloat64(promSolTxmSuccessTxs.WithLabelValues(p.id)), "mismatch: success") + assert.Equal(t, p.confirmed, testutil.ToFloat64(promSolTxmSuccessTxs.WithLabelValues(p.id)), "mismatch: confirmed") assert.Equal(t, p.error, testutil.ToFloat64(promSolTxmErrorTxs.WithLabelValues(p.id)), "mismatch: error") assert.Equal(t, p.revert, testutil.ToFloat64(promSolTxmRevertTxs.WithLabelValues(p.id)), "mismatch: revert") assert.Equal(t, p.reject, testutil.ToFloat64(promSolTxmRejectTxs.WithLabelValues(p.id)), "mismatch: reject") assert.Equal(t, p.drop, testutil.ToFloat64(promSolTxmDropTxs.WithLabelValues(p.id)), "mismatch: drop") assert.Equal(t, p.simRevert, testutil.ToFloat64(promSolTxmSimRevertTxs.WithLabelValues(p.id)), "mismatch: simRevert") assert.Equal(t, p.simOther, testutil.ToFloat64(promSolTxmSimOtherTxs.WithLabelValues(p.id)), "mismatch: simOther") + assert.Equal(t, p.finalized, testutil.ToFloat64(promSolTxmFinalizedTxs.WithLabelValues(p.id)), "mismatch: finalized") } func 
(p soltxmProm) getInflight() float64 { @@ -51,7 +54,7 @@ func (p soltxmProm) getInflight() float64 { } // create placeholder transaction and returns func for signed tx with fee -func getTx(t *testing.T, val uint64, keystore SimpleKeystore, price fees.ComputeUnitPrice) (*solana.Transaction, func(fees.ComputeUnitPrice, bool) *solana.Transaction) { +func getTx(t *testing.T, val uint64, keystore SimpleKeystore) (*solana.Transaction, func(fees.ComputeUnitPrice, bool, fees.ComputeUnitLimit) *solana.Transaction) { pubkey := solana.PublicKey{} // create transfer tx @@ -70,12 +73,12 @@ func getTx(t *testing.T, val uint64, keystore SimpleKeystore, price fees.Compute base := *tx // tx to send to txm, txm will add fee & sign - return &base, func(price fees.ComputeUnitPrice, addLimit bool) *solana.Transaction { + return &base, func(price fees.ComputeUnitPrice, addLimit bool, limit fees.ComputeUnitLimit) *solana.Transaction { tx := base // add fee parameters require.NoError(t, fees.SetComputeUnitPrice(&tx, price)) if addLimit { - require.NoError(t, fees.SetComputeUnitLimit(&tx, 200_000)) // default + require.NoError(t, fees.SetComputeUnitLimit(&tx, limit)) // default } // sign tx @@ -90,6 +93,24 @@ func getTx(t *testing.T, val uint64, keystore SimpleKeystore, price fees.Compute } } +// check if cached transaction is cleared +func empty(t *testing.T, txm *Txm, prom soltxmProm) bool { + count := txm.InflightTxs() + assert.Equal(t, float64(count), prom.getInflight()) // validate prom metric and txs length + return count == 0 +} + +// waits for the provided function to evaluate to true within the provided duration amount of time +func waitFor(t *testing.T, waitDuration time.Duration, txm *Txm, prom soltxmProm, f func(*testing.T, *Txm, soltxmProm) bool) { + for i := 0; i < int(waitDuration.Seconds()*1.5); i++ { + if f(t, txm, prom) { + return + } + time.Sleep(time.Second) + } + assert.NoError(t, errors.New("unable to confirm inflight txs is empty")) +} + func TestTxm(t *testing.T) { 
for _, eName := range []string{"fixed", "blockhistory"} { estimator := eName @@ -115,35 +136,14 @@ func TestTxm(t *testing.T) { loader := utils.NewLazyLoad(func() (client.ReaderWriter, error) { return mc, nil }) txm := NewTxm(id, loader, nil, cfg, mkey, lggr) require.NoError(t, txm.Start(ctx)) + t.Cleanup(func () { require.NoError(t, txm.Close())}) // tracking prom metrics prom := soltxmProm{id: id} - // create random signature - getSig := func() solana.Signature { - sig := make([]byte, 64) - rand.Read(sig) - return solana.SignatureFromBytes(sig) - } - - // check if cached transaction is cleared - empty := func() bool { - count := txm.InflightTxs() - assert.Equal(t, float64(count), prom.getInflight()) // validate prom metric and txs length - return count == 0 - } - // adjust wait time based on config waitDuration := cfg.TxConfirmTimeout() - waitFor := func(f func() bool) { - for i := 0; i < int(waitDuration.Seconds()*1.5); i++ { - if f() { - return - } - time.Sleep(time.Second) - } - assert.NoError(t, errors.New("unable to confirm inflight txs is empty")) - } + computeUnitLimitDefault := fees.ComputeUnitLimit(cfg.ComputeUnitLimitDefault()) // handle signature statuses calls statuses := map[solana.Signature]func() *rpc.SignatureStatusesResult{} @@ -161,27 +161,26 @@ func TestTxm(t *testing.T) { }, nil, ) - // happy path (send => simulate success => tx: nil => tx: processed => tx: confirmed => done) + // happy path (send => simulate success => tx: nil => tx: processed => tx: confirmed => finalized => done) t.Run("happyPath", func(t *testing.T) { - sig := getSig() - tx, signed := getTx(t, 0, mkey, 0) + sig := randomSignature(t) + tx, signed := getTx(t, 0, mkey) var wg sync.WaitGroup - wg.Add(3) + wg.Add(1) sendCount := 0 var countRW sync.RWMutex - mc.On("SendTx", mock.Anything, signed(0, true)).Run(func(mock.Arguments) { + mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimitDefault)).Run(func(mock.Arguments) { countRW.Lock() sendCount++ countRW.Unlock() 
}).After(500*time.Millisecond).Return(sig, nil) - mc.On("SimulateTx", mock.Anything, signed(0, true), mock.Anything).Return(&rpc.SimulateTransactionResult{}, nil).Once() + mc.On("SimulateTx", mock.Anything, signed(0, true, computeUnitLimitDefault), mock.Anything).Return(&rpc.SimulateTransactionResult{}, nil).Once() // handle signature status calls count := 0 statuses[sig] = func() (out *rpc.SignatureStatusesResult) { defer func() { count++ }() - defer wg.Done() out = &rpc.SignatureStatusesResult{} if count == 1 { @@ -193,15 +192,22 @@ func TestTxm(t *testing.T) { out.ConfirmationStatus = rpc.ConfirmationStatusConfirmed return } + + if count == 3 { + out.ConfirmationStatus = rpc.ConfirmationStatusFinalized + wg.Done() + return + } return nil } // send tx - assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx)) + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, &testTxID)) wg.Wait() // no transactions stored inflight txs list - waitFor(empty) + waitFor(t, waitDuration, txm, prom, empty) // transaction should be sent more than twice countRW.RLock() t.Logf("sendTx received %d calls", sendCount) @@ -212,43 +218,51 @@ func TestTxm(t *testing.T) { mc.On("SendTx", mock.Anything, tx).Panic("SendTx should not be called anymore").Maybe() // check prom metric - prom.success++ + prom.confirmed++ + prom.finalized++ prom.assertEqual(t) + + _, err := txm.GetTransactionStatus(ctx, testTxID) + require.Error(t, err) // transaction cleared from storage after finalized should not return status }) // fail on initial transmit (RPC immediate rejects) t.Run("fail_initialTx", func(t *testing.T) { - tx, signed := getTx(t, 1, mkey, 0) + tx, signed := getTx(t, 1, mkey) var wg sync.WaitGroup wg.Add(1) // should only be called once (tx does not start retry, confirming, or simulation) - mc.On("SendTx", mock.Anything, signed(0, true)).Run(func(mock.Arguments) { + mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimitDefault)).Run(func(mock.Arguments) { 
wg.Done() }).Return(solana.Signature{}, errors.New("FAIL")).Once() // tx should be able to queue - assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx)) + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, &testTxID)) wg.Wait() // wait to be picked up and processed // no transactions stored inflight txs list - waitFor(empty) + waitFor(t, waitDuration, txm, prom, empty) // check prom metric prom.error++ prom.reject++ prom.assertEqual(t) + + _, err := txm.GetTransactionStatus(ctx, testTxID) + require.Error(t, err) // transaction cleared from storage after finalized should not return status }) // tx fails simulation (simulation error) t.Run("fail_simulation", func(t *testing.T) { - tx, signed := getTx(t, 2, mkey, 0) - sig := getSig() + tx, signed := getTx(t, 2, mkey) + sig := randomSignature(t) var wg sync.WaitGroup wg.Add(1) - mc.On("SendTx", mock.Anything, signed(0, true)).Return(sig, nil) - mc.On("SimulateTx", mock.Anything, signed(0, true), mock.Anything).Run(func(mock.Arguments) { + mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimitDefault)).Return(sig, nil) + mc.On("SimulateTx", mock.Anything, signed(0, true, computeUnitLimitDefault), mock.Anything).Run(func(mock.Arguments) { wg.Done() }).Return(&rpc.SimulateTransactionResult{ Err: "FAIL", @@ -256,47 +270,55 @@ func TestTxm(t *testing.T) { // signature status is nil (handled automatically) // tx should be able to queue - assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx)) - wg.Wait() // wait to be picked up and processed - waitFor(empty) // txs cleared quickly + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, &testTxID)) + wg.Wait() // wait to be picked up and processed + waitFor(t, waitDuration, txm, prom, empty) // txs cleared quickly // check prom metric prom.error++ prom.simOther++ prom.assertEqual(t) + + _, err := txm.GetTransactionStatus(ctx, testTxID) + require.Error(t, err) // transaction cleared from storage after finalized should 
not return status }) // tx fails simulation (rpc error, timeout should clean up b/c sig status will be nil) t.Run("fail_simulation_confirmNil", func(t *testing.T) { - tx, signed := getTx(t, 3, mkey, 0) - sig := getSig() - retry0 := getSig() - retry1 := getSig() - retry2 := getSig() - retry3 := getSig() + tx, signed := getTx(t, 3, mkey) + sig := randomSignature(t) + retry0 := randomSignature(t) + retry1 := randomSignature(t) + retry2 := randomSignature(t) + retry3 := randomSignature(t) var wg sync.WaitGroup wg.Add(1) - mc.On("SendTx", mock.Anything, signed(0, true)).Return(sig, nil) - mc.On("SendTx", mock.Anything, signed(1, true)).Return(retry0, nil) - mc.On("SendTx", mock.Anything, signed(2, true)).Return(retry1, nil) - mc.On("SendTx", mock.Anything, signed(3, true)).Return(retry2, nil).Maybe() - mc.On("SendTx", mock.Anything, signed(4, true)).Return(retry3, nil).Maybe() - mc.On("SimulateTx", mock.Anything, signed(0, true), mock.Anything).Run(func(mock.Arguments) { + mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimitDefault)).Return(sig, nil) + mc.On("SendTx", mock.Anything, signed(1, true, computeUnitLimitDefault)).Return(retry0, nil) + mc.On("SendTx", mock.Anything, signed(2, true, computeUnitLimitDefault)).Return(retry1, nil) + mc.On("SendTx", mock.Anything, signed(3, true, computeUnitLimitDefault)).Return(retry2, nil).Maybe() + mc.On("SendTx", mock.Anything, signed(4, true, computeUnitLimitDefault)).Return(retry3, nil).Maybe() + mc.On("SimulateTx", mock.Anything, signed(0, true, computeUnitLimitDefault), mock.Anything).Run(func(mock.Arguments) { wg.Done() }).Return(&rpc.SimulateTransactionResult{}, errors.New("FAIL")).Once() // all signature statuses are nil, handled automatically // tx should be able to queue - assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx)) - wg.Wait() // wait to be picked up and processed - waitFor(empty) // txs cleared after timeout + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, 
&testTxID)) + wg.Wait() // wait to be picked up and processed + waitFor(t, waitDuration, txm, prom, empty) // txs cleared after timeout // check prom metric prom.error++ prom.drop++ prom.assertEqual(t) + _, err := txm.GetTransactionStatus(ctx, testTxID) + require.Error(t, err) // transaction cleared from storage after finalized should not return status + // panic if sendTx called after context cancelled mc.On("SendTx", mock.Anything, tx).Panic("SendTx should not be called anymore").Maybe() }) @@ -304,8 +326,8 @@ func TestTxm(t *testing.T) { // tx fails simulation with an InstructionError (indicates reverted execution) // manager should cancel sending retry immediately + increment reverted prom metric t.Run("fail_simulation_instructionError", func(t *testing.T) { - tx, signed := getTx(t, 4, mkey, 0) - sig := getSig() + tx, signed := getTx(t, 4, mkey) + sig := randomSignature(t) var wg sync.WaitGroup wg.Add(1) @@ -315,8 +337,8 @@ func TestTxm(t *testing.T) { 0, map[string]int{"Custom": 6003}, }, } - mc.On("SendTx", mock.Anything, signed(0, true)).Return(sig, nil) - mc.On("SimulateTx", mock.Anything, signed(0, true), mock.Anything).Run(func(mock.Arguments) { + mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimitDefault)).Return(sig, nil) + mc.On("SimulateTx", mock.Anything, signed(0, true, computeUnitLimitDefault), mock.Anything).Run(func(mock.Arguments) { wg.Done() }).Return(&rpc.SimulateTransactionResult{ Err: tempErr, @@ -324,29 +346,33 @@ func TestTxm(t *testing.T) { // all signature statuses are nil, handled automatically // tx should be able to queue - assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx)) - wg.Wait() // wait to be picked up and processed - waitFor(empty) // txs cleared after timeout + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, &testTxID)) + wg.Wait() // wait to be picked up and processed + waitFor(t, waitDuration, txm, prom, empty) // txs cleared after timeout // check prom metric prom.error++ 
prom.simRevert++ prom.assertEqual(t) + _, err := txm.GetTransactionStatus(ctx, testTxID) + require.Error(t, err) // transaction cleared from storage after finalized should not return status + // panic if sendTx called after context cancelled mc.On("SendTx", mock.Anything, tx).Panic("SendTx should not be called anymore").Maybe() }) // tx fails simulation with BlockHashNotFound error - // txm should continue to confirm tx (in this case it will succeed) + // txm should continue to finalize tx (in this case it will succeed) t.Run("fail_simulation_blockhashNotFound", func(t *testing.T) { - tx, signed := getTx(t, 5, mkey, 0) - sig := getSig() + tx, signed := getTx(t, 5, mkey) + sig := randomSignature(t) var wg sync.WaitGroup - wg.Add(3) + wg.Add(2) - mc.On("SendTx", mock.Anything, signed(0, true)).Return(sig, nil) - mc.On("SimulateTx", mock.Anything, signed(0, true), mock.Anything).Run(func(mock.Arguments) { + mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimitDefault)).Return(sig, nil) + mc.On("SimulateTx", mock.Anything, signed(0, true, computeUnitLimitDefault), mock.Anything).Run(func(mock.Arguments) { wg.Done() }).Return(&rpc.SimulateTransactionResult{ Err: "BlockhashNotFound", @@ -356,25 +382,34 @@ func TestTxm(t *testing.T) { count := 0 statuses[sig] = func() (out *rpc.SignatureStatusesResult) { defer func() { count++ }() - defer wg.Done() out = &rpc.SignatureStatusesResult{} - if count == 1 { + if count == 0 { out.ConfirmationStatus = rpc.ConfirmationStatusConfirmed return } + if count == 1 { + out.ConfirmationStatus = rpc.ConfirmationStatusFinalized + wg.Done() + return + } return nil } // tx should be able to queue - assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx)) - wg.Wait() // wait to be picked up and processed - waitFor(empty) // txs cleared after timeout + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, &testTxID)) + wg.Wait() // wait to be picked up and processed + waitFor(t, waitDuration, txm, prom, empty) 
// txs cleared after timeout // check prom metric - prom.success++ + prom.confirmed++ + prom.finalized++ prom.assertEqual(t) + _, err := txm.GetTransactionStatus(ctx, testTxID) + require.Error(t, err) // transaction cleared from storage after finalized should not return status + // panic if sendTx called after context cancelled mc.On("SendTx", mock.Anything, tx).Panic("SendTx should not be called anymore").Maybe() }) @@ -382,13 +417,13 @@ func TestTxm(t *testing.T) { // tx fails simulation with AlreadyProcessed error // txm should continue to confirm tx (in this case it will revert) t.Run("fail_simulation_alreadyProcessed", func(t *testing.T) { - tx, signed := getTx(t, 6, mkey, 0) - sig := getSig() + tx, signed := getTx(t, 6, mkey) + sig := randomSignature(t) var wg sync.WaitGroup wg.Add(2) - mc.On("SendTx", mock.Anything, signed(0, true)).Return(sig, nil) - mc.On("SimulateTx", mock.Anything, signed(0, true), mock.Anything).Run(func(mock.Arguments) { + mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimitDefault)).Return(sig, nil) + mc.On("SimulateTx", mock.Anything, signed(0, true, computeUnitLimitDefault), mock.Anything).Run(func(mock.Arguments) { wg.Done() }).Return(&rpc.SimulateTransactionResult{ Err: "AlreadyProcessed", @@ -404,36 +439,40 @@ func TestTxm(t *testing.T) { } // tx should be able to queue - assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx)) - wg.Wait() // wait to be picked up and processed - waitFor(empty) // txs cleared after timeout + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, &testTxID)) + wg.Wait() // wait to be picked up and processed + waitFor(t, waitDuration, txm, prom, empty) // txs cleared after timeout // check prom metric prom.revert++ prom.error++ prom.assertEqual(t) + _, err := txm.GetTransactionStatus(ctx, testTxID) + require.Error(t, err) // transaction cleared from storage after finalized should not return status + // panic if sendTx called after context cancelled mc.On("SendTx", 
mock.Anything, tx).Panic("SendTx should not be called anymore").Maybe() }) // tx passes sim, never passes processed (timeout should cleanup) t.Run("fail_confirm_processed", func(t *testing.T) { - tx, signed := getTx(t, 7, mkey, 0) - sig := getSig() - retry0 := getSig() - retry1 := getSig() - retry2 := getSig() - retry3 := getSig() + tx, signed := getTx(t, 7, mkey) + sig := randomSignature(t) + retry0 := randomSignature(t) + retry1 := randomSignature(t) + retry2 := randomSignature(t) + retry3 := randomSignature(t) var wg sync.WaitGroup wg.Add(1) - mc.On("SendTx", mock.Anything, signed(0, true)).Return(sig, nil) - mc.On("SendTx", mock.Anything, signed(1, true)).Return(retry0, nil) - mc.On("SendTx", mock.Anything, signed(2, true)).Return(retry1, nil) - mc.On("SendTx", mock.Anything, signed(3, true)).Return(retry2, nil).Maybe() - mc.On("SendTx", mock.Anything, signed(4, true)).Return(retry3, nil).Maybe() - mc.On("SimulateTx", mock.Anything, signed(0, true), mock.Anything).Run(func(mock.Arguments) { + mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimitDefault)).Return(sig, nil) + mc.On("SendTx", mock.Anything, signed(1, true, computeUnitLimitDefault)).Return(retry0, nil) + mc.On("SendTx", mock.Anything, signed(2, true, computeUnitLimitDefault)).Return(retry1, nil) + mc.On("SendTx", mock.Anything, signed(3, true, computeUnitLimitDefault)).Return(retry2, nil).Maybe() + mc.On("SendTx", mock.Anything, signed(4, true, computeUnitLimitDefault)).Return(retry3, nil).Maybe() + mc.On("SimulateTx", mock.Anything, signed(0, true, computeUnitLimitDefault), mock.Anything).Run(func(mock.Arguments) { wg.Done() }).Return(&rpc.SimulateTransactionResult{}, nil).Once() @@ -445,36 +484,40 @@ func TestTxm(t *testing.T) { } // tx should be able to queue - assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx)) - wg.Wait() // wait to be picked up and processed - waitFor(empty) // inflight txs cleared after timeout + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, 
t.Name(), tx, &testTxID)) + wg.Wait() // wait to be picked up and processed + waitFor(t, waitDuration, txm, prom, empty) // inflight txs cleared after timeout // check prom metric prom.error++ prom.drop++ prom.assertEqual(t) + _, err := txm.GetTransactionStatus(ctx, testTxID) + require.Error(t, err) // transaction cleared from storage after finalized should not return status + // panic if sendTx called after context cancelled mc.On("SendTx", mock.Anything, tx).Panic("SendTx should not be called anymore").Maybe() }) // tx passes sim, shows processed, moves to nil (timeout should cleanup) t.Run("fail_confirm_processedToNil", func(t *testing.T) { - tx, signed := getTx(t, 8, mkey, 0) - sig := getSig() - retry0 := getSig() - retry1 := getSig() - retry2 := getSig() - retry3 := getSig() + tx, signed := getTx(t, 8, mkey) + sig := randomSignature(t) + retry0 := randomSignature(t) + retry1 := randomSignature(t) + retry2 := randomSignature(t) + retry3 := randomSignature(t) var wg sync.WaitGroup wg.Add(1) - mc.On("SendTx", mock.Anything, signed(0, true)).Return(sig, nil) - mc.On("SendTx", mock.Anything, signed(1, true)).Return(retry0, nil) - mc.On("SendTx", mock.Anything, signed(2, true)).Return(retry1, nil) - mc.On("SendTx", mock.Anything, signed(3, true)).Return(retry2, nil).Maybe() - mc.On("SendTx", mock.Anything, signed(4, true)).Return(retry3, nil).Maybe() - mc.On("SimulateTx", mock.Anything, signed(0, true), mock.Anything).Run(func(mock.Arguments) { + mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimitDefault)).Return(sig, nil) + mc.On("SendTx", mock.Anything, signed(1, true, computeUnitLimitDefault)).Return(retry0, nil) + mc.On("SendTx", mock.Anything, signed(2, true, computeUnitLimitDefault)).Return(retry1, nil) + mc.On("SendTx", mock.Anything, signed(3, true, computeUnitLimitDefault)).Return(retry2, nil).Maybe() + mc.On("SendTx", mock.Anything, signed(4, true, computeUnitLimitDefault)).Return(retry3, nil).Maybe() + mc.On("SimulateTx", mock.Anything, 
signed(0, true, computeUnitLimitDefault), mock.Anything).Run(func(mock.Arguments) { wg.Done() }).Return(&rpc.SimulateTransactionResult{}, nil).Once() @@ -493,28 +536,32 @@ func TestTxm(t *testing.T) { } // tx should be able to queue - assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx)) - wg.Wait() // wait to be picked up and processed - waitFor(empty) // inflight txs cleared after timeout + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, &testTxID)) + wg.Wait() // wait to be picked up and processed + waitFor(t, waitDuration, txm, prom, empty) // inflight txs cleared after timeout // check prom metric prom.error++ prom.drop++ prom.assertEqual(t) + _, err := txm.GetTransactionStatus(ctx, testTxID) + require.Error(t, err) // transaction cleared from storage after finalized should not return status + // panic if sendTx called after context cancelled mc.On("SendTx", mock.Anything, tx).Panic("SendTx should not be called anymore").Maybe() }) // tx passes sim, errors on confirm t.Run("fail_confirm_revert", func(t *testing.T) { - tx, signed := getTx(t, 9, mkey, 0) - sig := getSig() + tx, signed := getTx(t, 9, mkey) + sig := randomSignature(t) var wg sync.WaitGroup wg.Add(1) - mc.On("SendTx", mock.Anything, signed(0, true)).Return(sig, nil) - mc.On("SimulateTx", mock.Anything, signed(0, true), mock.Anything).Run(func(mock.Arguments) { + mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimitDefault)).Return(sig, nil) + mc.On("SimulateTx", mock.Anything, signed(0, true, computeUnitLimitDefault), mock.Anything).Run(func(mock.Arguments) { wg.Done() }).Return(&rpc.SimulateTransactionResult{}, nil).Once() @@ -527,36 +574,40 @@ func TestTxm(t *testing.T) { } // tx should be able to queue - assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx)) - wg.Wait() // wait to be picked up and processed - waitFor(empty) // inflight txs cleared after timeout + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, &testTxID)) + 
wg.Wait() // wait to be picked up and processed + waitFor(t, waitDuration, txm, prom, empty) // inflight txs cleared after timeout // check prom metric prom.error++ prom.revert++ prom.assertEqual(t) + _, err := txm.GetTransactionStatus(ctx, testTxID) + require.Error(t, err) // transaction cleared from storage after finalized should not return status + // panic if sendTx called after context cancelled mc.On("SendTx", mock.Anything, tx).Panic("SendTx should not be called anymore").Maybe() }) // tx passes sim, first retried TXs get dropped t.Run("success_retryTx", func(t *testing.T) { - tx, signed := getTx(t, 10, mkey, 0) - sig := getSig() - retry0 := getSig() - retry1 := getSig() - retry2 := getSig() - retry3 := getSig() + tx, signed := getTx(t, 10, mkey) + sig := randomSignature(t) + retry0 := randomSignature(t) + retry1 := randomSignature(t) + retry2 := randomSignature(t) + retry3 := randomSignature(t) var wg sync.WaitGroup wg.Add(2) - mc.On("SendTx", mock.Anything, signed(0, true)).Return(sig, nil) - mc.On("SendTx", mock.Anything, signed(1, true)).Return(retry0, nil) - mc.On("SendTx", mock.Anything, signed(2, true)).Return(retry1, nil) - mc.On("SendTx", mock.Anything, signed(3, true)).Return(retry2, nil).Maybe() - mc.On("SendTx", mock.Anything, signed(4, true)).Return(retry3, nil).Maybe() - mc.On("SimulateTx", mock.Anything, signed(0, true), mock.Anything).Run(func(mock.Arguments) { + mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimitDefault)).Return(sig, nil) + mc.On("SendTx", mock.Anything, signed(1, true, computeUnitLimitDefault)).Return(retry0, nil) + mc.On("SendTx", mock.Anything, signed(2, true, computeUnitLimitDefault)).Return(retry1, nil) + mc.On("SendTx", mock.Anything, signed(3, true, computeUnitLimitDefault)).Return(retry2, nil).Maybe() + mc.On("SendTx", mock.Anything, signed(4, true, computeUnitLimitDefault)).Return(retry3, nil).Maybe() + mc.On("SimulateTx", mock.Anything, signed(0, true, computeUnitLimitDefault), 
mock.Anything).Run(func(mock.Arguments) { wg.Done() }).Return(&rpc.SimulateTransactionResult{}, nil).Once() @@ -564,52 +615,57 @@ func TestTxm(t *testing.T) { statuses[retry1] = func() (out *rpc.SignatureStatusesResult) { defer wg.Done() return &rpc.SignatureStatusesResult{ - ConfirmationStatus: rpc.ConfirmationStatusConfirmed, + ConfirmationStatus: rpc.ConfirmationStatusFinalized, } } // send tx - assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx)) + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, &testTxID)) wg.Wait() // no transactions stored inflight txs list - waitFor(empty) + waitFor(t, waitDuration, txm, prom, empty) // panic if sendTx called after context cancelled mc.On("SendTx", mock.Anything, tx).Panic("SendTx should not be called anymore").Maybe() // check prom metric - prom.success++ + prom.finalized++ prom.assertEqual(t) + + _, err := txm.GetTransactionStatus(ctx, testTxID) + require.Error(t, err) // transaction cleared from storage after finalized should not return status }) // fee bumping disabled t.Run("feeBumpingDisabled", func(t *testing.T) { - sig := getSig() - tx, signed := getTx(t, 11, mkey, 0) - - defaultFeeBumpPeriod := cfg.FeeBumpPeriod() + sig := randomSignature(t) + tx, signed := getTx(t, 11, mkey) sendCount := 0 var countRW sync.RWMutex - mc.On("SendTx", mock.Anything, signed(0, true)).Run(func(mock.Arguments) { + mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimitDefault)).Run(func(mock.Arguments) { countRW.Lock() sendCount++ countRW.Unlock() }).Return(sig, nil) // only sends one transaction type (no bumping) - mc.On("SimulateTx", mock.Anything, signed(0, true), mock.Anything).Return(&rpc.SimulateTransactionResult{}, nil).Once() + mc.On("SimulateTx", mock.Anything, signed(0, true, computeUnitLimitDefault), mock.Anything).Return(&rpc.SimulateTransactionResult{}, nil).Once() // handle signature status calls var wg sync.WaitGroup wg.Add(1) count := 0 - start := time.Now() statuses[sig] = func() 
(out *rpc.SignatureStatusesResult) { defer func() { count++ }() out = &rpc.SignatureStatusesResult{} - if time.Since(start) > 2*defaultFeeBumpPeriod { + if count == 1 { out.ConfirmationStatus = rpc.ConfirmationStatusConfirmed + return + } + if count == 2 { + out.ConfirmationStatus = rpc.ConfirmationStatusFinalized wg.Done() return } @@ -618,11 +674,12 @@ func TestTxm(t *testing.T) { } // send tx - with disabled fee bumping - assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, SetFeeBumpPeriod(0))) + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, &testTxID, SetFeeBumpPeriod(0))) wg.Wait() // no transactions stored inflight txs list - waitFor(empty) + waitFor(t, waitDuration, txm, prom, empty) // transaction should be sent more than twice countRW.RLock() t.Logf("sendTx received %d calls", sendCount) @@ -633,48 +690,309 @@ func TestTxm(t *testing.T) { mc.On("SendTx", mock.Anything, tx).Panic("SendTx should not be called anymore").Maybe() // check prom metric - prom.success++ + prom.confirmed++ + prom.finalized++ prom.assertEqual(t) + + _, err := txm.GetTransactionStatus(ctx, testTxID) + require.Error(t, err) // transaction cleared from storage after finalized should not return status }) // compute unit limit disabled t.Run("computeUnitLimitDisabled", func(t *testing.T) { - sig := getSig() - tx, signed := getTx(t, 12, mkey, 0) + sig := randomSignature(t) + tx, signed := getTx(t, 12, mkey) // should only match transaction without compute unit limit - assert.Len(t, signed(0, false).Message.Instructions, 2) - mc.On("SendTx", mock.Anything, signed(0, false)).Return(sig, nil) // only sends one transaction type (no bumping) - mc.On("SimulateTx", mock.Anything, signed(0, false), mock.Anything).Return(&rpc.SimulateTransactionResult{}, nil).Once() + assert.Len(t, signed(0, false, computeUnitLimitDefault).Message.Instructions, 2) + mc.On("SendTx", mock.Anything, signed(0, false, computeUnitLimitDefault)).Return(sig, nil) // only sends one 
transaction type (no bumping) + mc.On("SimulateTx", mock.Anything, signed(0, false, computeUnitLimitDefault), mock.Anything).Return(&rpc.SimulateTransactionResult{}, nil).Once() // handle signature status calls var wg sync.WaitGroup wg.Add(1) + count := 0 statuses[sig] = func() *rpc.SignatureStatusesResult { - defer wg.Done() + defer func() { count++ }() + if count == 0 { + return &rpc.SignatureStatusesResult{ + ConfirmationStatus: rpc.ConfirmationStatusConfirmed, + } + } + wg.Done() return &rpc.SignatureStatusesResult{ - ConfirmationStatus: rpc.ConfirmationStatusConfirmed, + ConfirmationStatus: rpc.ConfirmationStatusFinalized, } } // send tx - with disabled fee bumping and disabled compute unit limit - assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, SetFeeBumpPeriod(0), SetComputeUnitLimit(0))) + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, &testTxID, SetFeeBumpPeriod(0), SetComputeUnitLimit(0))) wg.Wait() // no transactions stored inflight txs list - waitFor(empty) + waitFor(t, waitDuration, txm, prom, empty) // panic if sendTx called after context cancelled mc.On("SendTx", mock.Anything, tx).Panic("SendTx should not be called anymore").Maybe() // check prom metric - prom.success++ + prom.confirmed++ + prom.finalized++ prom.assertEqual(t) + + _, err := txm.GetTransactionStatus(ctx, testTxID) + require.Error(t, err) // transaction cleared from storage after finalized should not return status }) }) } } +func TestTxm_disabled_confirm_timeout_with_retention(t *testing.T) { + t.Parallel() // run estimator tests in parallel + + // set up configs needed in txm + estimator := "fixed" + id := "mocknet-" + estimator + "-" + uuid.NewString() + t.Logf("Starting new iteration: %s", id) + + ctx := tests.Context(t) + lggr := logger.Test(t) + cfg := config.NewDefault() + cfg.Chain.FeeEstimatorMode = &estimator + // Disable confirm timeout + cfg.Chain.TxConfirmTimeout = relayconfig.MustNewDuration(0 * time.Second) + // Enable retention 
timeout to keep transactions after finality + cfg.Chain.TxRetentionTimeout = relayconfig.MustNewDuration(5 * time.Second) + mc := mocks.NewReaderWriter(t) + mc.On("GetLatestBlock", mock.Anything).Return(&rpc.GetBlockResult{}, nil).Maybe() + + computeUnitLimitDefault := fees.ComputeUnitLimit(cfg.ComputeUnitLimitDefault()) + + // mock solana keystore + mkey := keyMocks.NewSimpleKeystore(t) + mkey.On("Sign", mock.Anything, mock.Anything, mock.Anything).Return([]byte{}, nil) + + loader := utils.NewLazyLoad(func() (client.ReaderWriter, error) { return mc, nil }) + txm := NewTxm(id, loader, nil, cfg, mkey, lggr) + require.NoError(t, txm.Start(ctx)) + t.Cleanup(func () { require.NoError(t, txm.Close())}) + + // tracking prom metrics + prom := soltxmProm{id: id} + + // handle signature statuses calls + statuses := map[solana.Signature]func() *rpc.SignatureStatusesResult{} + mc.On("SignatureStatuses", mock.Anything, mock.AnythingOfType("[]solana.Signature")).Return( + func(_ context.Context, sigs []solana.Signature) (out []*rpc.SignatureStatusesResult) { + for i := range sigs { + get, exists := statuses[sigs[i]] + if !exists { + out = append(out, nil) + continue + } + out = append(out, get()) + } + return out + }, nil, + ) + + // Test tx is not discarded due to confirm timeout and tracked to finalization + tx, signed := getTx(t, 7, mkey) + sig := randomSignature(t) + retry0 := randomSignature(t) + retry1 := randomSignature(t) + var wg sync.WaitGroup + wg.Add(2) + + mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimitDefault)).Return(sig, nil) + mc.On("SendTx", mock.Anything, signed(1, true, computeUnitLimitDefault)).Return(retry0, nil).Maybe() + mc.On("SendTx", mock.Anything, signed(2, true, computeUnitLimitDefault)).Return(retry1, nil).Maybe() + mc.On("SimulateTx", mock.Anything, signed(0, true, computeUnitLimitDefault), mock.Anything).Run(func(mock.Arguments) { + wg.Done() + }).Return(&rpc.SimulateTransactionResult{}, nil).Once() + + // handle signature status 
calls (initial stays processed, others don't exist) + start := time.Now() + statuses[sig] = func() (out *rpc.SignatureStatusesResult) { + out = &rpc.SignatureStatusesResult{} + // return confirmed status after default confirmation timeout + if time.Since(start) > 1*time.Second && time.Since(start) < 2*time.Second { + out.ConfirmationStatus = rpc.ConfirmationStatusConfirmed + return + } + // return finalized status only after the confirmation timeout + if time.Since(start) >= 2*time.Second { + out.ConfirmationStatus = rpc.ConfirmationStatusFinalized + wg.Done() + return + } + out.ConfirmationStatus = rpc.ConfirmationStatusProcessed + return + } + + // tx should be able to queue + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, &testTxID)) + wg.Wait() // wait to be picked up and processed + waitFor(t, 5*time.Second, txm, prom, empty) // inflight txs cleared after timeout + + // panic if sendTx called after context cancelled + mc.On("SendTx", mock.Anything, tx).Panic("SendTx should not be called anymore").Maybe() + + // check prom metric + prom.confirmed++ + prom.finalized++ + prom.assertEqual(t) + + // check transaction status which should still be stored + status, err := txm.GetTransactionStatus(ctx, testTxID) + require.NoError(t, err) + require.Equal(t, types.Finalized, status) + + // Sleep until retention period has passed for transaction and for another reap cycle to run + time.Sleep(10 *time.Second) + + // check if transaction has been purged from memory + status, err = txm.GetTransactionStatus(ctx, testTxID) + require.Error(t, err) + require.Equal(t, types.Unknown, status) +} + +func TestTxm_compute_unit_limit_estimation(t *testing.T) { + t.Parallel() // run estimator tests in parallel + + // set up configs needed in txm + estimator := "fixed" + id := "mocknet-" + estimator + "-" + uuid.NewString() + t.Logf("Starting new iteration: %s", id) + + ctx := tests.Context(t) + lggr := logger.Test(t) + cfg := config.NewDefault() + 
cfg.Chain.FeeEstimatorMode = &estimator + // Enable compute unit limit estimation feature + estimateComputeUnitLimit := true + cfg.Chain.EstimateComputeUnitLimit = &estimateComputeUnitLimit + // Enable retention timeout to keep transactions after finality or error + cfg.Chain.TxRetentionTimeout = relayconfig.MustNewDuration(5 * time.Second) + mc := mocks.NewReaderWriter(t) + mc.On("GetLatestBlock", mock.Anything).Return(&rpc.GetBlockResult{}, nil).Maybe() + + // mock solana keystore + mkey := keyMocks.NewSimpleKeystore(t) + mkey.On("Sign", mock.Anything, mock.Anything, mock.Anything).Return([]byte{}, nil) + + loader := utils.NewLazyLoad(func() (client.ReaderWriter, error) { return mc, nil }) + txm := NewTxm(id, loader, nil, cfg, mkey, lggr) + require.NoError(t, txm.Start(ctx)) + t.Cleanup(func () { require.NoError(t, txm.Close())}) + + // tracking prom metrics + prom := soltxmProm{id: id} + + // handle signature statuses calls + statuses := map[solana.Signature]func() *rpc.SignatureStatusesResult{} + mc.On("SignatureStatuses", mock.Anything, mock.AnythingOfType("[]solana.Signature")).Return( + func(_ context.Context, sigs []solana.Signature) (out []*rpc.SignatureStatusesResult) { + for i := range sigs { + get, exists := statuses[sigs[i]] + if !exists { + out = append(out, nil) + continue + } + out = append(out, get()) + } + return out + }, nil, + ) + + t.Run("simulation_succeeds", func(t *testing.T) { + // Test tx is not discarded due to confirm timeout and tracked to finalization + tx, signed := getTx(t, 1, mkey) + sig := randomSignature(t) + var wg sync.WaitGroup + wg.Add(3) + + computeUnitConsumed := uint64(1_000_000) + computeUnitLimit := fees.ComputeUnitLimit(uint32(bigmath.AddPercentage(new(big.Int).SetUint64(computeUnitConsumed), EstimateComputeUnitLimitBuffer).Uint64())) + mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimit)).Return(sig, nil) + // First simulated before broadcast without signature or compute unit limit set + mc.On("SimulateTx", 
mock.Anything, tx, mock.Anything).Run(func(mock.Arguments) { + wg.Done() + }).Return(&rpc.SimulateTransactionResult{UnitsConsumed: &computeUnitConsumed}, nil).Once() + // Second simulated after broadcast with signature and compute unit limit set + mc.On("SimulateTx", mock.Anything, signed(0, true, computeUnitLimit), mock.Anything).Run(func(mock.Arguments) { + wg.Done() + }).Return(&rpc.SimulateTransactionResult{UnitsConsumed: &computeUnitConsumed}, nil).Once() + + // handle signature status calls + count := 0 + statuses[sig] = func() (out *rpc.SignatureStatusesResult) { + defer func() { count++ }() + out = &rpc.SignatureStatusesResult{} + if count == 1 { + out.ConfirmationStatus = rpc.ConfirmationStatusProcessed + return + } + if count == 2 { + out.ConfirmationStatus = rpc.ConfirmationStatusConfirmed + return + } + if count == 3 { + out.ConfirmationStatus = rpc.ConfirmationStatusFinalized + wg.Done() + return + } + return nil + } + + // send tx + testTxID := uuid.New().String() + assert.NoError(t, txm.Enqueue(ctx, t.Name(), tx, &testTxID)) + wg.Wait() + + // no transactions stored inflight txs list + waitFor(t, txm.cfg.TxConfirmTimeout(), txm, prom, empty) + + // panic if sendTx called after context cancelled + mc.On("SendTx", mock.Anything, tx).Panic("SendTx should not be called anymore").Maybe() + + // check prom metric + prom.confirmed++ + prom.finalized++ + prom.assertEqual(t) + + status, err := txm.GetTransactionStatus(ctx, testTxID) + require.NoError(t, err) + require.Equal(t, types.Finalized, status) + }) + + t.Run("simulation_fails", func(t *testing.T) { + // Test tx is not discarded due to confirm timeout and tracked to finalization + tx, signed := getTx(t, 1, mkey) + sig := randomSignature(t) + + mc.On("SendTx", mock.Anything, signed(0, true, fees.ComputeUnitLimit(0))).Return(sig, nil).Panic("SendTx should never be called").Maybe() + mc.On("SimulateTx", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("simulation failed")).Once() + + // 
tx should NOT be able to queue + assert.Error(t, txm.Enqueue(ctx, t.Name(), tx, nil)) + }) + + t.Run("simulation_returns_error", func(t *testing.T) { + // Test tx is not discarded due to confirm timeout and tracked to finalization + tx, signed := getTx(t, 1, mkey) + sig := randomSignature(t) + + mc.On("SendTx", mock.Anything, signed(0, true, fees.ComputeUnitLimit(0))).Return(sig, nil).Panic("SendTx should never be called").Maybe() + mc.On("SimulateTx", mock.Anything, tx, mock.Anything).Return(&rpc.SimulateTransactionResult{Err: errors.New("tx err")}, nil).Once() + + // tx should NOT be able to queue + assert.Error(t, txm.Enqueue(ctx, t.Name(), tx, nil)) + }) +} + func TestTxm_Enqueue(t *testing.T) { // set up configs needed in txm lggr := logger.Test(t) @@ -729,7 +1047,7 @@ func TestTxm_Enqueue(t *testing.T) { loader := utils.NewLazyLoad(func() (client.ReaderWriter, error) { return mc, nil }) txm := NewTxm("enqueue_test", loader, nil, cfg, mkey, lggr) - require.ErrorContains(t, txm.Enqueue(ctx, "txmUnstarted", &solana.Transaction{}), "not started") + require.ErrorContains(t, txm.Enqueue(ctx, "txmUnstarted", &solana.Transaction{}, nil), "not started") require.NoError(t, txm.Start(ctx)) t.Cleanup(func() { require.NoError(t, txm.Close()) }) @@ -747,10 +1065,10 @@ func TestTxm_Enqueue(t *testing.T) { for _, run := range txs { t.Run(run.name, func(t *testing.T) { if !run.fail { - assert.NoError(t, txm.Enqueue(ctx, run.name, run.tx)) + assert.NoError(t, txm.Enqueue(ctx, run.name, run.tx, nil)) return } - assert.Error(t, txm.Enqueue(ctx, run.name, run.tx)) + assert.Error(t, txm.Enqueue(ctx, run.name, run.tx, nil)) }) } } diff --git a/pkg/solana/txm/txm_load_test.go b/pkg/solana/txm/txm_load_test.go index 744610e1f..5d5a8061b 100644 --- a/pkg/solana/txm/txm_load_test.go +++ b/pkg/solana/txm/txm_load_test.go @@ -104,16 +104,16 @@ func TestTxm_Integration(t *testing.T) { } // enqueue txs (must pass to move on to load test) - require.NoError(t, txm.Enqueue(ctx, 
"test_success_0", createTx(pubKey, pubKey, pubKeyReceiver, solana.LAMPORTS_PER_SOL))) - require.Error(t, txm.Enqueue(ctx, "test_invalidSigner", createTx(pubKeyReceiver, pubKey, pubKeyReceiver, solana.LAMPORTS_PER_SOL))) // cannot sign tx before enqueuing - require.NoError(t, txm.Enqueue(ctx, "test_invalidReceiver", createTx(pubKey, pubKey, solana.PublicKey{}, solana.LAMPORTS_PER_SOL))) + require.NoError(t, txm.Enqueue(ctx, "test_success_0", createTx(pubKey, pubKey, pubKeyReceiver, solana.LAMPORTS_PER_SOL), nil)) + require.Error(t, txm.Enqueue(ctx, "test_invalidSigner", createTx(pubKeyReceiver, pubKey, pubKeyReceiver, solana.LAMPORTS_PER_SOL), nil)) // cannot sign tx before enqueuing + require.NoError(t, txm.Enqueue(ctx, "test_invalidReceiver", createTx(pubKey, pubKey, solana.PublicKey{}, solana.LAMPORTS_PER_SOL), nil)) time.Sleep(500 * time.Millisecond) // pause 0.5s for new blockhash - require.NoError(t, txm.Enqueue(ctx, "test_success_1", createTx(pubKey, pubKey, pubKeyReceiver, solana.LAMPORTS_PER_SOL))) - require.NoError(t, txm.Enqueue(ctx, "test_txFail", createTx(pubKey, pubKey, pubKeyReceiver, 1000*solana.LAMPORTS_PER_SOL))) + require.NoError(t, txm.Enqueue(ctx, "test_success_1", createTx(pubKey, pubKey, pubKeyReceiver, solana.LAMPORTS_PER_SOL), nil)) + require.NoError(t, txm.Enqueue(ctx, "test_txFail", createTx(pubKey, pubKey, pubKeyReceiver, 1000*solana.LAMPORTS_PER_SOL), nil)) // load test: try to overload txs, confirm, or simulation for i := 0; i < 1000; i++ { - assert.NoError(t, txm.Enqueue(ctx, fmt.Sprintf("load_%d", i), createTx(loadTestKey.PublicKey(), loadTestKey.PublicKey(), loadTestKey.PublicKey(), uint64(i)))) + assert.NoError(t, txm.Enqueue(ctx, fmt.Sprintf("load_%d", i), createTx(loadTestKey.PublicKey(), loadTestKey.PublicKey(), loadTestKey.PublicKey(), uint64(i)), nil)) time.Sleep(10 * time.Millisecond) // ~100 txs per second (note: have run 5ms delays for ~200tx/s succesfully) } diff --git a/pkg/solana/txm/txm_race_test.go 
b/pkg/solana/txm/txm_race_test.go index 81f2c15f6..42062718f 100644 --- a/pkg/solana/txm/txm_race_test.go +++ b/pkg/solana/txm/txm_race_test.go @@ -27,9 +27,11 @@ import ( "github.com/stretchr/testify/require" ) -func NewTestTx() (tx solanaGo.Transaction) { +func NewTestMsg() (msg pendingTx) { + tx := solanaGo.Transaction{} tx.Message.AccountKeys = append(tx.Message.AccountKeys, solanaGo.PublicKey{}) - return tx + msg.tx = tx + return msg } // Test race condition for saving + reading signatures when bumping fees @@ -59,7 +61,7 @@ func TestTxm_SendWithRetry_Race(t *testing.T) { ks.On("Sign", mock.Anything, mock.Anything, mock.Anything).Return([]byte{}, nil) // assemble minimal tx for testing retry - tx := NewTestTx() + msg := NewTestMsg() testRunner := func(t *testing.T, client solanaClient.ReaderWriter) { // build minimal txm @@ -69,11 +71,9 @@ func TestTxm_SendWithRetry_Race(t *testing.T) { txm := NewTxm("retry_race", loader, nil, cfg, ks, lggr) txm.fee = fee - _, _, _, err := txm.sendWithRetry( - tests.Context(t), - tx, - txm.defaultTxConfig(), - ) + msg.cfg = txm.defaultTxConfig() + + _, _, _, err := txm.sendWithRetry(tests.Context(t), msg) require.NoError(t, err) time.Sleep(txRetryDuration / 4 * 5) // wait 1.25x longer of tx life to capture all logs @@ -206,34 +206,34 @@ func TestTxm_SendWithRetry_Race(t *testing.T) { t.Run("bumping tx errors and ctx cleans up waitgroup blocks", func(t *testing.T) { client := clientmocks.NewReaderWriter(t) // client mock - first tx is always successful - tx0 := NewTestTx() - require.NoError(t, fees.SetComputeUnitPrice(&tx0, 0)) - require.NoError(t, fees.SetComputeUnitLimit(&tx0, 200_000)) - tx0.Signatures = make([]solanaGo.Signature, 1) - client.On("SendTx", mock.Anything, &tx0).Return(solanaGo.Signature{1}, nil) + msg0 := NewTestMsg() + require.NoError(t, fees.SetComputeUnitPrice(&msg0.tx, 0)) + require.NoError(t, fees.SetComputeUnitLimit(&msg0.tx, 200_000)) + msg0.tx.Signatures = make([]solanaGo.Signature, 1) + 
client.On("SendTx", mock.Anything, &msg0.tx).Return(solanaGo.Signature{1}, nil) // init bump tx fails, rebroadcast is successful - tx1 := NewTestTx() - require.NoError(t, fees.SetComputeUnitPrice(&tx1, 1)) - require.NoError(t, fees.SetComputeUnitLimit(&tx1, 200_000)) - tx1.Signatures = make([]solanaGo.Signature, 1) - client.On("SendTx", mock.Anything, &tx1).Return(solanaGo.Signature{}, fmt.Errorf("BUMP FAILED")).Once() - client.On("SendTx", mock.Anything, &tx1).Return(solanaGo.Signature{2}, nil) + msg1 := NewTestMsg() + require.NoError(t, fees.SetComputeUnitPrice(&msg1.tx, 1)) + require.NoError(t, fees.SetComputeUnitLimit(&msg1.tx, 200_000)) + msg1.tx.Signatures = make([]solanaGo.Signature, 1) + client.On("SendTx", mock.Anything, &msg1.tx).Return(solanaGo.Signature{}, fmt.Errorf("BUMP FAILED")).Once() + client.On("SendTx", mock.Anything, &msg1.tx).Return(solanaGo.Signature{2}, nil) // init bump tx success, rebroadcast fails - tx2 := NewTestTx() - require.NoError(t, fees.SetComputeUnitPrice(&tx2, 2)) - require.NoError(t, fees.SetComputeUnitLimit(&tx2, 200_000)) - tx2.Signatures = make([]solanaGo.Signature, 1) - client.On("SendTx", mock.Anything, &tx2).Return(solanaGo.Signature{3}, nil).Once() - client.On("SendTx", mock.Anything, &tx2).Return(solanaGo.Signature{}, fmt.Errorf("REBROADCAST FAILED")) + msg2 := NewTestMsg() + require.NoError(t, fees.SetComputeUnitPrice(&msg2.tx, 2)) + require.NoError(t, fees.SetComputeUnitLimit(&msg2.tx, 200_000)) + msg2.tx.Signatures = make([]solanaGo.Signature, 1) + client.On("SendTx", mock.Anything, &msg2.tx).Return(solanaGo.Signature{3}, nil).Once() + client.On("SendTx", mock.Anything, &msg2.tx).Return(solanaGo.Signature{}, fmt.Errorf("REBROADCAST FAILED")) // always successful - tx3 := NewTestTx() - require.NoError(t, fees.SetComputeUnitPrice(&tx3, 4)) - require.NoError(t, fees.SetComputeUnitLimit(&tx3, 200_000)) - tx3.Signatures = make([]solanaGo.Signature, 1) - client.On("SendTx", mock.Anything, &tx3).Return(solanaGo.Signature{4}, 
nil) + msg3 := NewTestMsg() + require.NoError(t, fees.SetComputeUnitPrice(&msg3.tx, 4)) + require.NoError(t, fees.SetComputeUnitLimit(&msg3.tx, 200_000)) + msg3.tx.Signatures = make([]solanaGo.Signature, 1) + client.On("SendTx", mock.Anything, &msg3.tx).Return(solanaGo.Signature{4}, nil) testRunner(t, client) }) diff --git a/pkg/solana/txm/utils.go b/pkg/solana/txm/utils.go index 360a2330e..6b2253818 100644 --- a/pkg/solana/txm/utils.go +++ b/pkg/solana/txm/utils.go @@ -11,17 +11,42 @@ import ( "github.com/gagliardetto/solana-go/rpc" ) +type TxState int + // tx not found +// < tx errored +// < tx broadcasted // < tx processed -// < tx confirmed/finalized + revert -// < tx confirmed/finalized + success +// < tx confirmed +// < tx finalized const ( - NotFound = iota + NotFound TxState = iota + Errored + Broadcasted Processed - ConfirmedRevert - ConfirmedSuccess + Confirmed + Finalized ) +func (s TxState) String() string { + switch s { + case NotFound: + return "NotFound" + case Errored: + return "Errored" + case Broadcasted: + return "Broadcasted" + case Processed: + return "Processed" + case Confirmed: + return "Confirmed" + case Finalized: + return "Finalized" + default: + return fmt.Sprintf("TxState(%d)", s) + } +} + type statuses struct { sigs []solana.Signature res []*rpc.SignatureStatusesResult @@ -53,7 +78,7 @@ func SortSignaturesAndResults(sigs []solana.Signature, res []*rpc.SignatureStatu return s.sigs, s.res, nil } -func convertStatus(res *rpc.SignatureStatusesResult) uint { +func convertStatus(res *rpc.SignatureStatusesResult) TxState { if res == nil { return NotFound } @@ -62,12 +87,21 @@ func convertStatus(res *rpc.SignatureStatusesResult) uint { return Processed } - if res.ConfirmationStatus == rpc.ConfirmationStatusConfirmed || - res.ConfirmationStatus == rpc.ConfirmationStatusFinalized { + if res.ConfirmationStatus == rpc.ConfirmationStatusConfirmed { + // If result contains error, consider the transaction errored to avoid wasted resources on re-org 
and expiration protection + if res.Err != nil { + return Errored + } + return Confirmed + } + + if res.ConfirmationStatus == rpc.ConfirmationStatusFinalized { + // If result contains error, consider the transaction errored + // Should be caught earlier but checked here in case confirmed is skipped due to delays or slow polling if res.Err != nil { - return ConfirmedRevert + return Errored } - return ConfirmedSuccess + return Finalized } return NotFound diff --git a/pkg/solana/txm/utils_test.go b/pkg/solana/txm/utils_test.go index 0530495d7..f4ac868ff 100644 --- a/pkg/solana/txm/utils_test.go +++ b/pkg/solana/txm/utils_test.go @@ -29,15 +29,15 @@ func TestSortSignaturesAndResults(t *testing.T) { sig, statuses, err = SortSignaturesAndResults(sig, statuses) require.NoError(t, err) - // new expected order [1, 3, 0, 2] + // new expected order [1, 0, 3, 2] assert.Equal(t, rpc.SignatureStatusesResult{ConfirmationStatus: rpc.ConfirmationStatusConfirmed}, *statuses[0]) - assert.Equal(t, rpc.SignatureStatusesResult{ConfirmationStatus: rpc.ConfirmationStatusConfirmed, Err: "ERROR"}, *statuses[1]) - assert.Equal(t, rpc.SignatureStatusesResult{ConfirmationStatus: rpc.ConfirmationStatusProcessed}, *statuses[2]) + assert.Equal(t, rpc.SignatureStatusesResult{ConfirmationStatus: rpc.ConfirmationStatusProcessed}, *statuses[1]) + assert.Equal(t, rpc.SignatureStatusesResult{ConfirmationStatus: rpc.ConfirmationStatusConfirmed, Err: "ERROR"}, *statuses[2]) assert.True(t, nil == statuses[3]) assert.Equal(t, solana.Signature{1}, sig[0]) - assert.Equal(t, solana.Signature{3}, sig[1]) - assert.Equal(t, solana.Signature{0}, sig[2]) + assert.Equal(t, solana.Signature{0}, sig[1]) + assert.Equal(t, solana.Signature{3}, sig[2]) assert.Equal(t, solana.Signature{2}, sig[3]) } From 8b8369c1f44eb2d7de5803a77cf58bde1d42f066 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 15 Nov 2024 14:11:42 -0500 Subject: [PATCH 06/12] MultiNode Soak Testing (#894) * Add defaults * Add latest block methods * 
Address comments * lint * Fix lint overflow issues * Update transaction_sender.go * Fix lint * Validate node config * Update toml.go * Add SendOnly nodes * Use pointers on config * Add test outlines * Use test context * Use configured selection mode * Set defaults * lint * Add nil check * Add client test * Add subscription test * tidy * Fix imports * Update chain_test.go * Update multinode.go * Add comments * Update multinode.go * Wrap multinode config * Fix imports * Update .golangci.yml * Use MultiNode * Add multinode to txm * Use MultiNode * Update chain.go * Update balance_test.go * Add retries * Fix head * Update client.go * lint * lint * Use MultiNode TxSender * Update txm_internal_test.go * Address comments * Remove total difficulty * Register polling subs * Extract MultiNodeClient * Remove caching changes * Undo cache changes * Fix tests * Update chain.go * Fix variables * Move classify errors * Fix imports * lint * Update txm_internal_test.go * Update txm_internal_test.go * lint * Fix error classification * Update txm_internal_test.go * Update multinode_client.go * lint * Update classify_errors.go * Update classify_errors.go * Add tests * Add test coverage * lint * Add dial comment * CTF bump for image build * Update pkg/solana/client/multinode_client.go Co-authored-by: Dmytro Haidashenko <34754799+dhaidashenko@users.noreply.github.com> * Update txm.go * Create loader * Update transaction_sender.go * Fix tests * Update txm_internal_test.go * lint * Update txm.go * Add ctx * Fix imports * Add SendTxResult to TxSender * Update chain_test.go * Enable MultiNode * Move error classification * Add MultiNode config * Use loader * Update multinode.go * Update multinode.go * Use loader in txm tests * lint * Update testconfig.go * Update loader * Use single RPC * Fix tests * lint * Use default thresholds * Address comments * Update classify_errors.go * Update testconfig.go * Update errors * lint * Fix SendTransaction * Update chain.go * Update sendTx * Fix ctx issues 
* Enable multiple RPCs in soak tests * Update defaults for testing * Add health check tags * Increase sync threshold * Validate heads * Use latestChainInfo * Fix AliveLoop bug * Update configurations * Update transaction_sender.go * Get chain info * Update ctx * Update transaction_sender.go * Update transaction_sender.go * Increase tx timeout * Update transaction_sender.go * Update ctx * Add timer * Update transaction_sender.go * Update transaction_sender.go * Update testconfig.go * Fix ctx * Remove debug logging * Update run_soak_test.sh * lint * Add debugging logs * Fix ctx cancel * Fix ctx cancel * Fix DoAll ctx * Remove debugging logs * Remove logs * defer reportWg * Add result ctx logging * log on close * Update transaction_sender.go * add cancel func * Update transaction_sender.go * Update transaction_sender.go * Add ctx to reportSendTxAnomalies * Update comments * Fix comments * Address comments * lint * lint * Pass context * Update node_lifecycle.go * Use get reader function * Make rpcurls plural * Fix reader getters * lint * fix imports * Update transaction_sender.go * Remove TxError * Rename getReader * lint * Update chain_test.go * Update transmissions_cache.go * Update run_soak_test.sh * Fix deprecated method * Clean up getReader * Use AccountReader --------- Co-authored-by: Damjan Smickovski Co-authored-by: Dmytro Haidashenko <34754799+dhaidashenko@users.noreply.github.com> --- integration-tests/common/common.go | 14 +- integration-tests/common/test_common.go | 16 +-- integration-tests/config/config.go | 12 +- integration-tests/scripts/run_soak_test.sh | 59 ++++++++ integration-tests/solclient/solclient.go | 2 +- integration-tests/solclient/store.go | 4 +- integration-tests/testconfig/testconfig.go | 72 ++++++---- pkg/monitoring/chain_reader.go | 7 +- pkg/solana/cache_test.go | 26 ++-- pkg/solana/chain.go | 5 +- pkg/solana/chain_test.go | 6 +- pkg/solana/client/multinode/multi_node.go | 2 +- pkg/solana/client/multinode/node_lifecycle.go | 34 +++-- 
pkg/solana/client/multinode/poller.go | 2 +- .../client/multinode/transaction_sender.go | 132 +++++++++--------- pkg/solana/client/multinode_client.go | 46 +++--- pkg/solana/config/multinode.go | 24 ++-- pkg/solana/config_tracker.go | 10 +- pkg/solana/config_tracker_test.go | 4 +- pkg/solana/relay.go | 16 +-- pkg/solana/state_cache.go | 16 ++- pkg/solana/transmissions_cache.go | 16 ++- pkg/solana/transmitter.go | 11 +- pkg/solana/transmitter_test.go | 3 +- 24 files changed, 332 insertions(+), 207 deletions(-) create mode 100755 integration-tests/scripts/run_soak_test.sh diff --git a/integration-tests/common/common.go b/integration-tests/common/common.go index c4de45aea..05ccabbad 100644 --- a/integration-tests/common/common.go +++ b/integration-tests/common/common.go @@ -51,7 +51,7 @@ type TestEnvDetails struct { type ChainDetails struct { ChainName string ChainID string - RPCUrl string + RPCUrls []string RPCURLExternal string WSURLExternal string ProgramAddresses *chainConfig.ProgramAddresses @@ -116,9 +116,9 @@ func New(testConfig *tc.TestConfig) *Common { config = chainConfig.DevnetConfig() privateKeyString = *testConfig.Common.PrivateKey - if *testConfig.Common.RPCURL != "" { - config.RPCUrl = *testConfig.Common.RPCURL - config.WSUrl = *testConfig.Common.WsURL + if len(*testConfig.Common.RPCURLs) > 0 { + config.RPCUrls = *testConfig.Common.RPCURLs + config.WSUrls = *testConfig.Common.WsURLs config.ProgramAddresses = &chainConfig.ProgramAddresses{ OCR2: *testConfig.SolanaConfig.OCR2ProgramID, AccessController: *testConfig.SolanaConfig.AccessControllerProgramID, @@ -130,7 +130,7 @@ func New(testConfig *tc.TestConfig) *Common { c = &Common{ ChainDetails: &ChainDetails{ ChainID: config.ChainID, - RPCUrl: config.RPCUrl, + RPCUrls: config.RPCUrls, ChainName: config.ChainName, ProgramAddresses: config.ProgramAddresses, }, @@ -146,7 +146,7 @@ func New(testConfig *tc.TestConfig) *Common { } // provide getters for TestConfig (pointers to chain details) 
c.TestConfig.GetChainID = func() string { return c.ChainDetails.ChainID } - c.TestConfig.GetURL = func() string { return c.ChainDetails.RPCUrl } + c.TestConfig.GetURL = func() []string { return c.ChainDetails.RPCUrls } return c } @@ -298,7 +298,7 @@ func (c *Common) CreateJobsForContract(contractNodeInfo *ContractNodeInfo) error bootstrapNodeInternalIP = contractNodeInfo.BootstrapNode.InternalIP() } relayConfig := job.JSONConfig{ - "nodeEndpointHTTP": c.ChainDetails.RPCUrl, + "nodeEndpointHTTP": c.ChainDetails.RPCUrls, "ocr2ProgramID": contractNodeInfo.OCR2.ProgramAddress(), "transmissionsID": contractNodeInfo.Store.TransmissionsAddress(), "storeProgramID": contractNodeInfo.Store.ProgramAddress(), diff --git a/integration-tests/common/test_common.go b/integration-tests/common/test_common.go index a775a5199..b351ee73d 100644 --- a/integration-tests/common/test_common.go +++ b/integration-tests/common/test_common.go @@ -118,9 +118,9 @@ func (m *OCRv2TestState) DeployCluster(contractsDir string) { m.Common.ChainDetails.WSURLExternal = m.Common.Env.URLs["sol"][1] if *m.Config.TestConfig.Common.Network == "devnet" { - m.Common.ChainDetails.RPCUrl = *m.Config.TestConfig.Common.RPCURL - m.Common.ChainDetails.RPCURLExternal = *m.Config.TestConfig.Common.RPCURL - m.Common.ChainDetails.WSURLExternal = *m.Config.TestConfig.Common.WsURL + m.Common.ChainDetails.RPCUrls = *m.Config.TestConfig.Common.RPCURLs + m.Common.ChainDetails.RPCURLExternal = (*m.Config.TestConfig.Common.RPCURLs)[0] + m.Common.ChainDetails.WSURLExternal = (*m.Config.TestConfig.Common.WsURLs)[0] } m.Common.ChainDetails.MockserverURLInternal = m.Common.Env.URLs["qa_mock_adapter_internal"][0] @@ -133,14 +133,14 @@ func (m *OCRv2TestState) DeployCluster(contractsDir string) { require.NoError(m.Config.T, err) // Setting the External RPC url for Gauntlet - m.Common.ChainDetails.RPCUrl = sol.InternalHTTPURL + m.Common.ChainDetails.RPCUrls = []string{sol.InternalHTTPURL} m.Common.ChainDetails.RPCURLExternal = 
sol.ExternalHTTPURL m.Common.ChainDetails.WSURLExternal = sol.ExternalWsURL if *m.Config.TestConfig.Common.Network == "devnet" { - m.Common.ChainDetails.RPCUrl = *m.Config.TestConfig.Common.RPCURL - m.Common.ChainDetails.RPCURLExternal = *m.Config.TestConfig.Common.RPCURL - m.Common.ChainDetails.WSURLExternal = *m.Config.TestConfig.Common.WsURL + m.Common.ChainDetails.RPCUrls = *m.Config.TestConfig.Common.RPCURLs + m.Common.ChainDetails.RPCURLExternal = (*m.Config.TestConfig.Common.RPCURLs)[0] + m.Common.ChainDetails.WSURLExternal = (*m.Config.TestConfig.Common.WsURLs)[0] } b, err := test_env.NewCLTestEnvBuilder(). @@ -273,7 +273,7 @@ func (m *OCRv2TestState) CreateJobs() { require.NoError(m.Config.T, err, "Error connecting to websocket client") relayConfig := job.JSONConfig{ - "nodeEndpointHTTP": m.Common.ChainDetails.RPCUrl, + "nodeEndpointHTTP": m.Common.ChainDetails.RPCUrls, "ocr2ProgramID": m.Common.ChainDetails.ProgramAddresses.OCR2, "transmissionsID": m.Gauntlet.FeedAddress, "storeProgramID": m.Common.ChainDetails.ProgramAddresses.Store, diff --git a/integration-tests/config/config.go b/integration-tests/config/config.go index 232dfa5d3..1b96b1f77 100644 --- a/integration-tests/config/config.go +++ b/integration-tests/config/config.go @@ -3,8 +3,8 @@ package config type Config struct { ChainName string ChainID string - RPCUrl string - WSUrl string + RPCUrls []string + WSUrls []string ProgramAddresses *ProgramAddresses PrivateKey string } @@ -20,8 +20,8 @@ func DevnetConfig() *Config { ChainName: "solana", ChainID: "devnet", // Will be overridden if set in toml - RPCUrl: "https://api.devnet.solana.com", - WSUrl: "wss://api.devnet.solana.com/", + RPCUrls: []string{"https://api.devnet.solana.com"}, + WSUrls: []string{"wss://api.devnet.solana.com/"}, } } @@ -30,8 +30,8 @@ func LocalNetConfig() *Config { ChainName: "solana", ChainID: "localnet", // Will be overridden if set in toml - RPCUrl: "http://sol:8899", - WSUrl: "ws://sol:8900", + RPCUrls: 
[]string{"http://sol:8899"}, + WSUrls: []string{"ws://sol:8900"}, ProgramAddresses: &ProgramAddresses{ OCR2: "E3j24rx12SyVsG6quKuZPbQqZPkhAUCh8Uek4XrKYD2x", AccessController: "2ckhep7Mvy1dExenBqpcdevhRu7CLuuctMcx7G9mWEvo", diff --git a/integration-tests/scripts/run_soak_test.sh b/integration-tests/scripts/run_soak_test.sh new file mode 100755 index 000000000..7e5490859 --- /dev/null +++ b/integration-tests/scripts/run_soak_test.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +NODE_VERSION=18 + +cd ../smoke || exit + +echo "Switching to required Node.js version $NODE_VERSION..." +export NVM_DIR="$HOME/.nvm" +[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" +nvm use $NODE_VERSION + +echo "Initializing soak test..." +terminated_by_script=false +while IFS= read -r line; do + echo "$line" + # Check if the line contains the target string + if echo "$line" | grep -q "ocr2:inspect:responses"; then + # Send SIGINT (Ctrl+C) to the 'go test' process + sudo pkill -INT -P $$ go 2>/dev/null + terminated_by_script=true + break + fi +done < <(sudo go test -timeout 24h -count=1 -run TestSolanaOCRV2Smoke/embedded -test.timeout 30m 2>&1) + +# Capture the PID of the background process +READER_PID=$! + +# Start a background timer (sleeps for 15 minutes, then sends SIGALRM to the script) +( sleep 900 && kill -s ALRM $$ ) & +TIMER_PID=$! + +# Set a trap to catch the SIGALRM signal for timeout +trap 'on_timeout' ALRM + +# Function to handle timeout +on_timeout() { + echo "Error: failed to start soak test: timeout exceeded (15 minutes)." + # Send SIGINT to the 'go test' process + pkill -INT -P $$ go 2>/dev/null + # Clean up + kill "$TIMER_PID" 2>/dev/null + kill "$READER_PID" 2>/dev/null + exit 1 +} + +# Wait for the reader process to finish +wait "$READER_PID" +EXIT_STATUS=$? 
+ +# Clean up: kill the timer process if it's still running +kill "$TIMER_PID" 2>/dev/null + +if [ "$terminated_by_script" = true ]; then + echo "Soak test started successfully" + exit 0 +else + echo "Soak test failed to start" + exit 1 +fi diff --git a/integration-tests/solclient/solclient.go b/integration-tests/solclient/solclient.go index 7b3921c19..2d5f52ac7 100644 --- a/integration-tests/solclient/solclient.go +++ b/integration-tests/solclient/solclient.go @@ -481,7 +481,7 @@ func SendFunds(senderPrivateKey string, receiverPublicKey string, lamports uint6 accountTo := solana.MustPublicKeyFromBase58(receiverPublicKey) // Get recent blockhash - recent, err := rpcClient.GetRecentBlockhash(context.Background(), rpc.CommitmentFinalized) + recent, err := rpcClient.GetLatestBlockhash(context.Background(), rpc.CommitmentFinalized) if err != nil { return err } diff --git a/integration-tests/solclient/store.go b/integration-tests/solclient/store.go index 238d5cc31..3bc48bec9 100644 --- a/integration-tests/solclient/store.go +++ b/integration-tests/solclient/store.go @@ -8,6 +8,7 @@ import ( "github.com/smartcontractkit/chainlink-solana/contracts/generated/store" relaySol "github.com/smartcontractkit/chainlink-solana/pkg/solana" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" ) type Store struct { @@ -19,7 +20,8 @@ type Store struct { } func (m *Store) GetLatestRoundData() (uint64, uint64, uint64, error) { - a, _, err := relaySol.GetLatestTransmission(context.Background(), m.Client.RPC, m.Feed.PublicKey(), rpc.CommitmentConfirmed) + getReader := func() (client.AccountReader, error) { return m.Client.RPC, nil } + a, _, err := relaySol.GetLatestTransmission(context.Background(), getReader, m.Feed.PublicKey(), rpc.CommitmentConfirmed) if err != nil { return 0, 0, 0, err } diff --git a/integration-tests/testconfig/testconfig.go b/integration-tests/testconfig/testconfig.go index 394d2bcee..1f482b7f5 100644 --- a/integration-tests/testconfig/testconfig.go 
+++ b/integration-tests/testconfig/testconfig.go @@ -44,7 +44,7 @@ type TestConfig struct { // getter funcs for passing parameters GetChainID func() string - GetURL func() string + GetURL func() []string } const ( @@ -188,22 +188,22 @@ func (c *TestConfig) ReadFromEnvVar() error { c.Network.RpcWsUrls = rpcWsUrls } - commonRPCURL := ctf_config.MustReadEnvVar_String(E2E_TEST_COMMON_RPC_URL_ENV) - if commonRPCURL != "" { + commonRPCURL := ctf_config.MustReadEnvVar_Strings(E2E_TEST_COMMON_RPC_URL_ENV, ",") + if len(commonRPCURL) > 0 { if c.Common == nil { c.Common = &Common{} } - logger.Info().Msgf("Using %s env var to override Common.RPCURL", E2E_TEST_COMMON_RPC_URL_ENV) - c.Common.RPCURL = &commonRPCURL + logger.Info().Msgf("Using %s env var to override Common.RPCURLs", E2E_TEST_COMMON_RPC_URL_ENV) + c.Common.RPCURLs = &commonRPCURL } - commonWSURL := ctf_config.MustReadEnvVar_String(E2E_TEST_COMMON_WS_URL_ENV) - if commonWSURL != "" { + commonWSURL := ctf_config.MustReadEnvVar_Strings(E2E_TEST_COMMON_WS_URL_ENV, ",") + if len(commonWSURL) > 0 { if c.Common == nil { c.Common = &Common{} } - logger.Info().Msgf("Using %s env var to override Common.WsURL", E2E_TEST_COMMON_WS_URL_ENV) - c.Common.WsURL = &commonWSURL + logger.Info().Msgf("Using %s env var to override Common.WsURLs", E2E_TEST_COMMON_WS_URL_ENV) + c.Common.WsURLs = &commonWSURL } commonPrivateKey := ctf_config.MustReadEnvVar_String(E2E_TEST_COMMON_PRIVATE_KEY_ENV) @@ -256,7 +256,8 @@ func (c *TestConfig) GetNodeConfig() *ctf_config.NodeConfig { } func (c *TestConfig) GetNodeConfigTOML() (string, error) { - var chainID, url string + var chainID string + var url []string if c.GetChainID != nil { chainID = c.GetChainID() } @@ -264,16 +265,35 @@ func (c *TestConfig) GetNodeConfigTOML() (string, error) { url = c.GetURL() } - solConfig := solcfg.TOMLConfig{ - Enabled: ptr.Ptr(true), - ChainID: ptr.Ptr(chainID), - Nodes: []*solcfg.Node{ - { - Name: ptr.Ptr("primary"), - URL: config.MustParseURL(url), - }, + 
mnConfig := solcfg.MultiNodeConfig{ + MultiNode: solcfg.MultiNode{ + Enabled: ptr.Ptr(true), + SyncThreshold: ptr.Ptr(uint32(170)), }, } + mnConfig.SetDefaults() + + var nodes []*solcfg.Node + for i, u := range url { + nodes = append(nodes, &solcfg.Node{ + Name: ptr.Ptr(fmt.Sprintf("primary-%d", i)), + URL: config.MustParseURL(u), + }) + } + + chainCfg := solcfg.Chain{ + // Increase timeout for TransactionSender + TxTimeout: config.MustNewDuration(2 * time.Minute), + } + chainCfg.SetDefaults() + + solConfig := solcfg.TOMLConfig{ + Enabled: ptr.Ptr(true), + ChainID: ptr.Ptr(chainID), + Nodes: nodes, + MultiNode: mnConfig, + Chain: chainCfg, + } baseConfig := node.NewBaseConfig() baseConfig.Solana = solcfg.TOMLConfigs{ &solConfig, @@ -357,12 +377,12 @@ type Common struct { InsideK8s *bool `toml:"inside_k8"` User *string `toml:"user"` // if rpc requires api key to be passed as an HTTP header - RPCURL *string `toml:"-"` - WsURL *string `toml:"-"` - PrivateKey *string `toml:"-"` - Stateful *bool `toml:"stateful_db"` - InternalDockerRepo *string `toml:"internal_docker_repo"` - DevnetImage *string `toml:"devnet_image"` + RPCURLs *[]string `toml:"-"` + WsURLs *[]string `toml:"-"` + PrivateKey *string `toml:"-"` + Stateful *bool `toml:"stateful_db"` + InternalDockerRepo *string `toml:"internal_docker_repo"` + DevnetImage *string `toml:"devnet_image"` } type SolanaConfig struct { @@ -410,10 +430,10 @@ func (c *Common) Validate() error { if c.PrivateKey == nil { return fmt.Errorf("private_key must be set") } - if c.RPCURL == nil { + if c.RPCURLs == nil { return fmt.Errorf("rpc_url must be set") } - if c.WsURL == nil { + if c.WsURLs == nil { return fmt.Errorf("rpc_url must be set") } diff --git a/pkg/monitoring/chain_reader.go b/pkg/monitoring/chain_reader.go index eb4d4b8e5..9b8c8ebff 100644 --- a/pkg/monitoring/chain_reader.go +++ b/pkg/monitoring/chain_reader.go @@ -7,6 +7,7 @@ import ( "github.com/gagliardetto/solana-go/rpc" pkgSolana 
"github.com/smartcontractkit/chainlink-solana/pkg/solana" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" ) //go:generate mockery --name ChainReader --output ./mocks/ @@ -31,11 +32,13 @@ type chainReader struct { } func (c *chainReader) GetState(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType) (state pkgSolana.State, blockHeight uint64, err error) { - return pkgSolana.GetState(ctx, c.client, account, commitment) + getReader := func() (client.AccountReader, error) { return c.client, nil } + return pkgSolana.GetState(ctx, getReader, account, commitment) } func (c *chainReader) GetLatestTransmission(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType) (answer pkgSolana.Answer, blockHeight uint64, err error) { - return pkgSolana.GetLatestTransmission(ctx, c.client, account, commitment) + getReader := func() (client.AccountReader, error) { return c.client, nil } + return pkgSolana.GetLatestTransmission(ctx, getReader, account, commitment) } func (c *chainReader) GetTokenAccountBalance(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType) (out *rpc.GetTokenAccountBalanceResult, err error) { diff --git a/pkg/solana/cache_test.go b/pkg/solana/cache_test.go index e39bb52ad..a9126d0ac 100644 --- a/pkg/solana/cache_test.go +++ b/pkg/solana/cache_test.go @@ -106,8 +106,10 @@ func TestGetState(t *testing.T) { })) defer mockServer.Close() + reader := testSetupReader(t, mockServer.URL) + getReader := func() (client.AccountReader, error) { return reader, nil } // happy path does not error (actual state decoding handled in types_test) - _, _, err := GetState(context.TODO(), testSetupReader(t, mockServer.URL), solana.PublicKey{}, "") + _, _, err := GetState(context.TODO(), getReader, solana.PublicKey{}, "") require.NoError(t, err) } @@ -133,17 +135,18 @@ func TestGetLatestTransmission(t *testing.T) { defer mockServer.Close() reader := testSetupReader(t, mockServer.URL) - a, _, err := 
GetLatestTransmission(context.TODO(), reader, solana.PublicKey{}, "") + getReader := func() (client.AccountReader, error) { return reader, nil } + a, _, err := GetLatestTransmission(context.TODO(), getReader, solana.PublicKey{}, "") assert.NoError(t, err) assert.Equal(t, expectedTime, a.Timestamp) assert.Equal(t, expectedAns, a.Data.String()) // fail if returned transmission header is too short - _, _, err = GetLatestTransmission(context.TODO(), reader, solana.PublicKey{}, "") + _, _, err = GetLatestTransmission(context.TODO(), getReader, solana.PublicKey{}, "") assert.Error(t, err) // fail if returned transmission is too short - _, _, err = GetLatestTransmission(context.TODO(), reader, solana.PublicKey{}, "") + _, _, err = GetLatestTransmission(context.TODO(), getReader, solana.PublicKey{}, "") assert.Error(t, err) } @@ -166,12 +169,15 @@ func TestCache(t *testing.T) { w.Write(testTransmissionsResponse(t, body, 0)) //nolint:errcheck })) + reader := testSetupReader(t, mockServer.URL) + getAccountReader := func() (client.AccountReader, error) { return reader, nil } + lggr := logger.Test(t) stateCache := NewStateCache( solana.MustPublicKeyFromBase58("11111111111111111111111111111111"), "test-chain-id", config.NewDefault(), - testSetupReader(t, mockServer.URL), + getAccountReader, lggr, ) require.NoError(t, stateCache.Start(ctx)) @@ -186,7 +192,7 @@ func TestCache(t *testing.T) { solana.MustPublicKeyFromBase58("11111111111111111111111111111112"), "test-chain-id", config.NewDefault(), - testSetupReader(t, mockServer.URL), + getAccountReader, lggr, ) require.NoError(t, transmissionsCache.Start(ctx)) @@ -220,17 +226,19 @@ func TestNilPointerHandling(t *testing.T) { defer mockServer.Close() errString := "nil pointer returned in " + reader := testSetupReader(t, mockServer.URL) + getReader := func() (client.AccountReader, error) { return reader, nil } // fail on get state query - _, _, err := GetState(context.TODO(), reader, solana.PublicKey{}, "") + _, _, err := 
GetState(context.TODO(), getReader, solana.PublicKey{}, "") assert.EqualError(t, err, errString+"GetState.GetAccountInfoWithOpts") // fail on transmissions header query - _, _, err = GetLatestTransmission(context.TODO(), reader, solana.PublicKey{}, "") + _, _, err = GetLatestTransmission(context.TODO(), getReader, solana.PublicKey{}, "") assert.EqualError(t, err, errString+"GetLatestTransmission.GetAccountInfoWithOpts.Header") passFirst = true // allow proper response for header query, fail on transmission - _, _, err = GetLatestTransmission(context.TODO(), reader, solana.PublicKey{}, "") + _, _, err = GetLatestTransmission(context.TODO(), getReader, solana.PublicKey{}, "") assert.EqualError(t, err, errString+"GetLatestTransmission.GetAccountInfoWithOpts.Transmission") } diff --git a/pkg/solana/chain.go b/pkg/solana/chain.go index 55b199912..ab901a548 100644 --- a/pkg/solana/chain.go +++ b/pkg/solana/chain.go @@ -296,10 +296,7 @@ func newChain(id string, cfg *config.TOMLConfig, ks loop.Keystore, lggr logger.L if result == nil { return solanago.Signature{}, errors.New("tx sender returned nil result") } - if result.Error() != nil { - return solanago.Signature{}, result.Error() - } - return result.Signature(), result.TxError() + return result.Signature(), result.Error() } tc = internal.NewLoader[client.ReaderWriter](func() (client.ReaderWriter, error) { return ch.multiNode.SelectRPC() }) diff --git a/pkg/solana/chain_test.go b/pkg/solana/chain_test.go index b5e9adaf8..db011d80e 100644 --- a/pkg/solana/chain_test.go +++ b/pkg/solana/chain_test.go @@ -472,8 +472,7 @@ func TestChain_MultiNode_TransactionSender(t *testing.T) { // Send tx using transaction sender result := c.txSender.SendTransaction(ctx, unsignedTx(receiver.PublicKey())) require.NotNil(t, result) - require.NoError(t, result.Error()) - require.Error(t, result.TxError()) + require.Error(t, result.Error()) require.Equal(t, mn.Fatal, result.Code()) require.Empty(t, result.Signature()) }) @@ -481,8 +480,7 @@ 
func TestChain_MultiNode_TransactionSender(t *testing.T) { t.Run("empty transaction", func(t *testing.T) { result := c.txSender.SendTransaction(ctx, &solana.Transaction{}) require.NotNil(t, result) - require.NoError(t, result.Error()) - require.Error(t, result.TxError()) + require.Error(t, result.Error()) require.Equal(t, mn.Fatal, result.Code()) require.Empty(t, result.Signature()) }) diff --git a/pkg/solana/client/multinode/multi_node.go b/pkg/solana/client/multinode/multi_node.go index bd97ebc7b..92a65912b 100644 --- a/pkg/solana/client/multinode/multi_node.go +++ b/pkg/solana/client/multinode/multi_node.go @@ -372,6 +372,6 @@ func (c *MultiNode[CHAIN_ID, RPC]) report(nodesStateInfo []nodeWithState) { c.lggr.Criticalw(rerr.Error(), "nodeStates", nodesStateInfo) c.SvcErrBuffer.Append(rerr) } else if dead > 0 { - c.lggr.Errorw(fmt.Sprintf("At least one primary node is dead: %d/%d nodes are alive", live, total), "nodeStates", nodesStateInfo) + c.lggr.Warnw(fmt.Sprintf("At least one primary node is dead: %d/%d nodes are alive", live, total), "nodeStates", nodesStateInfo) } } diff --git a/pkg/solana/client/multinode/node_lifecycle.go b/pkg/solana/client/multinode/node_lifecycle.go index d6b150690..bca637a22 100644 --- a/pkg/solana/client/multinode/node_lifecycle.go +++ b/pkg/solana/client/multinode/node_lifecycle.go @@ -128,6 +128,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } } + // Get the latest chain info to use as local highest localHighestChainInfo, _ := n.rpc.GetInterceptedChainInfo() var pollFailures uint32 @@ -164,10 +165,8 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { n.declareUnreachable() return } - _, latestChainInfo := n.StateAndLatest() - if outOfSync, liveNodes := n.isOutOfSyncWithPool(latestChainInfo); outOfSync { + if outOfSync, liveNodes := n.isOutOfSyncWithPool(); outOfSync { // note: there must be another live node for us to be out of sync - lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", latestChainInfo.BlockNumber, 
"totalDifficulty", latestChainInfo.TotalDifficulty, "nodeState", n.getCachedState()) if liveNodes < 2 { lggr.Criticalf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState) continue @@ -306,9 +305,9 @@ func (n *node[CHAIN_ID, HEAD, RPC]) onNewFinalizedHead(lggr logger.SugaredLogger } latestFinalizedBN := latestFinalized.BlockNumber() - lggr.Tracew("Got latest finalized head", "latestFinalized", latestFinalized) + lggr.Debugw("Got latest finalized head", "latestFinalized", latestFinalized) if latestFinalizedBN <= chainInfo.FinalizedBlockNumber { - lggr.Tracew("Ignoring previously seen finalized block number") + lggr.Debugw("Ignoring previously seen finalized block number") return false } @@ -324,10 +323,10 @@ func (n *node[CHAIN_ID, HEAD, RPC]) onNewHead(lggr logger.SugaredLogger, chainIn } promPoolRPCNodeNumSeenBlocks.WithLabelValues(n.chainID.String(), n.name).Inc() - lggr.Tracew("Got head", "head", head) + lggr.Debugw("Got head", "head", head) lggr = lggr.With("latestReceivedBlockNumber", chainInfo.BlockNumber, "blockNumber", head.BlockNumber(), "nodeState", n.getCachedState()) if head.BlockNumber() <= chainInfo.BlockNumber { - lggr.Tracew("Ignoring previously seen block number") + lggr.Debugw("Ignoring previously seen block number") return false } @@ -354,7 +353,7 @@ const ( // isOutOfSyncWithPool returns outOfSync true if num or td is more than SyncThresold behind the best node. // Always returns outOfSync false for SyncThreshold 0. // liveNodes is only included when outOfSync is true. 
-func (n *node[CHAIN_ID, HEAD, RPC]) isOutOfSyncWithPool(localState ChainInfo) (outOfSync bool, liveNodes int) { +func (n *node[CHAIN_ID, HEAD, RPC]) isOutOfSyncWithPool() (outOfSync bool, liveNodes int) { if n.poolInfoProvider == nil { n.lfcLog.Warn("skipping sync state against the pool - should only occur in tests") return // skip for tests @@ -365,16 +364,22 @@ func (n *node[CHAIN_ID, HEAD, RPC]) isOutOfSyncWithPool(localState ChainInfo) (o } // Check against best node ln, ci := n.poolInfoProvider.LatestChainInfo() + localChainInfo, _ := n.rpc.GetInterceptedChainInfo() mode := n.nodePoolCfg.SelectionMode() switch mode { case NodeSelectionModeHighestHead, NodeSelectionModeRoundRobin, NodeSelectionModePriorityLevel: - return localState.BlockNumber < ci.BlockNumber-int64(threshold), ln + outOfSync = localChainInfo.BlockNumber < ci.BlockNumber-int64(threshold) case NodeSelectionModeTotalDifficulty: bigThreshold := big.NewInt(int64(threshold)) - return localState.TotalDifficulty.Cmp(bigmath.Sub(ci.TotalDifficulty, bigThreshold)) < 0, ln + outOfSync = localChainInfo.TotalDifficulty.Cmp(bigmath.Sub(ci.TotalDifficulty, bigThreshold)) < 0 default: panic("unrecognized NodeSelectionMode: " + mode) } + + if outOfSync && n.getCachedState() == NodeStateAlive { + n.lfcLog.Errorw("RPC endpoint has fallen behind", "blockNumber", localChainInfo.BlockNumber, "bestLatestBlockNumber", ci.BlockNumber, "totalDifficulty", localChainInfo.TotalDifficulty) + } + return outOfSync, ln } // outOfSyncLoop takes an OutOfSync node and waits until isOutOfSync returns false to go back to live status @@ -460,7 +465,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(syncIssues syncStatus) { // received a new head - clear NoNewHead flag syncIssues &= ^syncStatusNoNewHead - if outOfSync, _ := n.isOutOfSyncWithPool(localHighestChainInfo); !outOfSync { + if outOfSync, _ := n.isOutOfSyncWithPool(); !outOfSync { // we caught up with the pool - clear NotInSyncWithPool flag syncIssues &= 
^syncStatusNotInSyncWithPool } else { @@ -511,7 +516,12 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(syncIssues syncStatus) { finalizedHeadsSub.ResetTimer(noNewFinalizedBlocksTimeoutThreshold) } - lggr.Debugw(msgReceivedFinalizedBlock, "blockNumber", latestFinalized.BlockNumber(), "syncIssues", syncIssues) + var highestSeen ChainInfo + if n.poolInfoProvider != nil { + highestSeen = n.poolInfoProvider.HighestUserObservations() + } + + lggr.Debugw(msgReceivedFinalizedBlock, "blockNumber", latestFinalized.BlockNumber(), "poolHighestBlockNumber", highestSeen.FinalizedBlockNumber, "syncIssues", syncIssues) case err := <-finalizedHeadsSub.Errors: lggr.Errorw("Finalized head subscription was terminated", "err", err) n.declareUnreachable() diff --git a/pkg/solana/client/multinode/poller.go b/pkg/solana/client/multinode/poller.go index 9ebe1dcfc..0ce87fade 100644 --- a/pkg/solana/client/multinode/poller.go +++ b/pkg/solana/client/multinode/poller.go @@ -65,7 +65,7 @@ func (p *Poller[T]) Err() <-chan error { } func (p *Poller[T]) pollingLoop(ctx context.Context) { - ticker := time.NewTicker(p.pollingInterval) + ticker := services.NewTicker(p.pollingInterval) defer ticker.Stop() for { diff --git a/pkg/solana/client/multinode/transaction_sender.go b/pkg/solana/client/multinode/transaction_sender.go index bd11a71a5..06b2e18be 100644 --- a/pkg/solana/client/multinode/transaction_sender.go +++ b/pkg/solana/client/multinode/transaction_sender.go @@ -26,7 +26,6 @@ var ( type SendTxResult interface { Code() SendTxReturnCode - TxError() error Error() error } @@ -92,89 +91,84 @@ type TransactionSender[TX any, RESULT SendTxResult, CHAIN_ID ID, RPC SendTxRPCCl // * If there is both success and terminal error - returns success and reports invariant violation // * Otherwise, returns any (effectively random) of the errors. 
func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) SendTransaction(ctx context.Context, tx TX) RESULT { - txResults := make(chan RESULT) - txResultsToReport := make(chan RESULT) - primaryNodeWg := sync.WaitGroup{} - - if txSender.State() != "Started" { - return txSender.newResult(errors.New("TransactionSender not started")) - } + var result RESULT + if !txSender.IfStarted(func() { + txResults := make(chan RESULT) + txResultsToReport := make(chan RESULT) + primaryNodeWg := sync.WaitGroup{} + + healthyNodesNum := 0 + err := txSender.multiNode.DoAll(ctx, func(ctx context.Context, rpc RPC, isSendOnly bool) { + if isSendOnly { + txSender.wg.Add(1) + go func(ctx context.Context) { + ctx, cancel := txSender.chStop.Ctx(context.WithoutCancel(ctx)) + defer cancel() + defer txSender.wg.Done() + // Send-only nodes' results are ignored as they tend to return false-positive responses. + // Broadcast to them is necessary to speed up the propagation of TX in the network. + _ = txSender.broadcastTxAsync(ctx, rpc, tx) + }(ctx) + return + } - txSenderCtx, cancel := txSender.chStop.NewCtx() - reportWg := sync.WaitGroup{} - defer func() { + // Primary Nodes + healthyNodesNum++ + primaryNodeWg.Add(1) + go func(ctx context.Context) { + ctx, cancel := txSender.chStop.Ctx(context.WithoutCancel(ctx)) + defer cancel() + defer primaryNodeWg.Done() + r := txSender.broadcastTxAsync(ctx, rpc, tx) + select { + case <-ctx.Done(): + return + case txResults <- r: + } + + select { + case <-ctx.Done(): + return + case txResultsToReport <- r: + } + }(ctx) + }) + + // This needs to be done in parallel so the reporting knows when it's done (when the channel is closed) + txSender.wg.Add(1) go func() { - reportWg.Wait() - cancel() + defer txSender.wg.Done() + primaryNodeWg.Wait() + close(txResultsToReport) + close(txResults) }() - }() - - healthyNodesNum := 0 - err := txSender.multiNode.DoAll(txSenderCtx, func(ctx context.Context, rpc RPC, isSendOnly bool) { - if isSendOnly { - 
txSender.wg.Add(1) - go func() { - defer txSender.wg.Done() - // Send-only nodes' results are ignored as they tend to return false-positive responses. - // Broadcast to them is necessary to speed up the propagation of TX in the network. - _ = txSender.broadcastTxAsync(ctx, rpc, tx) - }() + + if err != nil { + result = txSender.newResult(err) return } - // Primary Nodes - healthyNodesNum++ - primaryNodeWg.Add(1) - go func() { - defer primaryNodeWg.Done() - r := txSender.broadcastTxAsync(ctx, rpc, tx) - select { - case <-ctx.Done(): - return - case txResults <- r: - } + txSender.wg.Add(1) + go txSender.reportSendTxAnomalies(ctx, tx, txResultsToReport) - select { - case <-ctx.Done(): - return - case txResultsToReport <- r: - } - }() - }) - - // This needs to be done in parallel so the reporting knows when it's done (when the channel is closed) - txSender.wg.Add(1) - go func() { - defer txSender.wg.Done() - primaryNodeWg.Wait() - close(txResultsToReport) - close(txResults) - }() - - if err != nil { - return txSender.newResult(err) + result = txSender.collectTxResults(ctx, tx, healthyNodesNum, txResults) + }) { + result = txSender.newResult(errors.New("TransactionSender not started")) } - txSender.wg.Add(1) - reportWg.Add(1) - go func() { - defer reportWg.Done() - txSender.reportSendTxAnomalies(tx, txResultsToReport) - }() - - return txSender.collectTxResults(ctx, tx, healthyNodesNum, txResults) + return result } func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) broadcastTxAsync(ctx context.Context, rpc RPC, tx TX) RESULT { result := rpc.SendTransaction(ctx, tx) - txSender.lggr.Debugw("Node sent transaction", "tx", tx, "err", result.TxError()) - if !slices.Contains(sendTxSuccessfulCodes, result.Code()) { - txSender.lggr.Warnw("RPC returned error", "tx", tx, "err", result.TxError()) + txSender.lggr.Debugw("Node sent transaction", "tx", tx, "err", result.Error()) + if !slices.Contains(sendTxSuccessfulCodes, result.Code()) && ctx.Err() == nil { + 
txSender.lggr.Warnw("RPC returned error", "tx", tx, "err", result.Error()) } return result } -func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) reportSendTxAnomalies(tx TX, txResults <-chan RESULT) { +func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) reportSendTxAnomalies(ctx context.Context, tx TX, txResults <-chan RESULT) { defer txSender.wg.Done() resultsByCode := sendTxResults[RESULT]{} // txResults eventually will be closed @@ -183,7 +177,7 @@ func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) reportSendTxAnomal } _, criticalErr := aggregateTxResults[RESULT](resultsByCode) - if criticalErr != nil { + if criticalErr != nil && ctx.Err() == nil { txSender.lggr.Criticalw("observed invariant violation on SendTransaction", "tx", tx, "resultsByCode", resultsByCode, "err", criticalErr) PromMultiNodeInvariantViolations.WithLabelValues(txSender.chainFamily, txSender.chainID.String(), criticalErr.Error()).Inc() } @@ -256,6 +250,7 @@ loop: // ignore critical error as it's reported in reportSendTxAnomalies result, _ := aggregateTxResults(errorsByCode) + txSender.lggr.Debugw("Collected results", "errorsByCode", errorsByCode, "result", result) return result } @@ -267,6 +262,7 @@ func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) Start(ctx context. 
func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) Close() error { return txSender.StopOnce("TransactionSender", func() error { + txSender.lggr.Debug("Closing TransactionSender") close(txSender.chStop) txSender.wg.Wait() return nil diff --git a/pkg/solana/client/multinode_client.go b/pkg/solana/client/multinode_client.go index 0a68b78f6..e6a70de9c 100644 --- a/pkg/solana/client/multinode_client.go +++ b/pkg/solana/client/multinode_client.go @@ -37,7 +37,7 @@ func (h *Head) BlockDifficulty() *big.Int { } func (h *Head) IsValid() bool { - return h != nil && h.BlockHeight != nil && h.BlockHash != nil + return h != nil && h.BlockHeight != nil && *h.BlockHeight > 0 && h.BlockHash != nil } var _ mn.RPCClient[mn.StringID, *Head] = (*MultiNodeClient)(nil) @@ -102,12 +102,19 @@ func (m *MultiNodeClient) SubscribeToHeads(ctx context.Context) (<-chan *Head, m ctx, cancel, chStopInFlight, _ := m.acquireQueryCtx(ctx, m.cfg.TxTimeout()) defer cancel() - pollInterval := m.cfg.MultiNode.PollInterval() + // TODO: BCFR-1070 - Add BlockPollInterval + pollInterval := m.cfg.MultiNode.FinalizedBlockPollInterval() // Use same interval as finalized polling if pollInterval == 0 { return nil, nil, errors.New("PollInterval is 0") } timeout := pollInterval - poller, channel := mn.NewPoller[*Head](pollInterval, m.LatestBlock, timeout, m.log) + poller, channel := mn.NewPoller[*Head](pollInterval, func(pollRequestCtx context.Context) (*Head, error) { + if mn.CtxIsHeathCheckRequest(ctx) { + pollRequestCtx = mn.CtxAddHealthCheckFlag(pollRequestCtx) + } + return m.LatestBlock(pollRequestCtx) + }, timeout, m.log) + if err := poller.Start(ctx); err != nil { return nil, nil, err } @@ -130,7 +137,12 @@ func (m *MultiNodeClient) SubscribeToFinalizedHeads(ctx context.Context) (<-chan return nil, nil, errors.New("FinalizedBlockPollInterval is 0") } timeout := finalizedBlockPollInterval - poller, channel := mn.NewPoller[*Head](finalizedBlockPollInterval, m.LatestFinalizedBlock, timeout, m.log) + 
poller, channel := mn.NewPoller[*Head](finalizedBlockPollInterval, func(pollRequestCtx context.Context) (*Head, error) { + if mn.CtxIsHeathCheckRequest(ctx) { + pollRequestCtx = mn.CtxAddHealthCheckFlag(pollRequestCtx) + } + return m.LatestFinalizedBlock(pollRequestCtx) + }, timeout, m.log) if err := poller.Start(ctx); err != nil { return nil, nil, err } @@ -158,6 +170,10 @@ func (m *MultiNodeClient) LatestBlock(ctx context.Context) (*Head, error) { BlockHeight: &result.Value.LastValidBlockHeight, BlockHash: &result.Value.Blockhash, } + if !head.IsValid() { + return nil, errors.New("invalid head") + } + m.onNewHead(ctx, chStopInFlight, head) return head, nil } @@ -175,6 +191,10 @@ func (m *MultiNodeClient) LatestFinalizedBlock(ctx context.Context) (*Head, erro BlockHeight: &result.Value.LastValidBlockHeight, BlockHash: &result.Value.Blockhash, } + if !head.IsValid() { + return nil, errors.New("invalid head") + } + m.onNewFinalizedHead(ctx, chStopInFlight, head) return head, nil } @@ -301,18 +321,16 @@ func (m *MultiNodeClient) GetInterceptedChainInfo() (latest, highestUserObservat } type SendTxResult struct { - err error - txErr error - code mn.SendTxReturnCode - sig solana.Signature + err error + code mn.SendTxReturnCode + sig solana.Signature } var _ mn.SendTxResult = (*SendTxResult)(nil) func NewSendTxResult(err error) *SendTxResult { result := &SendTxResult{ - err: err, - txErr: err, + err: err, } result.code = ClassifySendError(nil, err) return result @@ -322,10 +340,6 @@ func (r *SendTxResult) Error() error { return r.err } -func (r *SendTxResult) TxError() error { - return r.txErr -} - func (r *SendTxResult) Code() mn.SendTxReturnCode { return r.code } @@ -336,7 +350,7 @@ func (r *SendTxResult) Signature() solana.Signature { func (m *MultiNodeClient) SendTransaction(ctx context.Context, tx *solana.Transaction) *SendTxResult { var sendTxResult = &SendTxResult{} - sendTxResult.sig, sendTxResult.txErr = m.SendTx(ctx, tx) - sendTxResult.code = 
ClassifySendError(tx, sendTxResult.txErr) + sendTxResult.sig, sendTxResult.err = m.SendTx(ctx, tx) + sendTxResult.code = ClassifySendError(tx, sendTxResult.err) return sendTxResult } diff --git a/pkg/solana/config/multinode.go b/pkg/solana/config/multinode.go index 0c49d8b22..d002d489e 100644 --- a/pkg/solana/config/multinode.go +++ b/pkg/solana/config/multinode.go @@ -97,17 +97,17 @@ func (c *MultiNodeConfig) SetDefaults() { if c.MultiNode.PollFailureThreshold == nil { c.MultiNode.PollFailureThreshold = ptr(uint32(5)) } - // Poll interval is set to 10 seconds to ensure timely updates while minimizing resource usage. + // Poll interval is set to 15 seconds to ensure timely updates while minimizing resource usage. if c.MultiNode.PollInterval == nil { - c.MultiNode.PollInterval = config.MustNewDuration(10 * time.Second) + c.MultiNode.PollInterval = config.MustNewDuration(15 * time.Second) } // Selection mode defaults to priority level to enable using node priorities if c.MultiNode.SelectionMode == nil { c.MultiNode.SelectionMode = ptr(mn.NodeSelectionModePriorityLevel) } - // The sync threshold is set to 5 to allow for some flexibility in node synchronization before considering it out of sync. + // The sync threshold is set to 10 to allow for some flexibility in node synchronization before considering it out of sync. if c.MultiNode.SyncThreshold == nil { - c.MultiNode.SyncThreshold = ptr(uint32(5)) + c.MultiNode.SyncThreshold = ptr(uint32(10)) } // Lease duration is set to 1 minute by default to allow node locks for a reasonable amount of time. if c.MultiNode.LeaseDuration == nil { @@ -125,19 +125,19 @@ func (c *MultiNodeConfig) SetDefaults() { if c.MultiNode.EnforceRepeatableRead == nil { c.MultiNode.EnforceRepeatableRead = ptr(true) } - // The delay before declaring a node dead is set to 10 seconds to give nodes time to recover from temporary issues. 
+ // The delay before declaring a node dead is set to 20 seconds to give nodes time to recover from temporary issues. if c.MultiNode.DeathDeclarationDelay == nil { - c.MultiNode.DeathDeclarationDelay = config.MustNewDuration(10 * time.Second) + c.MultiNode.DeathDeclarationDelay = config.MustNewDuration(20 * time.Second) } /* Chain Configs */ - // Threshold for no new heads is set to 10 seconds, assuming that heads should update at a reasonable pace. + // Threshold for no new heads is set to 20 seconds, assuming that heads should update at a reasonable pace. if c.MultiNode.NodeNoNewHeadsThreshold == nil { - c.MultiNode.NodeNoNewHeadsThreshold = config.MustNewDuration(10 * time.Second) + c.MultiNode.NodeNoNewHeadsThreshold = config.MustNewDuration(20 * time.Second) } - // Similar to heads, finalized heads should be updated within 10 seconds. + // Similar to heads, finalized heads should be updated within 20 seconds. if c.MultiNode.NoNewFinalizedHeadsThreshold == nil { - c.MultiNode.NoNewFinalizedHeadsThreshold = config.MustNewDuration(10 * time.Second) + c.MultiNode.NoNewFinalizedHeadsThreshold = config.MustNewDuration(20 * time.Second) } // Finality tags are used in Solana and enabled by default. if c.MultiNode.FinalityTagEnabled == nil { @@ -147,9 +147,9 @@ func (c *MultiNodeConfig) SetDefaults() { if c.MultiNode.FinalityDepth == nil { c.MultiNode.FinalityDepth = ptr(uint32(0)) } - // Finalized block offset will not be used since finality tags are enabled. + // Finalized block offset allows for RPCs to be slightly behind the finalized block. 
if c.MultiNode.FinalizedBlockOffset == nil { - c.MultiNode.FinalizedBlockOffset = ptr(uint32(0)) + c.MultiNode.FinalizedBlockOffset = ptr(uint32(50)) } } diff --git a/pkg/solana/config_tracker.go b/pkg/solana/config_tracker.go index 998790b45..3ddff2715 100644 --- a/pkg/solana/config_tracker.go +++ b/pkg/solana/config_tracker.go @@ -5,13 +5,11 @@ import ( "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median" "github.com/smartcontractkit/libocr/offchainreporting2/types" - - "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" ) type ConfigTracker struct { stateCache *StateCache - reader client.Reader + getReader GetReader } func (c *ConfigTracker) Notify() <-chan struct{} { @@ -75,5 +73,9 @@ func (c *ConfigTracker) LatestConfig(ctx context.Context, changedInBlock uint64) // LatestBlockHeight returns the height of the most recent block in the chain. func (c *ConfigTracker) LatestBlockHeight(ctx context.Context) (blockHeight uint64, err error) { - return c.reader.SlotHeight(ctx) // this returns the latest slot height through CommitmentProcessed + reader, err := c.getReader() + if err != nil { + return 0, err + } + return reader.SlotHeight(ctx) // this returns the latest slot height through CommitmentProcessed } diff --git a/pkg/solana/config_tracker_test.go b/pkg/solana/config_tracker_test.go index 1e88d4ecd..d0e2d8625 100644 --- a/pkg/solana/config_tracker_test.go +++ b/pkg/solana/config_tracker_test.go @@ -8,6 +8,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" ) func TestLatestBlockHeight(t *testing.T) { @@ -19,7 +21,7 @@ func TestLatestBlockHeight(t *testing.T) { ctx := context.Background() c := &ConfigTracker{ - reader: testSetupReader(t, mockServer.URL), + getReader: func() (client.Reader, error) { return testSetupReader(t, mockServer.URL), nil }, } h, err := c.LatestBlockHeight(ctx) diff --git a/pkg/solana/relay.go 
b/pkg/solana/relay.go index 8266293ef..d98ab0442 100644 --- a/pkg/solana/relay.go +++ b/pkg/solana/relay.go @@ -154,7 +154,8 @@ func (r *Relayer) NewMedianProvider(ctx context.Context, rargs relaytypes.RelayA } cfg := configWatcher.chain.Config() - transmissionsCache := NewTransmissionsCache(transmissionsID, relayConfig.ChainID, cfg, configWatcher.reader, r.lggr) + getReader := func() (client.AccountReader, error) { return configWatcher.chain.Reader() } + transmissionsCache := NewTransmissionsCache(transmissionsID, relayConfig.ChainID, cfg, getReader, r.lggr) return &medianProvider{ configProvider: configWatcher, transmissionsCache: transmissionsCache, @@ -169,7 +170,7 @@ func (r *Relayer) NewMedianProvider(ctx context.Context, rargs relaytypes.RelayA storeProgramID: configWatcher.storeProgramID, transmissionsID: transmissionsID, transmissionSigner: transmitterAccount, - reader: configWatcher.reader, + getReader: configWatcher.chain.Reader, stateCache: configWatcher.stateCache, lggr: r.lggr, txManager: configWatcher.chain.TxManager(), @@ -195,7 +196,6 @@ type configProvider struct { offchainConfigDigester types.OffchainConfigDigester configTracker types.ContractConfigTracker chain Chain - reader client.Reader } func newConfigProvider(_ context.Context, lggr logger.Logger, chain Chain, args relaytypes.RelayArgs) (*configProvider, error) { @@ -222,11 +222,8 @@ func newConfigProvider(_ context.Context, lggr logger.Logger, chain Chain, args StateID: stateID, } - reader, err := chain.Reader() - if err != nil { - return nil, fmt.Errorf("error in NewMedianProvider.chain.Reader: %w", err) - } - stateCache := NewStateCache(stateID, relayConfig.ChainID, chain.Config(), reader, lggr) + getAccountReader := func() (client.AccountReader, error) { return chain.Reader() } + stateCache := NewStateCache(stateID, relayConfig.ChainID, chain.Config(), getAccountReader, lggr) return &configProvider{ chainID: relayConfig.ChainID, stateID: stateID, @@ -234,9 +231,8 @@ func 
newConfigProvider(_ context.Context, lggr logger.Logger, chain Chain, args storeProgramID: storeProgramID, stateCache: stateCache, offchainConfigDigester: offchainConfigDigester, - configTracker: &ConfigTracker{stateCache: stateCache, reader: reader}, + configTracker: &ConfigTracker{stateCache: stateCache, getReader: chain.Reader}, chain: chain, - reader: reader, }, nil } diff --git a/pkg/solana/state_cache.go b/pkg/solana/state_cache.go index 9faa766d0..4f6f2b084 100644 --- a/pkg/solana/state_cache.go +++ b/pkg/solana/state_cache.go @@ -23,16 +23,24 @@ type StateCache struct { *client.Cache[State] } -func NewStateCache(stateID solana.PublicKey, chainID string, cfg config.Config, reader client.Reader, lggr logger.Logger) *StateCache { +type GetReader func() (client.Reader, error) +type GetAccountReader func() (client.AccountReader, error) + +func NewStateCache(stateID solana.PublicKey, chainID string, cfg config.Config, getReader GetAccountReader, lggr logger.Logger) *StateCache { name := "ocr2_median_state" getter := func(ctx context.Context) (State, uint64, error) { - return GetState(ctx, reader, stateID, cfg.Commitment()) + return GetState(ctx, getReader, stateID, cfg.Commitment()) } return &StateCache{client.NewCache(name, stateID, chainID, cfg, getter, logger.With(lggr, "cache", name))} } -func GetState(ctx context.Context, reader client.AccountReader, account solana.PublicKey, commitment rpc.CommitmentType) (State, uint64, error) { - res, err := reader.GetAccountInfoWithOpts(ctx, account, &rpc.GetAccountInfoOpts{ +func GetState(ctx context.Context, getReader GetAccountReader, account solana.PublicKey, commitment rpc.CommitmentType) (State, uint64, error) { + r, err := getReader() + if err != nil { + return State{}, 0, fmt.Errorf("failed to get reader: %w", err) + } + + res, err := r.GetAccountInfoWithOpts(ctx, account, &rpc.GetAccountInfoOpts{ Commitment: commitment, Encoding: "base64", }) diff --git a/pkg/solana/transmissions_cache.go 
b/pkg/solana/transmissions_cache.go index 75ad30a6b..acc530cbb 100644 --- a/pkg/solana/transmissions_cache.go +++ b/pkg/solana/transmissions_cache.go @@ -19,19 +19,25 @@ type TransmissionsCache struct { *client.Cache[Answer] } -func NewTransmissionsCache(transmissionsID solana.PublicKey, chainID string, cfg config.Config, reader client.Reader, lggr logger.Logger) *TransmissionsCache { +func NewTransmissionsCache(transmissionsID solana.PublicKey, chainID string, cfg config.Config, getReader GetAccountReader, lggr logger.Logger) *TransmissionsCache { name := "ocr2_median_transmissions" getter := func(ctx context.Context) (Answer, uint64, error) { - return GetLatestTransmission(ctx, reader, transmissionsID, cfg.Commitment()) + return GetLatestTransmission(ctx, getReader, transmissionsID, cfg.Commitment()) } return &TransmissionsCache{client.NewCache(name, transmissionsID, chainID, cfg, getter, logger.With(lggr, "cache", name))} } -func GetLatestTransmission(ctx context.Context, reader client.AccountReader, account solana.PublicKey, commitment rpc.CommitmentType) (Answer, uint64, error) { +func GetLatestTransmission(ctx context.Context, getReader GetAccountReader, account solana.PublicKey, commitment rpc.CommitmentType) (Answer, uint64, error) { // query for transmission header headerStart := AccountDiscriminatorLen // skip account discriminator headerLen := TransmissionsHeaderLen - res, err := reader.GetAccountInfoWithOpts(ctx, account, &rpc.GetAccountInfoOpts{ + + r, err := getReader() + if err != nil { + return Answer{}, 0, fmt.Errorf("failed to get reader: %w", err) + } + + res, err := r.GetAccountInfoWithOpts(ctx, account, &rpc.GetAccountInfoOpts{ Encoding: "base64", Commitment: commitment, DataSlice: &rpc.DataSlice{ @@ -71,7 +77,7 @@ func GetLatestTransmission(ctx context.Context, reader client.AccountReader, acc transmissionOffset := AccountDiscriminatorLen + TransmissionsHeaderMaxSize + (uint64(cursor) * transmissionLen) - res, err = 
reader.GetAccountInfoWithOpts(ctx, account, &rpc.GetAccountInfoOpts{ + res, err = r.GetAccountInfoWithOpts(ctx, account, &rpc.GetAccountInfoOpts{ Encoding: "base64", Commitment: commitment, DataSlice: &rpc.DataSlice{ diff --git a/pkg/solana/transmitter.go b/pkg/solana/transmitter.go index a488730d0..951e9633e 100644 --- a/pkg/solana/transmitter.go +++ b/pkg/solana/transmitter.go @@ -11,15 +11,13 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/utils" - - "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" ) var _ types.ContractTransmitter = (*Transmitter)(nil) type Transmitter struct { stateID, programID, storeProgramID, transmissionsID, transmissionSigner solana.PublicKey - reader client.Reader + getReader GetReader stateCache *StateCache lggr logger.Logger txManager TxManager @@ -32,7 +30,12 @@ func (c *Transmitter) Transmit( report types.Report, sigs []types.AttributedOnchainSignature, ) error { - blockhash, err := c.reader.LatestBlockhash(ctx) + reader, err := c.getReader() + if err != nil { + return fmt.Errorf("error on Transmit.Reader: %w", err) + } + + blockhash, err := reader.LatestBlockhash(ctx) if err != nil { return fmt.Errorf("error on Transmit.GetRecentBlockhash: %w", err) } diff --git a/pkg/solana/transmitter_test.go b/pkg/solana/transmitter_test.go index 6aef6c921..1d058d36a 100644 --- a/pkg/solana/transmitter_test.go +++ b/pkg/solana/transmitter_test.go @@ -14,6 +14,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" clientmocks "github.com/smartcontractkit/chainlink-solana/pkg/solana/client/mocks" "github.com/smartcontractkit/chainlink-solana/pkg/solana/fees" "github.com/smartcontractkit/chainlink-solana/pkg/solana/txm" @@ -68,7 +69,7 @@ func TestTransmitter_TxSize(t *testing.T) { storeProgramID: 
mustNewRandomPublicKey(), transmissionsID: mustNewRandomPublicKey(), transmissionSigner: signer.PublicKey(), - reader: rw, + getReader: func() (client.Reader, error) { return rw, nil }, stateCache: &StateCache{}, lggr: logger.Test(t), txManager: mockTxm, From b9bf6a3c6bcf58817f9d4d6adaba3e6f96fb644f Mon Sep 17 00:00:00 2001 From: Awbrey Hughlett Date: Mon, 18 Nov 2024 12:07:14 -0500 Subject: [PATCH 07/12] Batch Contract Read (#926) * Batch Contract Read Batch contract reads are added and multiple address support was removed. All tests now pass. * fix tests * make linter happy --- .../chainreader/account_read_binding.go | 78 +---- .../chainreader/account_read_binding_test.go | 164 ---------- pkg/solana/chainreader/batch.go | 107 +++++++ pkg/solana/chainreader/bindings.go | 104 ++---- pkg/solana/chainreader/bindings_test.go | 73 +---- pkg/solana/chainreader/chain_reader.go | 202 ++++-------- pkg/solana/chainreader/chain_reader_test.go | 301 +++++------------- pkg/solana/config/chain_reader.go | 4 +- pkg/solana/config/chain_reader_test.go | 36 +-- .../config/testChainReader_invalid.json | 4 +- pkg/solana/config/testChainReader_valid.json | 12 +- 11 files changed, 313 insertions(+), 772 deletions(-) delete mode 100644 pkg/solana/chainreader/account_read_binding_test.go create mode 100644 pkg/solana/chainreader/batch.go diff --git a/pkg/solana/chainreader/account_read_binding.go b/pkg/solana/chainreader/account_read_binding.go index 128d38cd1..71ebb131b 100644 --- a/pkg/solana/chainreader/account_read_binding.go +++ b/pkg/solana/chainreader/account_read_binding.go @@ -2,7 +2,6 @@ package chainreader import ( "context" - "fmt" "github.com/gagliardetto/solana-go" "github.com/gagliardetto/solana-go/rpc" @@ -10,94 +9,37 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/types" ) -// BinaryDataReader provides an interface for reading bytes from a source. This is likely a wrapper -// for a solana client. 
-type BinaryDataReader interface { - ReadAll(context.Context, solana.PublicKey, *rpc.GetAccountInfoOpts) ([]byte, error) -} - // accountReadBinding provides decoding and reading Solana Account data using a defined codec. The // `idlAccount` refers to the account name in the IDL for which the codec has a type mapping. type accountReadBinding struct { idlAccount string codec types.RemoteCodec - reader BinaryDataReader + key solana.PublicKey opts *rpc.GetAccountInfoOpts } -func newAccountReadBinding(acct string, codec types.RemoteCodec, reader BinaryDataReader, opts *rpc.GetAccountInfoOpts) *accountReadBinding { +func newAccountReadBinding(acct string, codec types.RemoteCodec, opts *rpc.GetAccountInfoOpts) *accountReadBinding { return &accountReadBinding{ idlAccount: acct, codec: codec, - reader: reader, opts: opts, } } var _ readBinding = &accountReadBinding{} -func (b *accountReadBinding) PreLoad(ctx context.Context, address string, result *loadedResult) { - if result == nil { - return - } - - account, err := solana.PublicKeyFromBase58(address) - if err != nil { - result.err <- err - - return - } - - bts, err := b.reader.ReadAll(ctx, account, b.opts) - if err != nil { - result.err <- fmt.Errorf("%w: failed to get binary data", err) - - return - } - - select { - case <-ctx.Done(): - result.err <- ctx.Err() - default: - result.value <- bts - } +func (b *accountReadBinding) SetAddress(key solana.PublicKey) { + b.key = key } -func (b *accountReadBinding) GetLatestValue(ctx context.Context, address string, _ any, outVal any, result *loadedResult) error { - var ( - bts []byte - err error - ) - - if result != nil { - // when preloading, the process will wait for one of three conditions: - // 1. the context ends and returns an error - // 2. bytes were loaded in the bytes channel - // 3. 
an error was loaded in the err channel - select { - case <-ctx.Done(): - err = ctx.Err() - case bts = <-result.value: - case err = <-result.err: - } - - if err != nil { - return err - } - } else { - account, err := solana.PublicKeyFromBase58(address) - if err != nil { - return err - } - - if bts, err = b.reader.ReadAll(ctx, account, b.opts); err != nil { - return fmt.Errorf("%w: failed to get binary data", err) - } - } - - return b.codec.Decode(ctx, bts, outVal, b.idlAccount) +func (b *accountReadBinding) GetAddress() solana.PublicKey { + return b.key } func (b *accountReadBinding) CreateType(_ bool) (any, error) { return b.codec.CreateType(b.idlAccount, false) } + +func (b *accountReadBinding) Decode(ctx context.Context, bts []byte, outVal any) error { + return b.codec.Decode(ctx, bts, outVal, b.idlAccount) +} diff --git a/pkg/solana/chainreader/account_read_binding_test.go b/pkg/solana/chainreader/account_read_binding_test.go deleted file mode 100644 index 3ea899cc2..000000000 --- a/pkg/solana/chainreader/account_read_binding_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package chainreader - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/gagliardetto/solana-go" - "github.com/gagliardetto/solana-go/rpc" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/smartcontractkit/chainlink-common/pkg/codec/encodings" - "github.com/smartcontractkit/chainlink-common/pkg/codec/encodings/binary" - "github.com/smartcontractkit/chainlink-common/pkg/types" -) - -func TestPreload(t *testing.T) { - t.Parallel() - - testCodec := makeTestCodec(t) - - t.Run("get latest value waits for preload", func(t *testing.T) { - t.Parallel() - - reader := new(mockReader) - binding := newAccountReadBinding(testCodecKey, testCodec, reader, nil) - - expected := testStruct{A: true, B: 42} - bts, err := testCodec.Encode(context.Background(), expected, testCodecKey) - - require.NoError(t, err) - - 
reader.On("ReadAll", mock.Anything, mock.Anything, mock.Anything).Return(bts, nil).After(time.Second) - - ctx := context.Background() - start := time.Now() - loaded := &loadedResult{ - value: make(chan []byte, 1), - err: make(chan error, 1), - } - - pubKey := solana.NewWallet().PublicKey() - - binding.PreLoad(ctx, pubKey.String(), loaded) - - var result testStruct - - err = binding.GetLatestValue(ctx, pubKey.String(), nil, &result, loaded) - elapsed := time.Since(start) - - require.NoError(t, err) - assert.GreaterOrEqual(t, elapsed, time.Second) - assert.Less(t, elapsed, 1100*time.Millisecond) - assert.Equal(t, expected, result) - }) - - t.Run("cancelled context exits preload and returns error on get latest value", func(t *testing.T) { - t.Parallel() - - reader := new(mockReader) - binding := newAccountReadBinding(testCodecKey, testCodec, reader, nil) - - ctx, cancel := context.WithCancelCause(context.Background()) - - // make the readall pause until after the context is cancelled - reader.On("ReadAll", mock.Anything, mock.Anything, mock.Anything). - Return([]byte{}, nil). 
- After(600 * time.Millisecond) - - expectedErr := errors.New("test error") - go func() { - time.Sleep(500 * time.Millisecond) - cancel(expectedErr) - }() - - pubKey := solana.NewWallet().PublicKey() - loaded := &loadedResult{ - value: make(chan []byte, 1), - err: make(chan error, 1), - } - start := time.Now() - binding.PreLoad(ctx, pubKey.String(), loaded) - - var result testStruct - err := binding.GetLatestValue(ctx, pubKey.String(), nil, &result, loaded) - elapsed := time.Since(start) - - assert.ErrorIs(t, err, ctx.Err()) - assert.ErrorIs(t, context.Cause(ctx), expectedErr) - assert.GreaterOrEqual(t, elapsed, 600*time.Millisecond) - assert.Less(t, elapsed, 700*time.Millisecond) - }) - - t.Run("error from preload is returned in get latest value", func(t *testing.T) { - t.Parallel() - - reader := new(mockReader) - binding := newAccountReadBinding(testCodecKey, testCodec, reader, nil) - ctx := context.Background() - expectedErr := errors.New("test error") - - reader.On("ReadAll", mock.Anything, mock.Anything, mock.Anything). 
- Return([]byte{}, expectedErr) - - pubKey := solana.NewWallet().PublicKey() - loaded := &loadedResult{ - value: make(chan []byte, 1), - err: make(chan error, 1), - } - binding.PreLoad(ctx, pubKey.String(), loaded) - - var result testStruct - err := binding.GetLatestValue(ctx, pubKey.String(), nil, &result, loaded) - - assert.ErrorIs(t, err, expectedErr) - }) -} - -type mockReader struct { - mock.Mock -} - -func (_m *mockReader) ReadAll(ctx context.Context, pk solana.PublicKey, opts *rpc.GetAccountInfoOpts) ([]byte, error) { - ret := _m.Called(ctx, pk) - - var r0 []byte - if val, ok := ret.Get(0).([]byte); ok { - r0 = val - } - - var r1 error - if fn, ok := ret.Get(1).(func() error); ok { - r1 = fn() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type testStruct struct { - A bool - B int64 -} - -const testCodecKey = "TEST" - -func makeTestCodec(t *testing.T) types.RemoteCodec { - t.Helper() - - builder := binary.LittleEndian() - - structCodec, err := encodings.NewStructCodec([]encodings.NamedTypeCodec{ - {Name: "A", Codec: builder.Bool()}, - {Name: "B", Codec: builder.Int64()}, - }) - - require.NoError(t, err) - - return encodings.CodecFromTypeCodec(map[string]encodings.TypeCodec{testCodecKey: structCodec}) -} diff --git a/pkg/solana/chainreader/batch.go b/pkg/solana/chainreader/batch.go new file mode 100644 index 000000000..43e4971b9 --- /dev/null +++ b/pkg/solana/chainreader/batch.go @@ -0,0 +1,107 @@ +package chainreader + +import ( + "context" + "errors" + + "github.com/gagliardetto/solana-go" + + "github.com/smartcontractkit/chainlink-common/pkg/values" +) + +type call struct { + ContractName, ReadName string + Params, ReturnVal any +} + +type batchResultWithErr struct { + address string + contractName, readName string + returnVal any + err error +} + +var ( + ErrMissingAccountData = errors.New("account data not found") +) + +type MultipleAccountGetter interface { + GetMultipleAccountData(context.Context, ...solana.PublicKey) ([][]byte, error) +} + 
+func doMethodBatchCall(ctx context.Context, client MultipleAccountGetter, bindings namespaceBindings, batch []call) ([]batchResultWithErr, error) { + // Create the list of public keys to fetch + keys := make([]solana.PublicKey, len(batch)) + for idx, call := range batch { + binding, err := bindings.GetReadBinding(call.ContractName, call.ReadName) + if err != nil { + return nil, err + } + + keys[idx] = binding.GetAddress() + } + + // Fetch the account data + data, err := client.GetMultipleAccountData(ctx, keys...) + if err != nil { + return nil, err + } + + results := make([]batchResultWithErr, len(batch)) + + // decode batch call results + for idx, call := range batch { + results[idx] = batchResultWithErr{ + address: keys[idx].String(), + contractName: call.ContractName, + readName: call.ReadName, + returnVal: call.ReturnVal, + } + + if data[idx] == nil || len(data[idx]) == 0 { + results[idx].err = ErrMissingAccountData + + continue + } + + binding, err := bindings.GetReadBinding(results[idx].contractName, results[idx].readName) + if err != nil { + results[idx].err = err + + continue + } + + ptrToValue, isValue := call.ReturnVal.(*values.Value) + if !isValue { + results[idx].err = errors.Join( + results[idx].err, + binding.Decode(ctx, data[idx], results[idx].returnVal), + ) + + continue + } + + contractType, err := binding.CreateType(false) + if err != nil { + results[idx].err = err + + continue + } + + results[idx].err = errors.Join( + results[idx].err, + binding.Decode(ctx, data[idx], contractType), + ) + + value, err := values.Wrap(contractType) + if err != nil { + results[idx].err = errors.Join(results[idx].err, err) + + continue + } + + *ptrToValue = value + } + + return results, nil +} diff --git a/pkg/solana/chainreader/bindings.go b/pkg/solana/chainreader/bindings.go index 39eb07f8a..51cc8980a 100644 --- a/pkg/solana/chainreader/bindings.go +++ b/pkg/solana/chainreader/bindings.go @@ -3,7 +3,6 @@ package chainreader import ( "context" "fmt" - "reflect" 
"github.com/gagliardetto/solana-go" @@ -11,128 +10,63 @@ import ( ) type readBinding interface { - PreLoad(context.Context, string, *loadedResult) - GetLatestValue(ctx context.Context, address string, params, returnVal any, preload *loadedResult) error + SetAddress(solana.PublicKey) + GetAddress() solana.PublicKey CreateType(bool) (any, error) + Decode(context.Context, []byte, any) error } // key is namespace -type namespaceBindings map[string]methodBindings +type namespaceBindings map[string]readNameBindings // key is method name -type methodBindings map[string]readBindings +type readNameBindings map[string]readBinding -// read bindings is a list of bindings by index -type readBindings []readBinding - -func (b namespaceBindings) AddReadBinding(namespace, methodName string, reader readBinding) { - nbs, nbsExists := b[namespace] - if !nbsExists { - nbs = methodBindings{} - b[namespace] = nbs - } - - rbs, rbsExists := nbs[methodName] - if !rbsExists { - rbs = []readBinding{} +func (b namespaceBindings) AddReadBinding(namespace, readName string, reader readBinding) { + if _, nbsExists := b[namespace]; !nbsExists { + b[namespace] = readNameBindings{} } - b[namespace][methodName] = append(rbs, reader) + b[namespace][readName] = reader } -func (b namespaceBindings) GetReadBindings(namespace, methodName string) ([]readBinding, error) { +func (b namespaceBindings) GetReadBinding(namespace, readName string) (readBinding, error) { nbs, nbsExists := b[namespace] if !nbsExists { return nil, fmt.Errorf("%w: no read binding exists for %s", types.ErrInvalidConfig, namespace) } - rbs, rbsExists := nbs[methodName] + rbs, rbsExists := nbs[readName] if !rbsExists { - return nil, fmt.Errorf("%w: no read binding exists for %s and %s", types.ErrInvalidConfig, namespace, methodName) + return nil, fmt.Errorf("%w: no read binding exists for %s and %s", types.ErrInvalidConfig, namespace, readName) } return rbs, nil } -func (b namespaceBindings) CreateType(namespace, methodName string, 
forEncoding bool) (any, error) { - bindings, err := b.GetReadBindings(namespace, methodName) +func (b namespaceBindings) CreateType(namespace, readName string, forEncoding bool) (any, error) { + binding, err := b.GetReadBinding(namespace, readName) if err != nil { return nil, err } - if len(bindings) == 1 { - // get the item type from the binding codec - return bindings[0].CreateType(forEncoding) - } - - // build a merged struct from all bindings - fields := make([]reflect.StructField, 0) - var fieldIdx int - fieldNames := make(map[string]struct{}) - - for _, binding := range bindings { - bindingType, err := binding.CreateType(forEncoding) - if err != nil { - return nil, err - } - - tBinding := reflect.TypeOf(bindingType) - if tBinding.Kind() == reflect.Pointer { - tBinding = tBinding.Elem() - } - - // all bindings must be structs to allow multiple bindings - if tBinding.Kind() != reflect.Struct { - return nil, fmt.Errorf("%w: support for multiple bindings only applies to all bindings having the type struct", types.ErrInvalidType) - } - - for idx := 0; idx < tBinding.NumField(); idx++ { - value := tBinding.FieldByIndex([]int{idx}) - - _, exists := fieldNames[value.Name] - if exists { - return nil, fmt.Errorf("%w: field name overlap on %s", types.ErrInvalidConfig, value.Name) - } - - field := reflect.StructField{ - Name: value.Name, - Type: value.Type, - Index: []int{fieldIdx}, - } - - fields = append(fields, field) - - fieldIdx++ - fieldNames[value.Name] = struct{}{} - } - } - - return reflect.New(reflect.StructOf(fields)).Interface(), nil + return binding.CreateType(forEncoding) } func (b namespaceBindings) Bind(binding types.BoundContract) error { - _, nbsExist := b[binding.Name] + bnd, nbsExist := b[binding.Name] if !nbsExist { return fmt.Errorf("%w: no namespace named %s", types.ErrInvalidConfig, binding.Name) } - readAddresses, err := decodeAddressMappings(binding.Address) + key, err := solana.PublicKeyFromBase58(binding.Address) if err != nil { return err } - 
for readName, addresses := range readAddresses { - for idx, address := range addresses { - if _, err := solana.PublicKeyFromBase58(address); err != nil { - return fmt.Errorf("%w: invalid address binding for %s at index %d: %s", types.ErrInvalidConfig, readName, idx, err.Error()) - } - } + for _, rb := range bnd { + rb.SetAddress(key) } return nil } - -type loadedResult struct { - value chan []byte - err chan error -} diff --git a/pkg/solana/chainreader/bindings_test.go b/pkg/solana/chainreader/bindings_test.go index 9ba66aa5f..d8b510648 100644 --- a/pkg/solana/chainreader/bindings_test.go +++ b/pkg/solana/chainreader/bindings_test.go @@ -2,9 +2,9 @@ package chainreader import ( "context" - "reflect" "testing" + "github.com/gagliardetto/solana-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -31,70 +31,11 @@ func TestBindings_CreateType(t *testing.T) { assert.Equal(t, expected, returned) }) - t.Run("multiple bindings return merged struct", func(t *testing.T) { + t.Run("returns error when binding does not exist", func(t *testing.T) { t.Parallel() - bindingA := new(mockBinding) - bindingB := new(mockBinding) bindings := namespaceBindings{} - bindings.AddReadBinding("A", "B", bindingA) - bindings.AddReadBinding("A", "B", bindingB) - - bindingA.On("CreateType", mock.Anything).Return(struct{ A string }{A: "test"}, nil) - bindingB.On("CreateType", mock.Anything).Return(struct{ B int }{B: 8}, nil) - - result, err := bindings.CreateType("A", "B", true) - - expected := reflect.New(reflect.StructOf([]reflect.StructField{ - {Name: "A", Type: reflect.TypeOf("")}, - {Name: "B", Type: reflect.TypeOf(0)}, - })) - - require.NoError(t, err) - assert.Equal(t, expected.Type(), reflect.TypeOf(result)) - }) - - t.Run("multiple bindings fails when not a struct", func(t *testing.T) { - t.Parallel() - - bindingA := new(mockBinding) - bindingB := new(mockBinding) - bindings := namespaceBindings{} - - 
bindings.AddReadBinding("A", "B", bindingA) - bindings.AddReadBinding("A", "B", bindingB) - - bindingA.On("CreateType", mock.Anything).Return(8, nil) - bindingB.On("CreateType", mock.Anything).Return(struct{ A string }{A: "test"}, nil) - - _, err := bindings.CreateType("A", "B", true) - - require.ErrorIs(t, err, types.ErrInvalidType) - }) - - t.Run("multiple bindings errors when fields overlap", func(t *testing.T) { - t.Parallel() - - bindingA := new(mockBinding) - bindingB := new(mockBinding) - bindings := namespaceBindings{} - - bindings.AddReadBinding("A", "B", bindingA) - bindings.AddReadBinding("A", "B", bindingB) - - type A struct { - A string - B int - } - - type B struct { - A int - } - - bindingA.On("CreateType", mock.Anything).Return(A{A: ""}, nil) - bindingB.On("CreateType", mock.Anything).Return(B{A: 8}, nil) - _, err := bindings.CreateType("A", "B", true) require.ErrorIs(t, err, types.ErrInvalidConfig) @@ -105,10 +46,10 @@ type mockBinding struct { mock.Mock } -func (_m *mockBinding) PreLoad(context.Context, string, *loadedResult) {} +func (_m *mockBinding) SetAddress(_ solana.PublicKey) {} -func (_m *mockBinding) GetLatestValue(ctx context.Context, address string, params, returnVal any, _ *loadedResult) error { - return nil +func (_m *mockBinding) GetAddress() solana.PublicKey { + return solana.PublicKey{} } func (_m *mockBinding) CreateType(b bool) (any, error) { @@ -116,3 +57,7 @@ func (_m *mockBinding) CreateType(b bool) (any, error) { return ret.Get(0), ret.Error(1) } + +func (_m *mockBinding) Decode(_ context.Context, _ []byte, _ any) error { + return nil +} diff --git a/pkg/solana/chainreader/chain_reader.go b/pkg/solana/chainreader/chain_reader.go index ba0093edc..d017eb25d 100644 --- a/pkg/solana/chainreader/chain_reader.go +++ b/pkg/solana/chainreader/chain_reader.go @@ -2,14 +2,12 @@ package chainreader import ( "context" - "encoding/base64" "encoding/json" "errors" "fmt" - "reflect" "sync" - ag_solana "github.com/gagliardetto/solana-go" + 
"github.com/gagliardetto/solana-go" "github.com/gagliardetto/solana-go/rpc" codeccommon "github.com/smartcontractkit/chainlink-common/pkg/codec" @@ -18,7 +16,6 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/types" "github.com/smartcontractkit/chainlink-common/pkg/types/query" "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives" - "github.com/smartcontractkit/chainlink-common/pkg/values" "github.com/smartcontractkit/chainlink-solana/pkg/solana/codec" "github.com/smartcontractkit/chainlink-solana/pkg/solana/config" @@ -31,7 +28,7 @@ type SolanaChainReaderService struct { // provided values lggr logger.Logger - client BinaryDataReader + client MultipleAccountGetter // internal values bindings namespaceBindings @@ -48,7 +45,7 @@ var ( ) // NewChainReaderService is a constructor for a new ChainReaderService for Solana. Returns a nil service on error. -func NewChainReaderService(lggr logger.Logger, dataReader BinaryDataReader, cfg config.ChainReader) (*SolanaChainReaderService, error) { +func NewChainReaderService(lggr logger.Logger, dataReader MultipleAccountGetter, cfg config.ChainReader) (*SolanaChainReaderService, error) { svc := &SolanaChainReaderService{ lggr: logger.Named(lggr, ServiceName), client: dataReader, @@ -114,123 +111,73 @@ func (s *SolanaChainReaderService) GetLatestValue(ctx context.Context, readIdent return fmt.Errorf("%w: no contract for read identifier %s", types.ErrInvalidType, readIdentifier) } - addressMappings, err := decodeAddressMappings(vals.address) - if err != nil { - return fmt.Errorf("%w: %s", types.ErrInvalidConfig, err) - } - - addresses, ok := addressMappings[vals.readName] - if !ok { - return fmt.Errorf("%w: no addresses for readName %s", types.ErrInvalidConfig, vals.readName) + batch := []call{ + { + ContractName: vals.contract, + ReadName: vals.readName, + Params: params, + ReturnVal: returnVal, + }, } - bindings, err := s.bindings.GetReadBindings(vals.contract, vals.readName) + results, err := 
doMethodBatchCall(ctx, s.client, s.bindings, batch) if err != nil { return err } - if len(addresses) != len(bindings) { - return fmt.Errorf("%w: addresses and bindings lengths do not match", types.ErrInvalidConfig) + if len(results) != len(batch) { + return fmt.Errorf("%w: unexpected number of results", types.ErrInternal) } - // if the returnVal is not a *values.Value, run normally without using the ptrToValue - ptrToValue, isValue := returnVal.(*values.Value) - if !isValue { - return s.runAllBindings(ctx, bindings, addresses, params, returnVal) - } - - // if the returnVal is a *values.Value, create the type from the contract, run normally, and wrap the value - contractType, err := s.bindings.CreateType(vals.contract, vals.readName, false) - if err != nil { - return err - } - - if err = s.runAllBindings(ctx, bindings, addresses, params, contractType); err != nil { - return err - } - - value, err := values.Wrap(contractType) - if err != nil { - return err + if results[0].err != nil { + return fmt.Errorf("%w: %s", types.ErrInternal, results[0].err) } - *ptrToValue = value - return nil } -func (s *SolanaChainReaderService) runAllBindings( - ctx context.Context, - bindings []readBinding, - addresses []string, - params, returnVal any, -) error { - localCtx, localCancel := context.WithCancel(ctx) - - // the wait group ensures GetLatestValue returns only after all go-routines have completed - var wg sync.WaitGroup - - results := make(map[int]*loadedResult) - - if len(bindings) > 1 { - // might go for some guardrails when dealing with multiple bindings - // the returnVal should be compatible with multiple passes by the codec decoder - // this should only apply to types struct{} and map[any]any - tReturnVal := reflect.TypeOf(returnVal) - if tReturnVal.Kind() == reflect.Pointer { - tReturnVal = reflect.Indirect(reflect.ValueOf(returnVal)).Type() - } - - switch tReturnVal.Kind() { - case reflect.Struct, reflect.Map: - default: - localCancel() - - wg.Wait() - - return 
fmt.Errorf("%w: multiple bindings is only supported for struct and map", types.ErrInvalidType) +// BatchGetLatestValues implements the types.ContractReader interface. +func (s *SolanaChainReaderService) BatchGetLatestValues(ctx context.Context, request types.BatchGetLatestValuesRequest) (types.BatchGetLatestValuesResult, error) { + idxLookup := make(map[types.BoundContract][]int) + batch := []call{} + + for bound, req := range request { + idxLookup[bound] = make([]int, len(req)) + + for idx, readReq := range req { + idxLookup[bound][idx] = len(batch) + batch = append(batch, call{ + ContractName: bound.Name, + ReadName: readReq.ReadName, + Params: readReq.Params, + ReturnVal: readReq.ReturnVal, + }) } + } - // for multiple bindings, preload the remote data in parallel - for idx, binding := range bindings { - results[idx] = &loadedResult{ - value: make(chan []byte, 1), - err: make(chan error, 1), - } - - wg.Add(1) - go func(ctx context.Context, rb readBinding, res *loadedResult, address string) { - defer wg.Done() + results, err := doMethodBatchCall(ctx, s.client, s.bindings, batch) + if err != nil { + return nil, err + } - rb.PreLoad(ctx, address, res) - }(localCtx, binding, results[idx], addresses[idx]) - } + if len(results) != len(batch) { + return nil, errors.New("unexpected number of results") } - // in the case of parallel preloading, GetLatestValue will still run in - // sequence because the function will block until the data is loaded. - // in the case of no preloading, GetLatestValue will load and decode in - // sequence. 
- for idx, binding := range bindings { - if err := binding.GetLatestValue(ctx, addresses[idx], params, returnVal, results[idx]); err != nil { - localCancel() + result := make(types.BatchGetLatestValuesResult) - wg.Wait() + for bound, idxs := range idxLookup { + result[bound] = make(types.ContractBatchResults, len(idxs)) - return err + for idx, callIdx := range idxs { + res := types.BatchReadResult{ReadName: results[callIdx].readName} + res.SetResult(results[callIdx].returnVal, results[callIdx].err) + + result[bound][idx] = res } } - localCancel() - - wg.Wait() - - return nil -} - -// BatchGetLatestValues implements the types.ContractReader interface. -func (s *SolanaChainReaderService) BatchGetLatestValues(_ context.Context, _ types.BatchGetLatestValuesRequest) (types.BatchGetLatestValuesResult, error) { - return nil, errors.New("unimplemented") + return result, nil } // QueryKey implements the types.ContractReader interface. @@ -288,26 +235,25 @@ func (s *SolanaChainReaderService) init(namespaces map[string]config.ChainReader s.lookup.addReadNameForContract(namespace, methodName) - for _, procedure := range method.Procedures { - injectAddressModifier(procedure.OutputModifications) - - mod, err := procedure.OutputModifications.ToModifier(codec.DecoderHooks...) - if err != nil { - return err - } - - codecWithModifiers, err := codec.NewNamedModifierCodec(idlCodec, procedure.IDLAccount, mod) - if err != nil { - return err - } - - s.bindings.AddReadBinding(namespace, methodName, newAccountReadBinding( - procedure.IDLAccount, - codecWithModifiers, - s.client, - createRPCOpts(procedure.RPCOpts), - )) + procedure := method.Procedure + + injectAddressModifier(procedure.OutputModifications) + + mod, err := procedure.OutputModifications.ToModifier(codec.DecoderHooks...) 
+ if err != nil { + return err + } + + codecWithModifiers, err := codec.NewNamedModifierCodec(idlCodec, procedure.IDLAccount, mod) + if err != nil { + return err } + + s.bindings.AddReadBinding(namespace, methodName, newAccountReadBinding( + procedure.IDLAccount, + codecWithModifiers, + createRPCOpts(procedure.RPCOpts), + )) } } @@ -353,7 +299,7 @@ func NewAccountDataReader(client *rpc.Client) *accountDataReader { return &accountDataReader{client: client} } -func (r *accountDataReader) ReadAll(ctx context.Context, pk ag_solana.PublicKey, opts *rpc.GetAccountInfoOpts) ([]byte, error) { +func (r *accountDataReader) ReadAll(ctx context.Context, pk solana.PublicKey, opts *rpc.GetAccountInfoOpts) ([]byte, error) { result, err := r.client.GetAccountInfoWithOpts(ctx, pk, opts) if err != nil { return nil, err @@ -363,19 +309,3 @@ func (r *accountDataReader) ReadAll(ctx context.Context, pk ag_solana.PublicKey, return bts, nil } - -func decodeAddressMappings(encoded string) (map[string][]string, error) { - decoded, err := base64.StdEncoding.DecodeString(encoded) - if err != nil { - return nil, err - } - - var readAddresses map[string][]string - - err = json.Unmarshal(decoded, &readAddresses) - if err != nil { - return nil, err - } - - return readAddresses, nil -} diff --git a/pkg/solana/chainreader/chain_reader_test.go b/pkg/solana/chainreader/chain_reader_test.go index 7a1255c07..165be22e2 100644 --- a/pkg/solana/chainreader/chain_reader_test.go +++ b/pkg/solana/chainreader/chain_reader_test.go @@ -2,9 +2,7 @@ package chainreader_test import ( "context" - "encoding/base64" "encoding/json" - "errors" "fmt" "math/big" "os" @@ -78,11 +76,6 @@ func TestSolanaChainReaderService_ServiceCtx(t *testing.T) { } func TestSolanaChainReaderService_GetLatestValue(t *testing.T) { - // TODO fix Solana tests - t.Skip() - - t.Parallel() - ctx := tests.Context(t) // encode values from unmodified test struct to be read and decoded @@ -107,16 +100,20 @@ func 
TestSolanaChainReaderService_GetLatestValue(t *testing.T) { require.NoError(t, svc.Close()) }) - client.SetNext(encoded, nil, 0) + pk := solana.NewWallet().PublicKey() + + client.SetForAddress(pk, encoded, nil, 0) var result modifiedStructWithNestedStruct binding := types.BoundContract{ Name: Namespace, - Address: "", + Address: pk.String(), } + require.NoError(t, svc.Bind(ctx, []types.BoundContract{binding})) require.NoError(t, svc.GetLatestValue(ctx, binding.ReadIdentifier(NamedMethod), primitives.Unconfirmed, nil, &result)) + assert.Equal(t, expected.InnerStruct, result.InnerStruct) assert.Equal(t, expected.Value, result.V) assert.Equal(t, expected.TimeVal, result.TimeVal) @@ -151,7 +148,11 @@ func TestSolanaChainReaderService_GetLatestValue(t *testing.T) { } assert.NoError(t, svc.Bind(ctx, []types.BoundContract{binding})) - assert.ErrorIs(t, svc.GetLatestValue(ctx, binding.ReadIdentifier(NamedMethod), primitives.Unconfirmed, nil, &result), expectedErr) + + err = svc.GetLatestValue(ctx, binding.ReadIdentifier(NamedMethod), primitives.Unconfirmed, nil, &result) + + assert.Contains(t, err.Error(), chainreader.ErrMissingAccountData.Error()) + assert.ErrorIs(t, err, types.ErrInternal) }) t.Run("Method Not Found", func(t *testing.T) { @@ -196,33 +197,6 @@ func TestSolanaChainReaderService_GetLatestValue(t *testing.T) { assert.NotNil(t, svc.GetLatestValue(ctx, types.BoundContract{Name: "Unknown"}.ReadIdentifier("Unknown"), primitives.Unconfirmed, nil, &result)) }) - t.Run("Bind Success", func(t *testing.T) { - t.Parallel() - - _, conf := newTestConfAndCodec(t) - - client := new(mockedRPCClient) - svc, err := chainreader.NewChainReaderService(logger.Test(t), client, conf) - - require.NoError(t, err) - require.NotNil(t, svc) - require.NoError(t, svc.Start(ctx)) - - t.Cleanup(func() { - require.NoError(t, svc.Close()) - }) - - pk := ag_solana.NewWallet().PublicKey() - err = svc.Bind(ctx, []types.BoundContract{ - { - Address: pk.String(), - Name: fmt.Sprintf("%s.%s.%d", 
Namespace, NamedMethod, 0), - }, - }) - - assert.NoError(t, err) - }) - t.Run("Bind Errors", func(t *testing.T) { t.Parallel() @@ -315,12 +289,10 @@ func newTestConfAndCodec(t *testing.T) (types.RemoteCodec, config.ChainReader) { Methods: map[string]config.ChainDataReader{ NamedMethod: { AnchorIDL: rawIDL, - Procedures: []config.ChainReaderProcedure{ - { - IDLAccount: testutils.TestStructWithNestedStruct, - OutputModifications: codeccommon.ModifiersConfig{ - &codeccommon.RenameModifierConfig{Fields: map[string]string{"Value": "V"}}, - }, + Procedure: config.ChainReaderProcedure{ + IDLAccount: testutils.TestStructWithNestedStruct, + OutputModifications: codeccommon.ModifiersConfig{ + &codeccommon.RenameModifierConfig{Fields: map[string]string{"Value": "V"}}, }, }, }, @@ -358,36 +330,21 @@ type mockedRPCClient struct { sequence []mockedRPCCall } -func (_m *mockedRPCClient) ReadAll(_ context.Context, pk ag_solana.PublicKey, _ *rpc.GetAccountInfoOpts) ([]byte, error) { - _m.mu.Lock() - defer _m.mu.Unlock() +func (_m *mockedRPCClient) GetMultipleAccountData(_ context.Context, keys ...solana.PublicKey) ([][]byte, error) { + result := make([][]byte, len(keys)) - if _m.responseByAddress == nil { - _m.responseByAddress = make(map[string]mockedRPCCall) - } + for idx, key := range keys { + call, ok := _m.responseByAddress[key.String()] + if !ok || call.err != nil { + result[idx] = nil - if resp, ok := _m.responseByAddress[pk.String()]; ok { - if resp.delay > 0 { - time.Sleep(resp.delay) + continue } - delete(_m.responseByAddress, pk.String()) - - return resp.bts, resp.err - } - - if len(_m.sequence) == 0 { - return nil, errors.New(" no values to return") - } - - next := _m.sequence[0] - _m.sequence = _m.sequence[1:len(_m.sequence)] - - if next.delay > 0 { - time.Sleep(next.delay) + result[idx] = call.bts } - return next.bts, next.err + return result, nil } func (_m *mockedRPCClient) SetNext(bts []byte, err error, delay time.Duration) { @@ -425,9 +382,13 @@ type 
chainReaderInterfaceTester struct { func (r *chainReaderInterfaceTester) GetAccountBytes(i int) []byte { account := [20]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + account[i%20] += byte(i) account[(i+3)%20] += byte(i + 3) - return account[:] + + pk := solana.PublicKeyFromBytes(account[:]) + + return pk.Bytes() } func (r *chainReaderInterfaceTester) GetAccountString(i int) string { @@ -456,64 +417,48 @@ func (r *chainReaderInterfaceTester) Setup(t *testing.T) { MethodTakingLatestParamsReturningTestStruct: { AnchorIDL: fullStructIDL(t), Encoding: config.EncodingTypeBorsh, - Procedures: []config.ChainReaderProcedure{ - { - IDLAccount: "TestStructB", - RPCOpts: &config.RPCOpts{ - Encoding: &encodingBase64, - Commitment: &commitment, - DataSlice: &rpc.DataSlice{ - Offset: &offset, - Length: &length, - }, + Procedure: config.ChainReaderProcedure{ + IDLAccount: "TestStruct", + RPCOpts: &config.RPCOpts{ + Encoding: &encodingBase64, + Commitment: &commitment, + DataSlice: &rpc.DataSlice{ + Offset: &offset, + Length: &length, }, }, - { - IDLAccount: "TestStructA", - }, }, }, MethodReturningUint64: { AnchorIDL: fmt.Sprintf(baseIDL, uint64BaseTypeIDL, ""), Encoding: config.EncodingTypeBorsh, - Procedures: []config.ChainReaderProcedure{ - { - IDLAccount: "SimpleUint64Value", - OutputModifications: codeccommon.ModifiersConfig{ - &codeccommon.PropertyExtractorConfig{FieldName: "I"}, - }, + Procedure: config.ChainReaderProcedure{ + IDLAccount: "SimpleUint64Value", + OutputModifications: codeccommon.ModifiersConfig{ + &codeccommon.PropertyExtractorConfig{FieldName: "I"}, }, }, }, MethodReturningUint64Slice: { AnchorIDL: fmt.Sprintf(baseIDL, uint64SliceBaseTypeIDL, ""), Encoding: config.EncodingTypeBincode, - Procedures: []config.ChainReaderProcedure{ - { - IDLAccount: "Uint64Slice", - OutputModifications: codeccommon.ModifiersConfig{ - &codeccommon.PropertyExtractorConfig{FieldName: "Vals"}, - }, + Procedure: config.ChainReaderProcedure{ + IDLAccount: 
"Uint64Slice", + OutputModifications: codeccommon.ModifiersConfig{ + &codeccommon.PropertyExtractorConfig{FieldName: "Vals"}, }, }, }, MethodReturningSeenStruct: { AnchorIDL: fullStructIDL(t), Encoding: config.EncodingTypeBorsh, - Procedures: []config.ChainReaderProcedure{ - { - IDLAccount: "TestStructB", - OutputModifications: codeccommon.ModifiersConfig{ - &codeccommon.AddressBytesToStringModifierConfig{ - Fields: []string{"Accountstruct.Accountstr"}, - }, - }, - }, - { - IDLAccount: "TestStructA", - OutputModifications: codeccommon.ModifiersConfig{ - &codeccommon.HardCodeModifierConfig{OffChainValues: map[string]any{"ExtraField": AnyExtraValue}}, + Procedure: config.ChainReaderProcedure{ + IDLAccount: "TestStruct", + OutputModifications: codeccommon.ModifiersConfig{ + &codeccommon.AddressBytesToStringModifierConfig{ + Fields: []string{"Accountstruct.Accountstr"}, }, + &codeccommon.HardCodeModifierConfig{OffChainValues: map[string]any{"ExtraField": AnyExtraValue}}, }, }, }, @@ -524,12 +469,10 @@ func (r *chainReaderInterfaceTester) Setup(t *testing.T) { MethodReturningUint64: { AnchorIDL: fmt.Sprintf(baseIDL, uint64BaseTypeIDL, ""), Encoding: config.EncodingTypeBorsh, - Procedures: []config.ChainReaderProcedure{ - { - IDLAccount: "SimpleUint64Value", - OutputModifications: codeccommon.ModifiersConfig{ - &codeccommon.PropertyExtractorConfig{FieldName: "I"}, - }, + Procedure: config.ChainReaderProcedure{ + IDLAccount: "SimpleUint64Value", + OutputModifications: codeccommon.ModifiersConfig{ + &codeccommon.PropertyExtractorConfig{FieldName: "I"}, }, }, }, @@ -600,9 +543,11 @@ func (r *wrappedTestChainReader) Name() string { func (r *wrappedTestChainReader) GetLatestValue(ctx context.Context, readIdentifier string, confidenceLevel primitives.ConfidenceLevel, params, returnVal any) error { var ( - a ag_solana.PublicKey - b ag_solana.PublicKey + bts []byte + acct int + err error ) + parts := strings.Split(readIdentifier, "-") if len(parts) < 3 { panic("unexpected 
readIdentifier length") @@ -611,6 +556,10 @@ func (r *wrappedTestChainReader) GetLatestValue(ctx context.Context, readIdentif contractName := parts[1] method := parts[2] + if contractName == AnySecondContractName { + acct = 1 + } + switch contractName + method { case AnyContractName + EventName: r.test.Skip("Events are not yet supported in Solana") @@ -622,13 +571,11 @@ func (r *wrappedTestChainReader) GetLatestValue(ctx context.Context, readIdentif I: AnyValueToReadWithoutAnArgument, } - bts, err := cdc.Encode(ctx, onChainStruct, "SimpleUint64Value") + bts, err = cdc.Encode(ctx, onChainStruct, "SimpleUint64Value") if err != nil { r.test.Log(err.Error()) r.test.FailNow() } - - r.client.SetNext(bts, nil, 0) case AnyContractName + MethodReturningUint64Slice: cdc := makeTestCodec(r.test, fmt.Sprintf(baseIDL, uint64SliceBaseTypeIDL, ""), config.EncodingTypeBincode) onChainStruct := struct { @@ -637,12 +584,10 @@ func (r *wrappedTestChainReader) GetLatestValue(ctx context.Context, readIdentif Vals: AnySliceToReadWithoutAnArgument, } - bts, err := cdc.Encode(ctx, onChainStruct, "Uint64Slice") + bts, err = cdc.Encode(ctx, onChainStruct, "Uint64Slice") if err != nil { r.test.FailNow() } - - r.client.SetNext(bts, nil, 0) case AnySecondContractName + MethodReturningUint64, AnyContractName: cdc := makeTestCodec(r.test, fmt.Sprintf(baseIDL, uint64BaseTypeIDL, ""), config.EncodingTypeBorsh) onChainStruct := struct { @@ -651,37 +596,26 @@ func (r *wrappedTestChainReader) GetLatestValue(ctx context.Context, readIdentif I: AnyDifferentValueToReadWithoutAnArgument, } - bts, err := cdc.Encode(ctx, onChainStruct, "SimpleUint64Value") + bts, err = cdc.Encode(ctx, onChainStruct, "SimpleUint64Value") if err != nil { r.test.FailNow() } - - r.client.SetNext(bts, nil, 0) case AnyContractName + MethodReturningSeenStruct: nextStruct := CreateTestStruct[*testing.T](0, r.tester) r.testStructQueue = append(r.testStructQueue, &nextStruct) - a, b = getAddresses(r.test, r.tester, AnyContractName, 
MethodReturningSeenStruct) - fallthrough default: - if len(r.testStructQueue) == 0 { r.test.FailNow() } - if contractName+method != AnyContractName+MethodReturningSeenStruct { - a, b = getAddresses(r.test, r.tester, AnyContractName, MethodTakingLatestParamsReturningTestStruct) - } - nextTestStruct := r.testStructQueue[0] r.testStructQueue = r.testStructQueue[1:len(r.testStructQueue)] // split into two encoded parts to test the preloading function cdc := makeTestCodec(r.test, fullStructIDL(r.test), config.EncodingTypeBorsh) - var bts []byte - var err error if strings.Contains(r.test.Name(), "wraps_config_with_modifiers_using_its_own_mapstructure_overrides") { // TODO: This is a temporary solution. We are manually retyping this struct to avoid breaking unrelated tests. // Once input modifiers are fully implemented, revisit this code and remove this manual struct conversion @@ -716,28 +650,20 @@ func (r *wrappedTestChainReader) GetLatestValue(ctx context.Context, readIdentif NestedStaticStruct: nextTestStruct.NestedStaticStruct, } - bts, err = cdc.Encode(ctx, tempStruct, "TestStructB") + bts, err = cdc.Encode(ctx, tempStruct, "TestStruct") if err != nil { r.test.FailNow() } } else { - bts, err = cdc.Encode(ctx, nextTestStruct, "TestStructB") + bts, err = cdc.Encode(ctx, nextTestStruct, "TestStruct") if err != nil { r.test.FailNow() } } - - // make part A return slower than part B - r.client.SetForAddress(a, bts, nil, 300*time.Millisecond) - - bts, err = cdc.Encode(ctx, nextTestStruct, "TestStructA") - if err != nil { - r.test.FailNow() - } - - r.client.SetForAddress(b, bts, nil, 50*time.Millisecond) } + r.client.SetForAddress(ag_solana.PublicKey(r.tester.GetAccountBytes(acct)), bts, nil, 0) + return r.service.GetLatestValue(ctx, readIdentifier, confidenceLevel, params, returnVal) } @@ -753,52 +679,6 @@ func (r *wrappedTestChainReader) QueryKey(_ context.Context, _ types.BoundContra return nil, nil } -func getAddresses(t *testing.T, tester 
ChainComponentsInterfaceTester[*testing.T], contractName, readName string) (ag_solana.PublicKey, ag_solana.PublicKey) { - t.Helper() - - fn := ag_solana.MustPublicKeyFromBase58 - - var ( - addresses []string - found bool - ) - - for _, binding := range tester.GetBindings(t) { - if binding.Name == contractName { - encoded, err := base64.StdEncoding.DecodeString(binding.Address) - if err != nil { - t.Logf("%s", err) - t.FailNow() - } - - var readAddresses map[string][]string - - err = json.Unmarshal(encoded, &readAddresses) - if err != nil { - t.Logf("%s", err) - t.FailNow() - } - - var ok bool - - addresses, ok = readAddresses[readName] - if !ok { - t.Log("no addresses found") - t.FailNow() - } - - found = true - } - } - - if !found { - t.Log("no addresses found") - t.FailNow() - } - - return fn(addresses[0]), fn(addresses[1]) -} - func (r *wrappedTestChainReader) Bind(ctx context.Context, bindings []types.BoundContract) error { return r.service.Bind(ctx, bindings) } @@ -849,28 +729,9 @@ func (r *chainReaderInterfaceTester) TriggerEvent(t *testing.T, testStruct *Test } func (r *chainReaderInterfaceTester) GetBindings(t *testing.T) []types.BoundContract { - mainContractMethods := map[string][]string{ - MethodTakingLatestParamsReturningTestStruct: {r.address[0], r.address[1]}, - MethodReturningUint64: {r.address[2]}, - MethodReturningUint64Slice: {r.address[3]}, - MethodReturningSeenStruct: {r.address[4], r.address[5]}, - } - - addrBts, err := json.Marshal(mainContractMethods) - if err != nil { - t.Log(err.Error()) - t.FailNow() - } - - secondAddrBts, err := json.Marshal(map[string][]string{MethodReturningUint64: {r.address[6]}}) - if err != nil { - t.Log(err.Error()) - t.FailNow() - } - return []types.BoundContract{ - {Name: AnyContractName, Address: base64.StdEncoding.EncodeToString(addrBts)}, - {Name: AnySecondContractName, Address: base64.StdEncoding.EncodeToString(secondAddrBts)}, + {Name: AnyContractName, Address: 
solana.PublicKeyFromBytes(r.GetAccountBytes(0)).String()}, + {Name: AnySecondContractName, Address: solana.PublicKeyFromBytes(r.GetAccountBytes(1)).String()}, } } @@ -912,7 +773,7 @@ func fullStructIDL(t *testing.T) string { return fmt.Sprintf( baseIDL, - strings.Join([]string{testStructAIDL, testStructBIDL}, ","), + testStructIDL, strings.Join([]string{midLevelDynamicStructIDL, midLevelStaticStructIDL, innerDynamicStructIDL, innerStaticStructIDL, accountStructIDL}, ","), ) } @@ -925,8 +786,8 @@ const ( "types": [%s] }` - testStructAIDL = `{ - "name": "TestStructA", + testStructIDL = `{ + "name": "TestStruct", "type": { "kind": "struct", "fields": [ @@ -934,20 +795,12 @@ const ( {"name": "differentField","type": "string"}, {"name": "bigField","type": "i128"}, {"name": "nestedDynamicStruct","type": {"defined": "MidLevelDynamicStruct"}}, - {"name": "nestedStaticStruct","type": {"defined": "MidLevelStaticStruct"}} - ] - } - }` - - testStructBIDL = `{ - "name": "TestStructB", - "type": { - "kind": "struct", - "fields": [ + {"name": "nestedStaticStruct","type": {"defined": "MidLevelStaticStruct"}}, {"name": "oracleID","type": "u8"}, {"name": "oracleIDs","type": {"array": ["u8",32]}}, {"name": "accountstruct","type": {"defined": "accountstruct"}}, {"name": "accounts","type": {"vec": "bytes"}} + ] } }` diff --git a/pkg/solana/config/chain_reader.go b/pkg/solana/config/chain_reader.go index a1fed147d..dbe9ef4ab 100644 --- a/pkg/solana/config/chain_reader.go +++ b/pkg/solana/config/chain_reader.go @@ -25,8 +25,8 @@ type ChainDataReader struct { AnchorIDL string `json:"anchorIDL" toml:"anchorIDL"` // Encoding defines the type of encoding used for on-chain data. Currently supported // are 'borsh' and 'bincode'. 
- Encoding EncodingType `json:"encoding" toml:"encoding"` - Procedures []ChainReaderProcedure `json:"procedures" toml:"procedures"` + Encoding EncodingType `json:"encoding" toml:"encoding"` + Procedure ChainReaderProcedure `json:"procedure" toml:"procedure"` } type EncodingType int diff --git a/pkg/solana/config/chain_reader_test.go b/pkg/solana/config/chain_reader_test.go index b0ad49181..7d290b50c 100644 --- a/pkg/solana/config/chain_reader_test.go +++ b/pkg/solana/config/chain_reader_test.go @@ -90,28 +90,24 @@ var validChainReaderConfig = config.ChainReader{ "Method": { AnchorIDL: "test idl 1", Encoding: config.EncodingTypeBorsh, - Procedures: []config.ChainReaderProcedure{ - { - IDLAccount: testutils.TestStructWithNestedStruct, - }, + Procedure: config.ChainReaderProcedure{ + IDLAccount: testutils.TestStructWithNestedStruct, }, }, "MethodWithOpts": { AnchorIDL: "test idl 2", Encoding: config.EncodingTypeBorsh, - Procedures: []config.ChainReaderProcedure{ - { - IDLAccount: testutils.TestStructWithNestedStruct, - OutputModifications: codeccommon.ModifiersConfig{ - &codeccommon.PropertyExtractorConfig{FieldName: "DurationVal"}, - }, - RPCOpts: &config.RPCOpts{ - Encoding: &encodingBase64, - Commitment: &commitment, - DataSlice: &rpc.DataSlice{ - Offset: &offset, - Length: &length, - }, + Procedure: config.ChainReaderProcedure{ + IDLAccount: testutils.TestStructWithNestedStruct, + OutputModifications: codeccommon.ModifiersConfig{ + &codeccommon.PropertyExtractorConfig{FieldName: "DurationVal"}, + }, + RPCOpts: &config.RPCOpts{ + Encoding: &encodingBase64, + Commitment: &commitment, + DataSlice: &rpc.DataSlice{ + Offset: &offset, + Length: &length, }, }, }, @@ -123,10 +119,8 @@ var validChainReaderConfig = config.ChainReader{ "Method": { AnchorIDL: "test idl 3", Encoding: config.EncodingTypeBincode, - Procedures: []config.ChainReaderProcedure{ - { - IDLAccount: testutils.TestStructWithNestedStruct, - }, + Procedure: config.ChainReaderProcedure{ + IDLAccount: 
testutils.TestStructWithNestedStruct, }, }, }, diff --git a/pkg/solana/config/testChainReader_invalid.json b/pkg/solana/config/testChainReader_invalid.json index b428b6115..98caa8fcc 100644 --- a/pkg/solana/config/testChainReader_invalid.json +++ b/pkg/solana/config/testChainReader_invalid.json @@ -5,9 +5,9 @@ "Method": { "anchorIDL": "test idl 1", "encoding": "invalid", - "procedures": [{ + "procedure": { "idlAccount": "StructWithNestedStruct" - }] + } } } } diff --git a/pkg/solana/config/testChainReader_valid.json b/pkg/solana/config/testChainReader_valid.json index 6dfbe0626..ca75a936b 100644 --- a/pkg/solana/config/testChainReader_valid.json +++ b/pkg/solana/config/testChainReader_valid.json @@ -5,14 +5,14 @@ "Method": { "anchorIDL": "test idl 1", "encoding": "borsh", - "procedures": [{ + "procedure": { "idlAccount": "StructWithNestedStruct" - }] + } }, "MethodWithOpts": { "anchorIDL": "test idl 2", "encoding": "borsh", - "procedures": [{ + "procedure": { "idlAccount": "StructWithNestedStruct", "outputModifications": [{ "Type": "extract property", @@ -26,7 +26,7 @@ "length": 10 } } - }] + } } } }, @@ -35,9 +35,9 @@ "Method": { "anchorIDL": "test idl 3", "encoding": "bincode", - "procedures": [{ + "procedure": { "idlAccount": "StructWithNestedStruct" - }] + } } } } From e2db20a6a9691d10470ee7a843a93a5c0e79b1ac Mon Sep 17 00:00:00 2001 From: amit-momin <108959691+amit-momin@users.noreply.github.com> Date: Mon, 18 Nov 2024 13:08:57 -0600 Subject: [PATCH 08/12] Update simulation to set max compute unit limit and enable sig verification (#919) * Updated simulation to set max CU limit and enabled sig verification * Added signature to simulation tx and fixed tests * Cleaned up code * Updated simulation to explicitly set the configured commitment to avoid using the default * Added check to ensure estimated compute unit limit does not exceed max after adding buffer * Fixed linting --- pkg/solana/txm/txm.go | 44 +++++++++++++++++++++-------- 
pkg/solana/txm/txm_internal_test.go | 32 ++++++++++++++++----- pkg/solana/txm/txm_unit_test.go | 43 ++++++++++++++++++++++++++-- 3 files changed, 98 insertions(+), 21 deletions(-) diff --git a/pkg/solana/txm/txm.go b/pkg/solana/txm/txm.go index 2a99a6c44..e34c99cef 100644 --- a/pkg/solana/txm/txm.go +++ b/pkg/solana/txm/txm.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "math" "math/big" "strings" "sync" @@ -20,6 +19,7 @@ import ( commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" "github.com/smartcontractkit/chainlink-common/pkg/utils" bigmath "github.com/smartcontractkit/chainlink-common/pkg/utils/big_math" + "github.com/smartcontractkit/chainlink-common/pkg/utils/mathutil" "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" "github.com/smartcontractkit/chainlink-solana/pkg/solana/config" @@ -33,6 +33,7 @@ const ( MaxSigsToConfirm = 256 // max number of signatures in GetSignatureStatus call EstimateComputeUnitLimitBuffer = 10 // percent buffer added on top of estimated compute unit limits to account for any variance TxReapInterval = 10 * time.Second // interval of time between reaping transactions that have met the retention threshold + MaxComputeUnitLimit = 1_400_000 // max compute unit limit a transaction can have ) var _ services.Service = (*Txm)(nil) @@ -616,7 +617,7 @@ func (txm *Txm) Enqueue(ctx context.Context, accountID string, tx *solanaGo.Tran select { case txm.chSend <- msg: default: - txm.lggr.Errorw("failed to enqeue tx", "queueFull", len(txm.chSend) == MaxQueueLen, "tx", msg) + txm.lggr.Errorw("failed to enqueue tx", "queueFull", len(txm.chSend) == MaxQueueLen, "tx", msg) return fmt.Errorf("failed to enqueue transaction for %s", accountID) } return nil @@ -646,7 +647,28 @@ func (txm *Txm) GetTransactionStatus(ctx context.Context, transactionID string) // EstimateComputeUnitLimit estimates the compute unit limit needed for a transaction. 
// It simulates the provided transaction to determine the used compute and applies a buffer to it. func (txm *Txm) EstimateComputeUnitLimit(ctx context.Context, tx *solanaGo.Transaction) (uint32, error) { - res, err := txm.simulateTx(ctx, tx) + txCopy := *tx + + // Set max compute unit limit when simulating a transaction to avoid getting an error for exceeding the default 200k compute unit limit + if computeUnitLimitErr := fees.SetComputeUnitLimit(&txCopy, fees.ComputeUnitLimit(MaxComputeUnitLimit)); computeUnitLimitErr != nil { + txm.lggr.Errorw("failed to set compute unit limit when simulating tx", "error", computeUnitLimitErr) + return 0, computeUnitLimitErr + } + + // Sign and set signature in tx copy for simulation + txMsg, marshalErr := txCopy.Message.MarshalBinary() + if marshalErr != nil { + return 0, fmt.Errorf("failed to marshal tx message: %w", marshalErr) + } + sigBytes, signErr := txm.ks.Sign(ctx, txCopy.Message.AccountKeys[0].String(), txMsg) + if signErr != nil { + return 0, fmt.Errorf("failed to sign transaction: %w", signErr) + } + var sig [64]byte + copy(sig[:], sigBytes) + txCopy.Signatures = append(txCopy.Signatures, sig) + + res, err := txm.simulateTx(ctx, &txCopy) if err != nil { return 0, err } @@ -654,8 +676,8 @@ func (txm *Txm) EstimateComputeUnitLimit(ctx context.Context, tx *solanaGo.Trans // Return error if response err is non-nil to avoid broadcasting a tx destined to fail if res.Err != nil { sig := solanaGo.Signature{} - if len(tx.Signatures) > 0 { - sig = tx.Signatures[0] + if len(txCopy.Signatures) > 0 { + sig = txCopy.Signatures[0] } txm.processSimulationError("", sig, res) return 0, fmt.Errorf("simulated tx returned error: %v", res.Err) @@ -672,13 +694,10 @@ func (txm *Txm) EstimateComputeUnitLimit(ctx context.Context, tx *solanaGo.Trans // Add buffer to the used compute estimate unitsConsumed = bigmath.AddPercentage(new(big.Int).SetUint64(unitsConsumed), EstimateComputeUnitLimitBuffer).Uint64() - if unitsConsumed > math.MaxUint32 
{ - txm.lggr.Debug("compute units used with buffer greater than uint32 max", "unitsConsumed", unitsConsumed) - // Do not return error to allow falling back to default compute unit limit - return 0, nil - } + // Ensure unitsConsumed does not exceed the max compute unit limit for a transaction after adding buffer + unitsConsumed = mathutil.Min(unitsConsumed, MaxComputeUnitLimit) - return uint32(unitsConsumed), nil + return uint32(unitsConsumed), nil //nolint // unitsConsumed can only be a maximum of 1.4M } // simulateTx simulates transactions using the SimulateTx client method @@ -690,7 +709,8 @@ func (txm *Txm) simulateTx(ctx context.Context, tx *solanaGo.Transaction) (res * return } - res, err = client.SimulateTx(ctx, tx, nil) // use default options (does not verify signatures) + // Simulate with signature verification enabled since it can have an impact on the compute units used + res, err = client.SimulateTx(ctx, tx, &rpc.SimulateTransactionOpts{SigVerify: true, Commitment: txm.cfg.Commitment()}) if err != nil { // This error can occur if endpoint goes down or if invalid signature txm.lggr.Errorw("failed to simulate tx", "error", err) diff --git a/pkg/solana/txm/txm_internal_test.go b/pkg/solana/txm/txm_internal_test.go index d246220a7..f19b26b9a 100644 --- a/pkg/solana/txm/txm_internal_test.go +++ b/pkg/solana/txm/txm_internal_test.go @@ -910,6 +910,8 @@ func TestTxm_compute_unit_limit_estimation(t *testing.T) { t.Run("simulation_succeeds", func(t *testing.T) { // Test tx is not discarded due to confirm timeout and tracked to finalization tx, signed := getTx(t, 1, mkey) + // add signature and compute unit limit to tx for simulation (excludes compute unit price) + simulateTx := addSigAndLimitToTx(t, mkey, solana.PublicKey{}, *tx, MaxComputeUnitLimit) sig := randomSignature(t) var wg sync.WaitGroup wg.Add(3) @@ -917,11 +919,11 @@ func TestTxm_compute_unit_limit_estimation(t *testing.T) { computeUnitConsumed := uint64(1_000_000) computeUnitLimit := 
fees.ComputeUnitLimit(uint32(bigmath.AddPercentage(new(big.Int).SetUint64(computeUnitConsumed), EstimateComputeUnitLimitBuffer).Uint64())) mc.On("SendTx", mock.Anything, signed(0, true, computeUnitLimit)).Return(sig, nil) - // First simulated before broadcast without signature or compute unit limit set - mc.On("SimulateTx", mock.Anything, tx, mock.Anything).Run(func(mock.Arguments) { + // First simulation before broadcast with signature and max compute unit limit set + mc.On("SimulateTx", mock.Anything, simulateTx, mock.Anything).Run(func(mock.Arguments) { wg.Done() }).Return(&rpc.SimulateTransactionResult{UnitsConsumed: &computeUnitConsumed}, nil).Once() - // Second simulated after broadcast with signature and compute unit limit set + // Second simulation after broadcast with signature and compute unit limit set mc.On("SimulateTx", mock.Anything, signed(0, true, computeUnitLimit), mock.Anything).Run(func(mock.Arguments) { wg.Done() }).Return(&rpc.SimulateTransactionResult{UnitsConsumed: &computeUnitConsumed}, nil).Once() @@ -982,11 +984,13 @@ func TestTxm_compute_unit_limit_estimation(t *testing.T) { t.Run("simulation_returns_error", func(t *testing.T) { // Test tx is not discarded due to confirm timeout and tracked to finalization - tx, signed := getTx(t, 1, mkey) + tx, _ := getTx(t, 1, mkey) + // add signature and compute unit limit to tx for simulation (excludes compute unit price) + simulateTx := addSigAndLimitToTx(t, mkey, solana.PublicKey{}, *tx, MaxComputeUnitLimit) sig := randomSignature(t) - - mc.On("SendTx", mock.Anything, signed(0, true, fees.ComputeUnitLimit(0))).Return(sig, nil).Panic("SendTx should never be called").Maybe() - mc.On("SimulateTx", mock.Anything, tx, mock.Anything).Return(&rpc.SimulateTransactionResult{Err: errors.New("tx err")}, nil).Once() + mc.On("SendTx", mock.Anything, mock.Anything).Return(sig, nil).Panic("SendTx should never be called").Maybe() + // First simulation before broadcast with max compute unit limit + 
mc.On("SimulateTx", mock.Anything, simulateTx, mock.Anything).Return(&rpc.SimulateTransactionResult{Err: errors.New("tx err")}, nil).Once() // tx should NOT be able to queue assert.Error(t, txm.Enqueue(ctx, t.Name(), tx, nil)) @@ -1072,3 +1076,17 @@ func TestTxm_Enqueue(t *testing.T) { }) } } + +func addSigAndLimitToTx(t *testing.T, keystore SimpleKeystore, pubkey solana.PublicKey, tx solana.Transaction, limit fees.ComputeUnitLimit) *solana.Transaction { + txCopy := tx + // sign tx + txMsg, err := tx.Message.MarshalBinary() + require.NoError(t, err) + sigBytes, err := keystore.Sign(context.Background(), pubkey.String(), txMsg) + require.NoError(t, err) + var sig [64]byte + copy(sig[:], sigBytes) + txCopy.Signatures = append(txCopy.Signatures, sig) + require.NoError(t, fees.SetComputeUnitLimit(&txCopy, limit)) + return &txCopy +} diff --git a/pkg/solana/txm/txm_unit_test.go b/pkg/solana/txm/txm_unit_test.go index bb2108f4e..0bac3e478 100644 --- a/pkg/solana/txm/txm_unit_test.go +++ b/pkg/solana/txm/txm_unit_test.go @@ -14,6 +14,7 @@ import ( solanaClient "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" clientmocks "github.com/smartcontractkit/chainlink-solana/pkg/solana/client/mocks" "github.com/smartcontractkit/chainlink-solana/pkg/solana/config" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/fees" solanatxm "github.com/smartcontractkit/chainlink-solana/pkg/solana/txm" keyMocks "github.com/smartcontractkit/chainlink-solana/pkg/solana/txm/mocks" @@ -25,11 +26,11 @@ import ( func TestTxm_EstimateComputeUnitLimit(t *testing.T) { t.Parallel() - ctx := tests.Context(t) // setup mock keystore mkey := keyMocks.NewSimpleKeystore(t) + mkey.On("Sign", mock.Anything, mock.Anything, mock.Anything).Return([]byte{}, nil) // setup key key, err := solana.NewRandomPrivateKey() @@ -57,7 +58,17 @@ func TestTxm_EstimateComputeUnitLimit(t *testing.T) { Blockhash: solana.Hash{}, }, }, nil).Once() - client.On("SimulateTx", mock.Anything, mock.Anything, 
mock.Anything).Return(&rpc.SimulateTransactionResult{ + client.On("SimulateTx", mock.Anything, mock.IsType(&solana.Transaction{}), mock.IsType(&rpc.SimulateTransactionOpts{})).Run(func(args mock.Arguments) { + // Validate max compute unit limit is set in transaction + tx := args.Get(1).(*solana.Transaction) + limit, err := fees.ParseComputeUnitLimit(tx.Message.Instructions[len(tx.Message.Instructions)-1].Data) + require.NoError(t, err) + require.Equal(t, fees.ComputeUnitLimit(solanatxm.MaxComputeUnitLimit), limit) + + // Validate signature verification is enabled + opts := args.Get(2).(*rpc.SimulateTransactionOpts) + require.True(t, opts.SigVerify) + }).Return(&rpc.SimulateTransactionResult{ Err: nil, UnitsConsumed: &usedCompute, }, nil).Once() @@ -111,6 +122,34 @@ func TestTxm_EstimateComputeUnitLimit(t *testing.T) { require.NoError(t, err) require.Equal(t, uint32(0), computeUnitLimit) }) + + t.Run("simulation returns max compute unit limit if adding buffer exceeds it", func(t *testing.T) { + usedCompute := uint64(1_400_000) + client.On("LatestBlockhash", mock.Anything).Return(&rpc.GetLatestBlockhashResult{ + Value: &rpc.LatestBlockhashResult{ + LastValidBlockHeight: 100, + Blockhash: solana.Hash{}, + }, + }, nil).Once() + client.On("SimulateTx", mock.Anything, mock.IsType(&solana.Transaction{}), mock.IsType(&rpc.SimulateTransactionOpts{})).Run(func(args mock.Arguments) { + // Validate max compute unit limit is set in transaction + tx := args.Get(1).(*solana.Transaction) + limit, err := fees.ParseComputeUnitLimit(tx.Message.Instructions[len(tx.Message.Instructions)-1].Data) + require.NoError(t, err) + require.Equal(t, fees.ComputeUnitLimit(solanatxm.MaxComputeUnitLimit), limit) + + // Validate signature verification is enabled + opts := args.Get(2).(*rpc.SimulateTransactionOpts) + require.True(t, opts.SigVerify) + }).Return(&rpc.SimulateTransactionResult{ + Err: nil, + UnitsConsumed: &usedCompute, + }, nil).Once() + tx := createTx(t, client, pubKey, pubKey, 
pubKeyReceiver, solana.LAMPORTS_PER_SOL) + computeUnitLimit, err := txm.EstimateComputeUnitLimit(ctx, tx) + require.NoError(t, err) + require.Equal(t, uint32(1_400_000), computeUnitLimit) + }) } func createTx(t *testing.T, client solanaClient.ReaderWriter, signer solana.PublicKey, sender solana.PublicKey, receiver solana.PublicKey, amt uint64) *solana.Transaction { From 43eb1243b3660d1bcfa6e0186cb4f001c9ded12d Mon Sep 17 00:00:00 2001 From: Awbrey Hughlett Date: Tue, 19 Nov 2024 20:02:48 -0500 Subject: [PATCH 09/12] Move mockery configs to single config file (#932) * move mockery to config file and update go version in build script * add mockery to makefile --- .mockery.yaml | 39 + Makefile | 1 + pkg/monitoring/chain_reader.go | 1 - pkg/monitoring/metrics/feedbalances.go | 2 - pkg/monitoring/metrics/fees.go | 2 - pkg/monitoring/metrics/mocks/FeedBalances.go | 69 -- pkg/monitoring/metrics/mocks/Fees.go | 66 ++ pkg/monitoring/metrics/mocks/NetworkFees.go | 37 - pkg/monitoring/metrics/mocks/NodeBalances.go | 34 - pkg/monitoring/metrics/mocks/NodeSuccess.go | 37 - .../metrics/mocks/ReportObservations.go | 37 - pkg/monitoring/metrics/mocks/SlotHeight.go | 37 - pkg/monitoring/metrics/mocks/feed_balances.go | 164 ++++ pkg/monitoring/metrics/mocks/network_fees.go | 101 +++ pkg/monitoring/metrics/mocks/node_balances.go | 101 +++ pkg/monitoring/metrics/mocks/node_success.go | 102 +++ .../metrics/mocks/report_observations.go | 102 +++ pkg/monitoring/metrics/mocks/slot_height.go | 102 +++ pkg/monitoring/metrics/networkfees.go | 2 - pkg/monitoring/metrics/nodebalances.go | 2 - pkg/monitoring/metrics/nodesuccess.go | 2 - pkg/monitoring/metrics/reportobservations.go | 2 - pkg/monitoring/metrics/slotheight.go | 2 - pkg/monitoring/mocks/ChainReader.go | 282 ------- pkg/monitoring/mocks/chain_reader.go | 527 +++++++++++++ pkg/solana/client/client.go | 1 - pkg/solana/client/mocks/ReaderWriter.go | 384 --------- pkg/solana/client/mocks/reader_writer.go | 739 ++++++++++++++++++ 
pkg/solana/config/config.go | 1 - pkg/solana/config/mocks/config.go | 548 +++++++++++++ pkg/solana/fees/estimator.go | 1 - pkg/solana/fees/mocks/Estimator.go | 90 +++ pkg/solana/txm/mocks/simple_keystore.go | 66 ++ pkg/solana/txm/txm.go | 1 - scripts/build-contract-artifacts-action.sh | 4 +- 35 files changed, 2750 insertions(+), 938 deletions(-) create mode 100644 .mockery.yaml delete mode 100644 pkg/monitoring/metrics/mocks/FeedBalances.go delete mode 100644 pkg/monitoring/metrics/mocks/NetworkFees.go delete mode 100644 pkg/monitoring/metrics/mocks/NodeBalances.go delete mode 100644 pkg/monitoring/metrics/mocks/NodeSuccess.go delete mode 100644 pkg/monitoring/metrics/mocks/ReportObservations.go delete mode 100644 pkg/monitoring/metrics/mocks/SlotHeight.go create mode 100644 pkg/monitoring/metrics/mocks/feed_balances.go create mode 100644 pkg/monitoring/metrics/mocks/network_fees.go create mode 100644 pkg/monitoring/metrics/mocks/node_balances.go create mode 100644 pkg/monitoring/metrics/mocks/node_success.go create mode 100644 pkg/monitoring/metrics/mocks/report_observations.go create mode 100644 pkg/monitoring/metrics/mocks/slot_height.go delete mode 100644 pkg/monitoring/mocks/ChainReader.go create mode 100644 pkg/monitoring/mocks/chain_reader.go delete mode 100644 pkg/solana/client/mocks/ReaderWriter.go create mode 100644 pkg/solana/client/mocks/reader_writer.go diff --git a/.mockery.yaml b/.mockery.yaml new file mode 100644 index 000000000..1ef8d4a73 --- /dev/null +++ b/.mockery.yaml @@ -0,0 +1,39 @@ +dir: "{{ .InterfaceDir }}/mocks" +mockname: "{{ .InterfaceName }}" +outpkg: mocks +filename: "{{ .InterfaceName | snakecase }}.go" +packages: + github.com/smartcontractkit/chainlink-solana/pkg/monitoring: + interfaces: + ChainReader: + github.com/smartcontractkit/chainlink-solana/pkg/monitoring/metrics: + interfaces: + FeedBalances: + Fees: + config: + filename: "Fees.go" + NetworkFees: + NodeBalances: + NodeSuccess: + ReportObservations: + SlotHeight: + 
github.com/smartcontractkit/chainlink-solana/pkg/solana/client: + interfaces: + ReaderWriter: + github.com/smartcontractkit/chainlink-solana/pkg/solana/config: + interfaces: + Config: + config: + filename: config.go + case: underscore + github.com/smartcontractkit/chainlink-solana/pkg/solana/fees: + interfaces: + Estimator: + config: + filename: "Estimator.go" + github.com/smartcontractkit/chainlink-solana/pkg/solana/txm: + interfaces: + SimpleKeystore: + config: + filename: simple_keystore.go + case: underscore \ No newline at end of file diff --git a/Makefile b/Makefile index 27f3f7d31..0f3fd8843 100644 --- a/Makefile +++ b/Makefile @@ -111,6 +111,7 @@ rm-mocked: .PHONY: generate generate: mockery gomods gomods -w go generate -x ./... + mockery .PHONY: lint-go-integration-tests lint-go-integration-tests: diff --git a/pkg/monitoring/chain_reader.go b/pkg/monitoring/chain_reader.go index 9b8c8ebff..8aef3f5b1 100644 --- a/pkg/monitoring/chain_reader.go +++ b/pkg/monitoring/chain_reader.go @@ -10,7 +10,6 @@ import ( "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" ) -//go:generate mockery --name ChainReader --output ./mocks/ type ChainReader interface { GetState(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType) (state pkgSolana.State, blockHeight uint64, err error) GetLatestTransmission(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType) (answer pkgSolana.Answer, blockHeight uint64, err error) diff --git a/pkg/monitoring/metrics/feedbalances.go b/pkg/monitoring/metrics/feedbalances.go index c56c86a75..54e856ad4 100644 --- a/pkg/monitoring/metrics/feedbalances.go +++ b/pkg/monitoring/metrics/feedbalances.go @@ -8,8 +8,6 @@ import ( commonMonitoring "github.com/smartcontractkit/chainlink-common/pkg/monitoring" ) -//go:generate mockery --name FeedBalances --output ./mocks/ - type FeedBalances interface { Exists(balanceAccountName string) (*prometheus.GaugeVec, bool) SetBalance(balance uint64, 
balanceAccountName string, feedInput FeedInput) diff --git a/pkg/monitoring/metrics/fees.go b/pkg/monitoring/metrics/fees.go index 883b69485..05aed3fc9 100644 --- a/pkg/monitoring/metrics/fees.go +++ b/pkg/monitoring/metrics/fees.go @@ -7,8 +7,6 @@ import ( "github.com/smartcontractkit/chainlink-solana/pkg/solana/fees" ) -//go:generate mockery --name Fees --output ./mocks/ - type Fees interface { Set(txFee uint64, computeUnitPrice fees.ComputeUnitPrice, feedInput FeedInput) Cleanup(feedInput FeedInput) diff --git a/pkg/monitoring/metrics/mocks/FeedBalances.go b/pkg/monitoring/metrics/mocks/FeedBalances.go deleted file mode 100644 index ded286e81..000000000 --- a/pkg/monitoring/metrics/mocks/FeedBalances.go +++ /dev/null @@ -1,69 +0,0 @@ -// Code generated by mockery v2.43.2. DO NOT EDIT. - -package mocks - -import ( - metrics "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/metrics" - mock "github.com/stretchr/testify/mock" - - prometheus "github.com/prometheus/client_golang/prometheus" -) - -// FeedBalances is an autogenerated mock type for the FeedBalances type -type FeedBalances struct { - mock.Mock -} - -// Cleanup provides a mock function with given fields: balanceAccountName, feedInput -func (_m *FeedBalances) Cleanup(balanceAccountName string, feedInput metrics.FeedInput) { - _m.Called(balanceAccountName, feedInput) -} - -// Exists provides a mock function with given fields: balanceAccountName -func (_m *FeedBalances) Exists(balanceAccountName string) (*prometheus.GaugeVec, bool) { - ret := _m.Called(balanceAccountName) - - if len(ret) == 0 { - panic("no return value specified for Exists") - } - - var r0 *prometheus.GaugeVec - var r1 bool - if rf, ok := ret.Get(0).(func(string) (*prometheus.GaugeVec, bool)); ok { - return rf(balanceAccountName) - } - if rf, ok := ret.Get(0).(func(string) *prometheus.GaugeVec); ok { - r0 = rf(balanceAccountName) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*prometheus.GaugeVec) - } - } - - if rf, ok := 
ret.Get(1).(func(string) bool); ok { - r1 = rf(balanceAccountName) - } else { - r1 = ret.Get(1).(bool) - } - - return r0, r1 -} - -// SetBalance provides a mock function with given fields: balance, balanceAccountName, feedInput -func (_m *FeedBalances) SetBalance(balance uint64, balanceAccountName string, feedInput metrics.FeedInput) { - _m.Called(balance, balanceAccountName, feedInput) -} - -// NewFeedBalances creates a new instance of FeedBalances. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewFeedBalances(t interface { - mock.TestingT - Cleanup(func()) -}) *FeedBalances { - mock := &FeedBalances{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/pkg/monitoring/metrics/mocks/Fees.go b/pkg/monitoring/metrics/mocks/Fees.go index e435cfabd..3bb621ec5 100644 --- a/pkg/monitoring/metrics/mocks/Fees.go +++ b/pkg/monitoring/metrics/mocks/Fees.go @@ -14,16 +14,82 @@ type Fees struct { mock.Mock } +type Fees_Expecter struct { + mock *mock.Mock +} + +func (_m *Fees) EXPECT() *Fees_Expecter { + return &Fees_Expecter{mock: &_m.Mock} +} + // Cleanup provides a mock function with given fields: feedInput func (_m *Fees) Cleanup(feedInput metrics.FeedInput) { _m.Called(feedInput) } +// Fees_Cleanup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cleanup' +type Fees_Cleanup_Call struct { + *mock.Call +} + +// Cleanup is a helper method to define mock.On call +// - feedInput metrics.FeedInput +func (_e *Fees_Expecter) Cleanup(feedInput interface{}) *Fees_Cleanup_Call { + return &Fees_Cleanup_Call{Call: _e.mock.On("Cleanup", feedInput)} +} + +func (_c *Fees_Cleanup_Call) Run(run func(feedInput metrics.FeedInput)) *Fees_Cleanup_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(metrics.FeedInput)) + }) + return _c +} + +func (_c *Fees_Cleanup_Call) 
Return() *Fees_Cleanup_Call { + _c.Call.Return() + return _c +} + +func (_c *Fees_Cleanup_Call) RunAndReturn(run func(metrics.FeedInput)) *Fees_Cleanup_Call { + _c.Call.Return(run) + return _c +} + // Set provides a mock function with given fields: txFee, computeUnitPrice, feedInput func (_m *Fees) Set(txFee uint64, computeUnitPrice fees.ComputeUnitPrice, feedInput metrics.FeedInput) { _m.Called(txFee, computeUnitPrice, feedInput) } +// Fees_Set_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Set' +type Fees_Set_Call struct { + *mock.Call +} + +// Set is a helper method to define mock.On call +// - txFee uint64 +// - computeUnitPrice fees.ComputeUnitPrice +// - feedInput metrics.FeedInput +func (_e *Fees_Expecter) Set(txFee interface{}, computeUnitPrice interface{}, feedInput interface{}) *Fees_Set_Call { + return &Fees_Set_Call{Call: _e.mock.On("Set", txFee, computeUnitPrice, feedInput)} +} + +func (_c *Fees_Set_Call) Run(run func(txFee uint64, computeUnitPrice fees.ComputeUnitPrice, feedInput metrics.FeedInput)) *Fees_Set_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint64), args[1].(fees.ComputeUnitPrice), args[2].(metrics.FeedInput)) + }) + return _c +} + +func (_c *Fees_Set_Call) Return() *Fees_Set_Call { + _c.Call.Return() + return _c +} + +func (_c *Fees_Set_Call) RunAndReturn(run func(uint64, fees.ComputeUnitPrice, metrics.FeedInput)) *Fees_Set_Call { + _c.Call.Return(run) + return _c +} + // NewFees creates a new instance of Fees. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewFees(t interface { diff --git a/pkg/monitoring/metrics/mocks/NetworkFees.go b/pkg/monitoring/metrics/mocks/NetworkFees.go deleted file mode 100644 index 3b27dbd93..000000000 --- a/pkg/monitoring/metrics/mocks/NetworkFees.go +++ /dev/null @@ -1,37 +0,0 @@ -// Code generated by mockery v2.43.2. 
DO NOT EDIT. - -package mocks - -import ( - metrics "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/metrics" - mock "github.com/stretchr/testify/mock" -) - -// NetworkFees is an autogenerated mock type for the NetworkFees type -type NetworkFees struct { - mock.Mock -} - -// Cleanup provides a mock function with given fields: -func (_m *NetworkFees) Cleanup() { - _m.Called() -} - -// Set provides a mock function with given fields: slot, chain -func (_m *NetworkFees) Set(slot metrics.NetworkFeesInput, chain string) { - _m.Called(slot, chain) -} - -// NewNetworkFees creates a new instance of NetworkFees. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewNetworkFees(t interface { - mock.TestingT - Cleanup(func()) -}) *NetworkFees { - mock := &NetworkFees{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/pkg/monitoring/metrics/mocks/NodeBalances.go b/pkg/monitoring/metrics/mocks/NodeBalances.go deleted file mode 100644 index bbf9aa4b8..000000000 --- a/pkg/monitoring/metrics/mocks/NodeBalances.go +++ /dev/null @@ -1,34 +0,0 @@ -// Code generated by mockery v2.43.2. DO NOT EDIT. - -package mocks - -import mock "github.com/stretchr/testify/mock" - -// NodeBalances is an autogenerated mock type for the NodeBalances type -type NodeBalances struct { - mock.Mock -} - -// Cleanup provides a mock function with given fields: address, operator -func (_m *NodeBalances) Cleanup(address string, operator string) { - _m.Called(address, operator) -} - -// SetBalance provides a mock function with given fields: balance, address, operator -func (_m *NodeBalances) SetBalance(balance uint64, address string, operator string) { - _m.Called(balance, address, operator) -} - -// NewNodeBalances creates a new instance of NodeBalances. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewNodeBalances(t interface { - mock.TestingT - Cleanup(func()) -}) *NodeBalances { - mock := &NodeBalances{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/pkg/monitoring/metrics/mocks/NodeSuccess.go b/pkg/monitoring/metrics/mocks/NodeSuccess.go deleted file mode 100644 index b80b46f59..000000000 --- a/pkg/monitoring/metrics/mocks/NodeSuccess.go +++ /dev/null @@ -1,37 +0,0 @@ -// Code generated by mockery v2.43.2. DO NOT EDIT. - -package mocks - -import ( - metrics "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/metrics" - mock "github.com/stretchr/testify/mock" -) - -// NodeSuccess is an autogenerated mock type for the NodeSuccess type -type NodeSuccess struct { - mock.Mock -} - -// Add provides a mock function with given fields: count, i -func (_m *NodeSuccess) Add(count int, i metrics.NodeFeedInput) { - _m.Called(count, i) -} - -// Cleanup provides a mock function with given fields: i -func (_m *NodeSuccess) Cleanup(i metrics.NodeFeedInput) { - _m.Called(i) -} - -// NewNodeSuccess creates a new instance of NodeSuccess. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewNodeSuccess(t interface { - mock.TestingT - Cleanup(func()) -}) *NodeSuccess { - mock := &NodeSuccess{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/pkg/monitoring/metrics/mocks/ReportObservations.go b/pkg/monitoring/metrics/mocks/ReportObservations.go deleted file mode 100644 index 814d997eb..000000000 --- a/pkg/monitoring/metrics/mocks/ReportObservations.go +++ /dev/null @@ -1,37 +0,0 @@ -// Code generated by mockery v2.43.2. DO NOT EDIT. 
- -package mocks - -import ( - metrics "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/metrics" - mock "github.com/stretchr/testify/mock" -) - -// ReportObservations is an autogenerated mock type for the ReportObservations type -type ReportObservations struct { - mock.Mock -} - -// Cleanup provides a mock function with given fields: feedInput -func (_m *ReportObservations) Cleanup(feedInput metrics.FeedInput) { - _m.Called(feedInput) -} - -// SetCount provides a mock function with given fields: count, feedInput -func (_m *ReportObservations) SetCount(count uint8, feedInput metrics.FeedInput) { - _m.Called(count, feedInput) -} - -// NewReportObservations creates a new instance of ReportObservations. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewReportObservations(t interface { - mock.TestingT - Cleanup(func()) -}) *ReportObservations { - mock := &ReportObservations{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/pkg/monitoring/metrics/mocks/SlotHeight.go b/pkg/monitoring/metrics/mocks/SlotHeight.go deleted file mode 100644 index 22de7542b..000000000 --- a/pkg/monitoring/metrics/mocks/SlotHeight.go +++ /dev/null @@ -1,37 +0,0 @@ -// Code generated by mockery v2.43.2. DO NOT EDIT. 
- -package mocks - -import ( - types "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/types" - mock "github.com/stretchr/testify/mock" -) - -// SlotHeight is an autogenerated mock type for the SlotHeight type -type SlotHeight struct { - mock.Mock -} - -// Cleanup provides a mock function with given fields: -func (_m *SlotHeight) Cleanup() { - _m.Called() -} - -// Set provides a mock function with given fields: slot, chain, url -func (_m *SlotHeight) Set(slot types.SlotHeight, chain string, url string) { - _m.Called(slot, chain, url) -} - -// NewSlotHeight creates a new instance of SlotHeight. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewSlotHeight(t interface { - mock.TestingT - Cleanup(func()) -}) *SlotHeight { - mock := &SlotHeight{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/pkg/monitoring/metrics/mocks/feed_balances.go b/pkg/monitoring/metrics/mocks/feed_balances.go new file mode 100644 index 000000000..4cbbcc858 --- /dev/null +++ b/pkg/monitoring/metrics/mocks/feed_balances.go @@ -0,0 +1,164 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package mocks + +import ( + metrics "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/metrics" + mock "github.com/stretchr/testify/mock" + + prometheus "github.com/prometheus/client_golang/prometheus" +) + +// FeedBalances is an autogenerated mock type for the FeedBalances type +type FeedBalances struct { + mock.Mock +} + +type FeedBalances_Expecter struct { + mock *mock.Mock +} + +func (_m *FeedBalances) EXPECT() *FeedBalances_Expecter { + return &FeedBalances_Expecter{mock: &_m.Mock} +} + +// Cleanup provides a mock function with given fields: balanceAccountName, feedInput +func (_m *FeedBalances) Cleanup(balanceAccountName string, feedInput metrics.FeedInput) { + _m.Called(balanceAccountName, feedInput) +} + +// FeedBalances_Cleanup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cleanup' +type FeedBalances_Cleanup_Call struct { + *mock.Call +} + +// Cleanup is a helper method to define mock.On call +// - balanceAccountName string +// - feedInput metrics.FeedInput +func (_e *FeedBalances_Expecter) Cleanup(balanceAccountName interface{}, feedInput interface{}) *FeedBalances_Cleanup_Call { + return &FeedBalances_Cleanup_Call{Call: _e.mock.On("Cleanup", balanceAccountName, feedInput)} +} + +func (_c *FeedBalances_Cleanup_Call) Run(run func(balanceAccountName string, feedInput metrics.FeedInput)) *FeedBalances_Cleanup_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(metrics.FeedInput)) + }) + return _c +} + +func (_c *FeedBalances_Cleanup_Call) Return() *FeedBalances_Cleanup_Call { + _c.Call.Return() + return _c +} + +func (_c *FeedBalances_Cleanup_Call) RunAndReturn(run func(string, metrics.FeedInput)) *FeedBalances_Cleanup_Call { + _c.Call.Return(run) + return _c +} + +// Exists provides a mock function with given fields: balanceAccountName +func (_m *FeedBalances) Exists(balanceAccountName string) (*prometheus.GaugeVec, bool) { + ret := _m.Called(balanceAccountName) + + if 
len(ret) == 0 { + panic("no return value specified for Exists") + } + + var r0 *prometheus.GaugeVec + var r1 bool + if rf, ok := ret.Get(0).(func(string) (*prometheus.GaugeVec, bool)); ok { + return rf(balanceAccountName) + } + if rf, ok := ret.Get(0).(func(string) *prometheus.GaugeVec); ok { + r0 = rf(balanceAccountName) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*prometheus.GaugeVec) + } + } + + if rf, ok := ret.Get(1).(func(string) bool); ok { + r1 = rf(balanceAccountName) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// FeedBalances_Exists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Exists' +type FeedBalances_Exists_Call struct { + *mock.Call +} + +// Exists is a helper method to define mock.On call +// - balanceAccountName string +func (_e *FeedBalances_Expecter) Exists(balanceAccountName interface{}) *FeedBalances_Exists_Call { + return &FeedBalances_Exists_Call{Call: _e.mock.On("Exists", balanceAccountName)} +} + +func (_c *FeedBalances_Exists_Call) Run(run func(balanceAccountName string)) *FeedBalances_Exists_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *FeedBalances_Exists_Call) Return(_a0 *prometheus.GaugeVec, _a1 bool) *FeedBalances_Exists_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *FeedBalances_Exists_Call) RunAndReturn(run func(string) (*prometheus.GaugeVec, bool)) *FeedBalances_Exists_Call { + _c.Call.Return(run) + return _c +} + +// SetBalance provides a mock function with given fields: balance, balanceAccountName, feedInput +func (_m *FeedBalances) SetBalance(balance uint64, balanceAccountName string, feedInput metrics.FeedInput) { + _m.Called(balance, balanceAccountName, feedInput) +} + +// FeedBalances_SetBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetBalance' +type FeedBalances_SetBalance_Call struct { + *mock.Call +} + +// SetBalance is 
a helper method to define mock.On call +// - balance uint64 +// - balanceAccountName string +// - feedInput metrics.FeedInput +func (_e *FeedBalances_Expecter) SetBalance(balance interface{}, balanceAccountName interface{}, feedInput interface{}) *FeedBalances_SetBalance_Call { + return &FeedBalances_SetBalance_Call{Call: _e.mock.On("SetBalance", balance, balanceAccountName, feedInput)} +} + +func (_c *FeedBalances_SetBalance_Call) Run(run func(balance uint64, balanceAccountName string, feedInput metrics.FeedInput)) *FeedBalances_SetBalance_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint64), args[1].(string), args[2].(metrics.FeedInput)) + }) + return _c +} + +func (_c *FeedBalances_SetBalance_Call) Return() *FeedBalances_SetBalance_Call { + _c.Call.Return() + return _c +} + +func (_c *FeedBalances_SetBalance_Call) RunAndReturn(run func(uint64, string, metrics.FeedInput)) *FeedBalances_SetBalance_Call { + _c.Call.Return(run) + return _c +} + +// NewFeedBalances creates a new instance of FeedBalances. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFeedBalances(t interface { + mock.TestingT + Cleanup(func()) +}) *FeedBalances { + mock := &FeedBalances{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/monitoring/metrics/mocks/network_fees.go b/pkg/monitoring/metrics/mocks/network_fees.go new file mode 100644 index 000000000..1257f3603 --- /dev/null +++ b/pkg/monitoring/metrics/mocks/network_fees.go @@ -0,0 +1,101 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package mocks + +import ( + metrics "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/metrics" + mock "github.com/stretchr/testify/mock" +) + +// NetworkFees is an autogenerated mock type for the NetworkFees type +type NetworkFees struct { + mock.Mock +} + +type NetworkFees_Expecter struct { + mock *mock.Mock +} + +func (_m *NetworkFees) EXPECT() *NetworkFees_Expecter { + return &NetworkFees_Expecter{mock: &_m.Mock} +} + +// Cleanup provides a mock function with given fields: +func (_m *NetworkFees) Cleanup() { + _m.Called() +} + +// NetworkFees_Cleanup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cleanup' +type NetworkFees_Cleanup_Call struct { + *mock.Call +} + +// Cleanup is a helper method to define mock.On call +func (_e *NetworkFees_Expecter) Cleanup() *NetworkFees_Cleanup_Call { + return &NetworkFees_Cleanup_Call{Call: _e.mock.On("Cleanup")} +} + +func (_c *NetworkFees_Cleanup_Call) Run(run func()) *NetworkFees_Cleanup_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *NetworkFees_Cleanup_Call) Return() *NetworkFees_Cleanup_Call { + _c.Call.Return() + return _c +} + +func (_c *NetworkFees_Cleanup_Call) RunAndReturn(run func()) *NetworkFees_Cleanup_Call { + _c.Call.Return(run) + return _c +} + +// Set provides a mock function with given fields: slot, chain +func (_m *NetworkFees) Set(slot metrics.NetworkFeesInput, chain string) { + _m.Called(slot, chain) +} + +// NetworkFees_Set_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Set' +type NetworkFees_Set_Call struct { + *mock.Call +} + +// Set is a helper method to define mock.On call +// - slot metrics.NetworkFeesInput +// - chain string +func (_e *NetworkFees_Expecter) Set(slot interface{}, chain interface{}) *NetworkFees_Set_Call { + return &NetworkFees_Set_Call{Call: _e.mock.On("Set", slot, chain)} +} + +func (_c *NetworkFees_Set_Call) Run(run func(slot 
metrics.NetworkFeesInput, chain string)) *NetworkFees_Set_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(metrics.NetworkFeesInput), args[1].(string)) + }) + return _c +} + +func (_c *NetworkFees_Set_Call) Return() *NetworkFees_Set_Call { + _c.Call.Return() + return _c +} + +func (_c *NetworkFees_Set_Call) RunAndReturn(run func(metrics.NetworkFeesInput, string)) *NetworkFees_Set_Call { + _c.Call.Return(run) + return _c +} + +// NewNetworkFees creates a new instance of NetworkFees. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNetworkFees(t interface { + mock.TestingT + Cleanup(func()) +}) *NetworkFees { + mock := &NetworkFees{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/monitoring/metrics/mocks/node_balances.go b/pkg/monitoring/metrics/mocks/node_balances.go new file mode 100644 index 000000000..643a6ca48 --- /dev/null +++ b/pkg/monitoring/metrics/mocks/node_balances.go @@ -0,0 +1,101 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// NodeBalances is an autogenerated mock type for the NodeBalances type +type NodeBalances struct { + mock.Mock +} + +type NodeBalances_Expecter struct { + mock *mock.Mock +} + +func (_m *NodeBalances) EXPECT() *NodeBalances_Expecter { + return &NodeBalances_Expecter{mock: &_m.Mock} +} + +// Cleanup provides a mock function with given fields: address, operator +func (_m *NodeBalances) Cleanup(address string, operator string) { + _m.Called(address, operator) +} + +// NodeBalances_Cleanup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cleanup' +type NodeBalances_Cleanup_Call struct { + *mock.Call +} + +// Cleanup is a helper method to define mock.On call +// - address string +// - operator string +func (_e *NodeBalances_Expecter) Cleanup(address interface{}, operator interface{}) *NodeBalances_Cleanup_Call { + return &NodeBalances_Cleanup_Call{Call: _e.mock.On("Cleanup", address, operator)} +} + +func (_c *NodeBalances_Cleanup_Call) Run(run func(address string, operator string)) *NodeBalances_Cleanup_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string)) + }) + return _c +} + +func (_c *NodeBalances_Cleanup_Call) Return() *NodeBalances_Cleanup_Call { + _c.Call.Return() + return _c +} + +func (_c *NodeBalances_Cleanup_Call) RunAndReturn(run func(string, string)) *NodeBalances_Cleanup_Call { + _c.Call.Return(run) + return _c +} + +// SetBalance provides a mock function with given fields: balance, address, operator +func (_m *NodeBalances) SetBalance(balance uint64, address string, operator string) { + _m.Called(balance, address, operator) +} + +// NodeBalances_SetBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetBalance' +type NodeBalances_SetBalance_Call struct { + *mock.Call +} + +// SetBalance is a helper method to define mock.On call +// - balance uint64 +// - 
address string +// - operator string +func (_e *NodeBalances_Expecter) SetBalance(balance interface{}, address interface{}, operator interface{}) *NodeBalances_SetBalance_Call { + return &NodeBalances_SetBalance_Call{Call: _e.mock.On("SetBalance", balance, address, operator)} +} + +func (_c *NodeBalances_SetBalance_Call) Run(run func(balance uint64, address string, operator string)) *NodeBalances_SetBalance_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint64), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *NodeBalances_SetBalance_Call) Return() *NodeBalances_SetBalance_Call { + _c.Call.Return() + return _c +} + +func (_c *NodeBalances_SetBalance_Call) RunAndReturn(run func(uint64, string, string)) *NodeBalances_SetBalance_Call { + _c.Call.Return(run) + return _c +} + +// NewNodeBalances creates a new instance of NodeBalances. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNodeBalances(t interface { + mock.TestingT + Cleanup(func()) +}) *NodeBalances { + mock := &NodeBalances{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/monitoring/metrics/mocks/node_success.go b/pkg/monitoring/metrics/mocks/node_success.go new file mode 100644 index 000000000..e36aeb501 --- /dev/null +++ b/pkg/monitoring/metrics/mocks/node_success.go @@ -0,0 +1,102 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package mocks + +import ( + metrics "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/metrics" + mock "github.com/stretchr/testify/mock" +) + +// NodeSuccess is an autogenerated mock type for the NodeSuccess type +type NodeSuccess struct { + mock.Mock +} + +type NodeSuccess_Expecter struct { + mock *mock.Mock +} + +func (_m *NodeSuccess) EXPECT() *NodeSuccess_Expecter { + return &NodeSuccess_Expecter{mock: &_m.Mock} +} + +// Add provides a mock function with given fields: count, i +func (_m *NodeSuccess) Add(count int, i metrics.NodeFeedInput) { + _m.Called(count, i) +} + +// NodeSuccess_Add_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Add' +type NodeSuccess_Add_Call struct { + *mock.Call +} + +// Add is a helper method to define mock.On call +// - count int +// - i metrics.NodeFeedInput +func (_e *NodeSuccess_Expecter) Add(count interface{}, i interface{}) *NodeSuccess_Add_Call { + return &NodeSuccess_Add_Call{Call: _e.mock.On("Add", count, i)} +} + +func (_c *NodeSuccess_Add_Call) Run(run func(count int, i metrics.NodeFeedInput)) *NodeSuccess_Add_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(int), args[1].(metrics.NodeFeedInput)) + }) + return _c +} + +func (_c *NodeSuccess_Add_Call) Return() *NodeSuccess_Add_Call { + _c.Call.Return() + return _c +} + +func (_c *NodeSuccess_Add_Call) RunAndReturn(run func(int, metrics.NodeFeedInput)) *NodeSuccess_Add_Call { + _c.Call.Return(run) + return _c +} + +// Cleanup provides a mock function with given fields: i +func (_m *NodeSuccess) Cleanup(i metrics.NodeFeedInput) { + _m.Called(i) +} + +// NodeSuccess_Cleanup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cleanup' +type NodeSuccess_Cleanup_Call struct { + *mock.Call +} + +// Cleanup is a helper method to define mock.On call +// - i metrics.NodeFeedInput +func (_e *NodeSuccess_Expecter) Cleanup(i interface{}) *NodeSuccess_Cleanup_Call { + return 
&NodeSuccess_Cleanup_Call{Call: _e.mock.On("Cleanup", i)} +} + +func (_c *NodeSuccess_Cleanup_Call) Run(run func(i metrics.NodeFeedInput)) *NodeSuccess_Cleanup_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(metrics.NodeFeedInput)) + }) + return _c +} + +func (_c *NodeSuccess_Cleanup_Call) Return() *NodeSuccess_Cleanup_Call { + _c.Call.Return() + return _c +} + +func (_c *NodeSuccess_Cleanup_Call) RunAndReturn(run func(metrics.NodeFeedInput)) *NodeSuccess_Cleanup_Call { + _c.Call.Return(run) + return _c +} + +// NewNodeSuccess creates a new instance of NodeSuccess. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNodeSuccess(t interface { + mock.TestingT + Cleanup(func()) +}) *NodeSuccess { + mock := &NodeSuccess{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/monitoring/metrics/mocks/report_observations.go b/pkg/monitoring/metrics/mocks/report_observations.go new file mode 100644 index 000000000..78136a27a --- /dev/null +++ b/pkg/monitoring/metrics/mocks/report_observations.go @@ -0,0 +1,102 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package mocks + +import ( + metrics "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/metrics" + mock "github.com/stretchr/testify/mock" +) + +// ReportObservations is an autogenerated mock type for the ReportObservations type +type ReportObservations struct { + mock.Mock +} + +type ReportObservations_Expecter struct { + mock *mock.Mock +} + +func (_m *ReportObservations) EXPECT() *ReportObservations_Expecter { + return &ReportObservations_Expecter{mock: &_m.Mock} +} + +// Cleanup provides a mock function with given fields: feedInput +func (_m *ReportObservations) Cleanup(feedInput metrics.FeedInput) { + _m.Called(feedInput) +} + +// ReportObservations_Cleanup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cleanup' +type ReportObservations_Cleanup_Call struct { + *mock.Call +} + +// Cleanup is a helper method to define mock.On call +// - feedInput metrics.FeedInput +func (_e *ReportObservations_Expecter) Cleanup(feedInput interface{}) *ReportObservations_Cleanup_Call { + return &ReportObservations_Cleanup_Call{Call: _e.mock.On("Cleanup", feedInput)} +} + +func (_c *ReportObservations_Cleanup_Call) Run(run func(feedInput metrics.FeedInput)) *ReportObservations_Cleanup_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(metrics.FeedInput)) + }) + return _c +} + +func (_c *ReportObservations_Cleanup_Call) Return() *ReportObservations_Cleanup_Call { + _c.Call.Return() + return _c +} + +func (_c *ReportObservations_Cleanup_Call) RunAndReturn(run func(metrics.FeedInput)) *ReportObservations_Cleanup_Call { + _c.Call.Return(run) + return _c +} + +// SetCount provides a mock function with given fields: count, feedInput +func (_m *ReportObservations) SetCount(count uint8, feedInput metrics.FeedInput) { + _m.Called(count, feedInput) +} + +// ReportObservations_SetCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetCount' +type 
ReportObservations_SetCount_Call struct { + *mock.Call +} + +// SetCount is a helper method to define mock.On call +// - count uint8 +// - feedInput metrics.FeedInput +func (_e *ReportObservations_Expecter) SetCount(count interface{}, feedInput interface{}) *ReportObservations_SetCount_Call { + return &ReportObservations_SetCount_Call{Call: _e.mock.On("SetCount", count, feedInput)} +} + +func (_c *ReportObservations_SetCount_Call) Run(run func(count uint8, feedInput metrics.FeedInput)) *ReportObservations_SetCount_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint8), args[1].(metrics.FeedInput)) + }) + return _c +} + +func (_c *ReportObservations_SetCount_Call) Return() *ReportObservations_SetCount_Call { + _c.Call.Return() + return _c +} + +func (_c *ReportObservations_SetCount_Call) RunAndReturn(run func(uint8, metrics.FeedInput)) *ReportObservations_SetCount_Call { + _c.Call.Return(run) + return _c +} + +// NewReportObservations creates a new instance of ReportObservations. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReportObservations(t interface { + mock.TestingT + Cleanup(func()) +}) *ReportObservations { + mock := &ReportObservations{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/monitoring/metrics/mocks/slot_height.go b/pkg/monitoring/metrics/mocks/slot_height.go new file mode 100644 index 000000000..990caea39 --- /dev/null +++ b/pkg/monitoring/metrics/mocks/slot_height.go @@ -0,0 +1,102 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package mocks + +import ( + types "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/types" + mock "github.com/stretchr/testify/mock" +) + +// SlotHeight is an autogenerated mock type for the SlotHeight type +type SlotHeight struct { + mock.Mock +} + +type SlotHeight_Expecter struct { + mock *mock.Mock +} + +func (_m *SlotHeight) EXPECT() *SlotHeight_Expecter { + return &SlotHeight_Expecter{mock: &_m.Mock} +} + +// Cleanup provides a mock function with given fields: +func (_m *SlotHeight) Cleanup() { + _m.Called() +} + +// SlotHeight_Cleanup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cleanup' +type SlotHeight_Cleanup_Call struct { + *mock.Call +} + +// Cleanup is a helper method to define mock.On call +func (_e *SlotHeight_Expecter) Cleanup() *SlotHeight_Cleanup_Call { + return &SlotHeight_Cleanup_Call{Call: _e.mock.On("Cleanup")} +} + +func (_c *SlotHeight_Cleanup_Call) Run(run func()) *SlotHeight_Cleanup_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *SlotHeight_Cleanup_Call) Return() *SlotHeight_Cleanup_Call { + _c.Call.Return() + return _c +} + +func (_c *SlotHeight_Cleanup_Call) RunAndReturn(run func()) *SlotHeight_Cleanup_Call { + _c.Call.Return(run) + return _c +} + +// Set provides a mock function with given fields: slot, chain, url +func (_m *SlotHeight) Set(slot types.SlotHeight, chain string, url string) { + _m.Called(slot, chain, url) +} + +// SlotHeight_Set_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Set' +type SlotHeight_Set_Call struct { + *mock.Call +} + +// Set is a helper method to define mock.On call +// - slot types.SlotHeight +// - chain string +// - url string +func (_e *SlotHeight_Expecter) Set(slot interface{}, chain interface{}, url interface{}) *SlotHeight_Set_Call { + return &SlotHeight_Set_Call{Call: _e.mock.On("Set", slot, chain, url)} +} + +func (_c *SlotHeight_Set_Call) Run(run 
func(slot types.SlotHeight, chain string, url string)) *SlotHeight_Set_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(types.SlotHeight), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *SlotHeight_Set_Call) Return() *SlotHeight_Set_Call { + _c.Call.Return() + return _c +} + +func (_c *SlotHeight_Set_Call) RunAndReturn(run func(types.SlotHeight, string, string)) *SlotHeight_Set_Call { + _c.Call.Return(run) + return _c +} + +// NewSlotHeight creates a new instance of SlotHeight. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSlotHeight(t interface { + mock.TestingT + Cleanup(func()) +}) *SlotHeight { + mock := &SlotHeight{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/monitoring/metrics/networkfees.go b/pkg/monitoring/metrics/networkfees.go index 9700529e3..be5f93b19 100644 --- a/pkg/monitoring/metrics/networkfees.go +++ b/pkg/monitoring/metrics/networkfees.go @@ -7,8 +7,6 @@ import ( "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/types" ) -//go:generate mockery --name NetworkFees --output ./mocks/ - type NetworkFees interface { Set(slot NetworkFeesInput, chain string) Cleanup() diff --git a/pkg/monitoring/metrics/nodebalances.go b/pkg/monitoring/metrics/nodebalances.go index 9e14fa19d..109c8decd 100644 --- a/pkg/monitoring/metrics/nodebalances.go +++ b/pkg/monitoring/metrics/nodebalances.go @@ -8,8 +8,6 @@ import ( "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/types" ) -//go:generate mockery --name NodeBalances --output ./mocks/ - type NodeBalances interface { SetBalance(balance uint64, address, operator string) Cleanup(address, operator string) diff --git a/pkg/monitoring/metrics/nodesuccess.go b/pkg/monitoring/metrics/nodesuccess.go index 73cc00d94..87511284b 100644 --- a/pkg/monitoring/metrics/nodesuccess.go +++ 
b/pkg/monitoring/metrics/nodesuccess.go @@ -6,8 +6,6 @@ import ( "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/types" ) -//go:generate mockery --name NodeSuccess --output ./mocks/ - type NodeSuccess interface { Add(count int, i NodeFeedInput) Cleanup(i NodeFeedInput) diff --git a/pkg/monitoring/metrics/reportobservations.go b/pkg/monitoring/metrics/reportobservations.go index f790a4319..7fe037d03 100644 --- a/pkg/monitoring/metrics/reportobservations.go +++ b/pkg/monitoring/metrics/reportobservations.go @@ -6,8 +6,6 @@ import ( "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/types" ) -//go:generate mockery --name ReportObservations --output ./mocks/ - type ReportObservations interface { SetCount(count uint8, feedInput FeedInput) Cleanup(feedInput FeedInput) diff --git a/pkg/monitoring/metrics/slotheight.go b/pkg/monitoring/metrics/slotheight.go index 2c4c5caf5..3ccfa2111 100644 --- a/pkg/monitoring/metrics/slotheight.go +++ b/pkg/monitoring/metrics/slotheight.go @@ -7,8 +7,6 @@ import ( "github.com/smartcontractkit/chainlink-solana/pkg/monitoring/types" ) -//go:generate mockery --name SlotHeight --output ./mocks/ - type SlotHeight interface { Set(slot types.SlotHeight, chain, url string) Cleanup() diff --git a/pkg/monitoring/mocks/ChainReader.go b/pkg/monitoring/mocks/ChainReader.go deleted file mode 100644 index 30b714305..000000000 --- a/pkg/monitoring/mocks/ChainReader.go +++ /dev/null @@ -1,282 +0,0 @@ -// Code generated by mockery v2.43.2. DO NOT EDIT. 
- -package mocks - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" - - pkgsolana "github.com/smartcontractkit/chainlink-solana/pkg/solana" - - rpc "github.com/gagliardetto/solana-go/rpc" - - solana "github.com/gagliardetto/solana-go" -) - -// ChainReader is an autogenerated mock type for the ChainReader type -type ChainReader struct { - mock.Mock -} - -// GetBalance provides a mock function with given fields: ctx, account, commitment -func (_m *ChainReader) GetBalance(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType) (*rpc.GetBalanceResult, error) { - ret := _m.Called(ctx, account, commitment) - - if len(ret) == 0 { - panic("no return value specified for GetBalance") - } - - var r0 *rpc.GetBalanceResult - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) (*rpc.GetBalanceResult, error)); ok { - return rf(ctx, account, commitment) - } - if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) *rpc.GetBalanceResult); ok { - r0 = rf(ctx, account, commitment) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*rpc.GetBalanceResult) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey, rpc.CommitmentType) error); ok { - r1 = rf(ctx, account, commitment) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetLatestBlock provides a mock function with given fields: ctx, commitment -func (_m *ChainReader) GetLatestBlock(ctx context.Context, commitment rpc.CommitmentType) (*rpc.GetBlockResult, error) { - ret := _m.Called(ctx, commitment) - - if len(ret) == 0 { - panic("no return value specified for GetLatestBlock") - } - - var r0 *rpc.GetBlockResult - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, rpc.CommitmentType) (*rpc.GetBlockResult, error)); ok { - return rf(ctx, commitment) - } - if rf, ok := ret.Get(0).(func(context.Context, rpc.CommitmentType) *rpc.GetBlockResult); ok { - r0 = rf(ctx, 
commitment) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*rpc.GetBlockResult) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, rpc.CommitmentType) error); ok { - r1 = rf(ctx, commitment) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetLatestTransmission provides a mock function with given fields: ctx, account, commitment -func (_m *ChainReader) GetLatestTransmission(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType) (pkgsolana.Answer, uint64, error) { - ret := _m.Called(ctx, account, commitment) - - if len(ret) == 0 { - panic("no return value specified for GetLatestTransmission") - } - - var r0 pkgsolana.Answer - var r1 uint64 - var r2 error - if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) (pkgsolana.Answer, uint64, error)); ok { - return rf(ctx, account, commitment) - } - if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) pkgsolana.Answer); ok { - r0 = rf(ctx, account, commitment) - } else { - r0 = ret.Get(0).(pkgsolana.Answer) - } - - if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey, rpc.CommitmentType) uint64); ok { - r1 = rf(ctx, account, commitment) - } else { - r1 = ret.Get(1).(uint64) - } - - if rf, ok := ret.Get(2).(func(context.Context, solana.PublicKey, rpc.CommitmentType) error); ok { - r2 = rf(ctx, account, commitment) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// GetSignaturesForAddressWithOpts provides a mock function with given fields: ctx, account, opts -func (_m *ChainReader) GetSignaturesForAddressWithOpts(ctx context.Context, account solana.PublicKey, opts *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) { - ret := _m.Called(ctx, account, opts) - - if len(ret) == 0 { - panic("no return value specified for GetSignaturesForAddressWithOpts") - } - - var r0 []*rpc.TransactionSignature - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, 
solana.PublicKey, *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error)); ok { - return rf(ctx, account, opts) - } - if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) []*rpc.TransactionSignature); ok { - r0 = rf(ctx, account, opts) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*rpc.TransactionSignature) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) error); ok { - r1 = rf(ctx, account, opts) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetSlot provides a mock function with given fields: ctx -func (_m *ChainReader) GetSlot(ctx context.Context) (uint64, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GetSlot") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetState provides a mock function with given fields: ctx, account, commitment -func (_m *ChainReader) GetState(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType) (pkgsolana.State, uint64, error) { - ret := _m.Called(ctx, account, commitment) - - if len(ret) == 0 { - panic("no return value specified for GetState") - } - - var r0 pkgsolana.State - var r1 uint64 - var r2 error - if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) (pkgsolana.State, uint64, error)); ok { - return rf(ctx, account, commitment) - } - if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) pkgsolana.State); ok { - r0 = rf(ctx, account, commitment) - } else { - r0 = ret.Get(0).(pkgsolana.State) - } - - if rf, ok := 
ret.Get(1).(func(context.Context, solana.PublicKey, rpc.CommitmentType) uint64); ok { - r1 = rf(ctx, account, commitment) - } else { - r1 = ret.Get(1).(uint64) - } - - if rf, ok := ret.Get(2).(func(context.Context, solana.PublicKey, rpc.CommitmentType) error); ok { - r2 = rf(ctx, account, commitment) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// GetTokenAccountBalance provides a mock function with given fields: ctx, account, commitment -func (_m *ChainReader) GetTokenAccountBalance(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType) (*rpc.GetTokenAccountBalanceResult, error) { - ret := _m.Called(ctx, account, commitment) - - if len(ret) == 0 { - panic("no return value specified for GetTokenAccountBalance") - } - - var r0 *rpc.GetTokenAccountBalanceResult - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) (*rpc.GetTokenAccountBalanceResult, error)); ok { - return rf(ctx, account, commitment) - } - if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) *rpc.GetTokenAccountBalanceResult); ok { - r0 = rf(ctx, account, commitment) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*rpc.GetTokenAccountBalanceResult) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey, rpc.CommitmentType) error); ok { - r1 = rf(ctx, account, commitment) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetTransaction provides a mock function with given fields: ctx, txSig, opts -func (_m *ChainReader) GetTransaction(ctx context.Context, txSig solana.Signature, opts *rpc.GetTransactionOpts) (*rpc.GetTransactionResult, error) { - ret := _m.Called(ctx, txSig, opts) - - if len(ret) == 0 { - panic("no return value specified for GetTransaction") - } - - var r0 *rpc.GetTransactionResult - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, solana.Signature, *rpc.GetTransactionOpts) (*rpc.GetTransactionResult, error)); ok { 
- return rf(ctx, txSig, opts) - } - if rf, ok := ret.Get(0).(func(context.Context, solana.Signature, *rpc.GetTransactionOpts) *rpc.GetTransactionResult); ok { - r0 = rf(ctx, txSig, opts) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*rpc.GetTransactionResult) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, solana.Signature, *rpc.GetTransactionOpts) error); ok { - r1 = rf(ctx, txSig, opts) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// NewChainReader creates a new instance of ChainReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewChainReader(t interface { - mock.TestingT - Cleanup(func()) -}) *ChainReader { - mock := &ChainReader{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/pkg/monitoring/mocks/chain_reader.go b/pkg/monitoring/mocks/chain_reader.go new file mode 100644 index 000000000..e6a1e655a --- /dev/null +++ b/pkg/monitoring/mocks/chain_reader.go @@ -0,0 +1,527 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + pkgsolana "github.com/smartcontractkit/chainlink-solana/pkg/solana" + + rpc "github.com/gagliardetto/solana-go/rpc" + + solana "github.com/gagliardetto/solana-go" +) + +// ChainReader is an autogenerated mock type for the ChainReader type +type ChainReader struct { + mock.Mock +} + +type ChainReader_Expecter struct { + mock *mock.Mock +} + +func (_m *ChainReader) EXPECT() *ChainReader_Expecter { + return &ChainReader_Expecter{mock: &_m.Mock} +} + +// GetBalance provides a mock function with given fields: ctx, account, commitment +func (_m *ChainReader) GetBalance(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType) (*rpc.GetBalanceResult, error) { + ret := _m.Called(ctx, account, commitment) + + if len(ret) == 0 { + panic("no return value specified for GetBalance") + } + + var r0 *rpc.GetBalanceResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) (*rpc.GetBalanceResult, error)); ok { + return rf(ctx, account, commitment) + } + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) *rpc.GetBalanceResult); ok { + r0 = rf(ctx, account, commitment) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.GetBalanceResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey, rpc.CommitmentType) error); ok { + r1 = rf(ctx, account, commitment) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ChainReader_GetBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBalance' +type ChainReader_GetBalance_Call struct { + *mock.Call +} + +// GetBalance is a helper method to define mock.On call +// - ctx context.Context +// - account solana.PublicKey +// - commitment rpc.CommitmentType +func (_e *ChainReader_Expecter) GetBalance(ctx interface{}, account interface{}, commitment interface{}) 
*ChainReader_GetBalance_Call { + return &ChainReader_GetBalance_Call{Call: _e.mock.On("GetBalance", ctx, account, commitment)} +} + +func (_c *ChainReader_GetBalance_Call) Run(run func(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType)) *ChainReader_GetBalance_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(solana.PublicKey), args[2].(rpc.CommitmentType)) + }) + return _c +} + +func (_c *ChainReader_GetBalance_Call) Return(out *rpc.GetBalanceResult, err error) *ChainReader_GetBalance_Call { + _c.Call.Return(out, err) + return _c +} + +func (_c *ChainReader_GetBalance_Call) RunAndReturn(run func(context.Context, solana.PublicKey, rpc.CommitmentType) (*rpc.GetBalanceResult, error)) *ChainReader_GetBalance_Call { + _c.Call.Return(run) + return _c +} + +// GetLatestBlock provides a mock function with given fields: ctx, commitment +func (_m *ChainReader) GetLatestBlock(ctx context.Context, commitment rpc.CommitmentType) (*rpc.GetBlockResult, error) { + ret := _m.Called(ctx, commitment) + + if len(ret) == 0 { + panic("no return value specified for GetLatestBlock") + } + + var r0 *rpc.GetBlockResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, rpc.CommitmentType) (*rpc.GetBlockResult, error)); ok { + return rf(ctx, commitment) + } + if rf, ok := ret.Get(0).(func(context.Context, rpc.CommitmentType) *rpc.GetBlockResult); ok { + r0 = rf(ctx, commitment) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.GetBlockResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, rpc.CommitmentType) error); ok { + r1 = rf(ctx, commitment) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ChainReader_GetLatestBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestBlock' +type ChainReader_GetLatestBlock_Call struct { + *mock.Call +} + +// GetLatestBlock is a helper method to define mock.On call +// - ctx context.Context 
+// - commitment rpc.CommitmentType +func (_e *ChainReader_Expecter) GetLatestBlock(ctx interface{}, commitment interface{}) *ChainReader_GetLatestBlock_Call { + return &ChainReader_GetLatestBlock_Call{Call: _e.mock.On("GetLatestBlock", ctx, commitment)} +} + +func (_c *ChainReader_GetLatestBlock_Call) Run(run func(ctx context.Context, commitment rpc.CommitmentType)) *ChainReader_GetLatestBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(rpc.CommitmentType)) + }) + return _c +} + +func (_c *ChainReader_GetLatestBlock_Call) Return(_a0 *rpc.GetBlockResult, _a1 error) *ChainReader_GetLatestBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ChainReader_GetLatestBlock_Call) RunAndReturn(run func(context.Context, rpc.CommitmentType) (*rpc.GetBlockResult, error)) *ChainReader_GetLatestBlock_Call { + _c.Call.Return(run) + return _c +} + +// GetLatestTransmission provides a mock function with given fields: ctx, account, commitment +func (_m *ChainReader) GetLatestTransmission(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType) (pkgsolana.Answer, uint64, error) { + ret := _m.Called(ctx, account, commitment) + + if len(ret) == 0 { + panic("no return value specified for GetLatestTransmission") + } + + var r0 pkgsolana.Answer + var r1 uint64 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) (pkgsolana.Answer, uint64, error)); ok { + return rf(ctx, account, commitment) + } + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) pkgsolana.Answer); ok { + r0 = rf(ctx, account, commitment) + } else { + r0 = ret.Get(0).(pkgsolana.Answer) + } + + if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey, rpc.CommitmentType) uint64); ok { + r1 = rf(ctx, account, commitment) + } else { + r1 = ret.Get(1).(uint64) + } + + if rf, ok := ret.Get(2).(func(context.Context, solana.PublicKey, rpc.CommitmentType) 
error); ok { + r2 = rf(ctx, account, commitment) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ChainReader_GetLatestTransmission_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestTransmission' +type ChainReader_GetLatestTransmission_Call struct { + *mock.Call +} + +// GetLatestTransmission is a helper method to define mock.On call +// - ctx context.Context +// - account solana.PublicKey +// - commitment rpc.CommitmentType +func (_e *ChainReader_Expecter) GetLatestTransmission(ctx interface{}, account interface{}, commitment interface{}) *ChainReader_GetLatestTransmission_Call { + return &ChainReader_GetLatestTransmission_Call{Call: _e.mock.On("GetLatestTransmission", ctx, account, commitment)} +} + +func (_c *ChainReader_GetLatestTransmission_Call) Run(run func(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType)) *ChainReader_GetLatestTransmission_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(solana.PublicKey), args[2].(rpc.CommitmentType)) + }) + return _c +} + +func (_c *ChainReader_GetLatestTransmission_Call) Return(answer pkgsolana.Answer, blockHeight uint64, err error) *ChainReader_GetLatestTransmission_Call { + _c.Call.Return(answer, blockHeight, err) + return _c +} + +func (_c *ChainReader_GetLatestTransmission_Call) RunAndReturn(run func(context.Context, solana.PublicKey, rpc.CommitmentType) (pkgsolana.Answer, uint64, error)) *ChainReader_GetLatestTransmission_Call { + _c.Call.Return(run) + return _c +} + +// GetSignaturesForAddressWithOpts provides a mock function with given fields: ctx, account, opts +func (_m *ChainReader) GetSignaturesForAddressWithOpts(ctx context.Context, account solana.PublicKey, opts *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) { + ret := _m.Called(ctx, account, opts) + + if len(ret) == 0 { + panic("no return value specified for GetSignaturesForAddressWithOpts") + } + 
+ var r0 []*rpc.TransactionSignature + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error)); ok { + return rf(ctx, account, opts) + } + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) []*rpc.TransactionSignature); ok { + r0 = rf(ctx, account, opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*rpc.TransactionSignature) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) error); ok { + r1 = rf(ctx, account, opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ChainReader_GetSignaturesForAddressWithOpts_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSignaturesForAddressWithOpts' +type ChainReader_GetSignaturesForAddressWithOpts_Call struct { + *mock.Call +} + +// GetSignaturesForAddressWithOpts is a helper method to define mock.On call +// - ctx context.Context +// - account solana.PublicKey +// - opts *rpc.GetSignaturesForAddressOpts +func (_e *ChainReader_Expecter) GetSignaturesForAddressWithOpts(ctx interface{}, account interface{}, opts interface{}) *ChainReader_GetSignaturesForAddressWithOpts_Call { + return &ChainReader_GetSignaturesForAddressWithOpts_Call{Call: _e.mock.On("GetSignaturesForAddressWithOpts", ctx, account, opts)} +} + +func (_c *ChainReader_GetSignaturesForAddressWithOpts_Call) Run(run func(ctx context.Context, account solana.PublicKey, opts *rpc.GetSignaturesForAddressOpts)) *ChainReader_GetSignaturesForAddressWithOpts_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(solana.PublicKey), args[2].(*rpc.GetSignaturesForAddressOpts)) + }) + return _c +} + +func (_c *ChainReader_GetSignaturesForAddressWithOpts_Call) Return(out []*rpc.TransactionSignature, err error) *ChainReader_GetSignaturesForAddressWithOpts_Call { + 
_c.Call.Return(out, err) + return _c +} + +func (_c *ChainReader_GetSignaturesForAddressWithOpts_Call) RunAndReturn(run func(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error)) *ChainReader_GetSignaturesForAddressWithOpts_Call { + _c.Call.Return(run) + return _c +} + +// GetSlot provides a mock function with given fields: ctx +func (_m *ChainReader) GetSlot(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetSlot") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ChainReader_GetSlot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSlot' +type ChainReader_GetSlot_Call struct { + *mock.Call +} + +// GetSlot is a helper method to define mock.On call +// - ctx context.Context +func (_e *ChainReader_Expecter) GetSlot(ctx interface{}) *ChainReader_GetSlot_Call { + return &ChainReader_GetSlot_Call{Call: _e.mock.On("GetSlot", ctx)} +} + +func (_c *ChainReader_GetSlot_Call) Run(run func(ctx context.Context)) *ChainReader_GetSlot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ChainReader_GetSlot_Call) Return(slot uint64, err error) *ChainReader_GetSlot_Call { + _c.Call.Return(slot, err) + return _c +} + +func (_c *ChainReader_GetSlot_Call) RunAndReturn(run func(context.Context) (uint64, error)) *ChainReader_GetSlot_Call { + _c.Call.Return(run) + return _c +} + +// GetState provides a mock function with given fields: ctx, account, commitment +func (_m *ChainReader) GetState(ctx context.Context, 
account solana.PublicKey, commitment rpc.CommitmentType) (pkgsolana.State, uint64, error) { + ret := _m.Called(ctx, account, commitment) + + if len(ret) == 0 { + panic("no return value specified for GetState") + } + + var r0 pkgsolana.State + var r1 uint64 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) (pkgsolana.State, uint64, error)); ok { + return rf(ctx, account, commitment) + } + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) pkgsolana.State); ok { + r0 = rf(ctx, account, commitment) + } else { + r0 = ret.Get(0).(pkgsolana.State) + } + + if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey, rpc.CommitmentType) uint64); ok { + r1 = rf(ctx, account, commitment) + } else { + r1 = ret.Get(1).(uint64) + } + + if rf, ok := ret.Get(2).(func(context.Context, solana.PublicKey, rpc.CommitmentType) error); ok { + r2 = rf(ctx, account, commitment) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ChainReader_GetState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetState' +type ChainReader_GetState_Call struct { + *mock.Call +} + +// GetState is a helper method to define mock.On call +// - ctx context.Context +// - account solana.PublicKey +// - commitment rpc.CommitmentType +func (_e *ChainReader_Expecter) GetState(ctx interface{}, account interface{}, commitment interface{}) *ChainReader_GetState_Call { + return &ChainReader_GetState_Call{Call: _e.mock.On("GetState", ctx, account, commitment)} +} + +func (_c *ChainReader_GetState_Call) Run(run func(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType)) *ChainReader_GetState_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(solana.PublicKey), args[2].(rpc.CommitmentType)) + }) + return _c +} + +func (_c *ChainReader_GetState_Call) Return(state pkgsolana.State, blockHeight uint64, err error) 
*ChainReader_GetState_Call { + _c.Call.Return(state, blockHeight, err) + return _c +} + +func (_c *ChainReader_GetState_Call) RunAndReturn(run func(context.Context, solana.PublicKey, rpc.CommitmentType) (pkgsolana.State, uint64, error)) *ChainReader_GetState_Call { + _c.Call.Return(run) + return _c +} + +// GetTokenAccountBalance provides a mock function with given fields: ctx, account, commitment +func (_m *ChainReader) GetTokenAccountBalance(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType) (*rpc.GetTokenAccountBalanceResult, error) { + ret := _m.Called(ctx, account, commitment) + + if len(ret) == 0 { + panic("no return value specified for GetTokenAccountBalance") + } + + var r0 *rpc.GetTokenAccountBalanceResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) (*rpc.GetTokenAccountBalanceResult, error)); ok { + return rf(ctx, account, commitment) + } + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, rpc.CommitmentType) *rpc.GetTokenAccountBalanceResult); ok { + r0 = rf(ctx, account, commitment) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.GetTokenAccountBalanceResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey, rpc.CommitmentType) error); ok { + r1 = rf(ctx, account, commitment) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ChainReader_GetTokenAccountBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTokenAccountBalance' +type ChainReader_GetTokenAccountBalance_Call struct { + *mock.Call +} + +// GetTokenAccountBalance is a helper method to define mock.On call +// - ctx context.Context +// - account solana.PublicKey +// - commitment rpc.CommitmentType +func (_e *ChainReader_Expecter) GetTokenAccountBalance(ctx interface{}, account interface{}, commitment interface{}) *ChainReader_GetTokenAccountBalance_Call { + return 
&ChainReader_GetTokenAccountBalance_Call{Call: _e.mock.On("GetTokenAccountBalance", ctx, account, commitment)} +} + +func (_c *ChainReader_GetTokenAccountBalance_Call) Run(run func(ctx context.Context, account solana.PublicKey, commitment rpc.CommitmentType)) *ChainReader_GetTokenAccountBalance_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(solana.PublicKey), args[2].(rpc.CommitmentType)) + }) + return _c +} + +func (_c *ChainReader_GetTokenAccountBalance_Call) Return(out *rpc.GetTokenAccountBalanceResult, err error) *ChainReader_GetTokenAccountBalance_Call { + _c.Call.Return(out, err) + return _c +} + +func (_c *ChainReader_GetTokenAccountBalance_Call) RunAndReturn(run func(context.Context, solana.PublicKey, rpc.CommitmentType) (*rpc.GetTokenAccountBalanceResult, error)) *ChainReader_GetTokenAccountBalance_Call { + _c.Call.Return(run) + return _c +} + +// GetTransaction provides a mock function with given fields: ctx, txSig, opts +func (_m *ChainReader) GetTransaction(ctx context.Context, txSig solana.Signature, opts *rpc.GetTransactionOpts) (*rpc.GetTransactionResult, error) { + ret := _m.Called(ctx, txSig, opts) + + if len(ret) == 0 { + panic("no return value specified for GetTransaction") + } + + var r0 *rpc.GetTransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, solana.Signature, *rpc.GetTransactionOpts) (*rpc.GetTransactionResult, error)); ok { + return rf(ctx, txSig, opts) + } + if rf, ok := ret.Get(0).(func(context.Context, solana.Signature, *rpc.GetTransactionOpts) *rpc.GetTransactionResult); ok { + r0 = rf(ctx, txSig, opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.GetTransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, solana.Signature, *rpc.GetTransactionOpts) error); ok { + r1 = rf(ctx, txSig, opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ChainReader_GetTransaction_Call is a *mock.Call that shadows Run/Return methods 
with type explicit version for method 'GetTransaction' +type ChainReader_GetTransaction_Call struct { + *mock.Call +} + +// GetTransaction is a helper method to define mock.On call +// - ctx context.Context +// - txSig solana.Signature +// - opts *rpc.GetTransactionOpts +func (_e *ChainReader_Expecter) GetTransaction(ctx interface{}, txSig interface{}, opts interface{}) *ChainReader_GetTransaction_Call { + return &ChainReader_GetTransaction_Call{Call: _e.mock.On("GetTransaction", ctx, txSig, opts)} +} + +func (_c *ChainReader_GetTransaction_Call) Run(run func(ctx context.Context, txSig solana.Signature, opts *rpc.GetTransactionOpts)) *ChainReader_GetTransaction_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(solana.Signature), args[2].(*rpc.GetTransactionOpts)) + }) + return _c +} + +func (_c *ChainReader_GetTransaction_Call) Return(out *rpc.GetTransactionResult, err error) *ChainReader_GetTransaction_Call { + _c.Call.Return(out, err) + return _c +} + +func (_c *ChainReader_GetTransaction_Call) RunAndReturn(run func(context.Context, solana.Signature, *rpc.GetTransactionOpts) (*rpc.GetTransactionResult, error)) *ChainReader_GetTransaction_Call { + _c.Call.Return(run) + return _c +} + +// NewChainReader creates a new instance of ChainReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewChainReader(t interface { + mock.TestingT + Cleanup(func()) +}) *ChainReader { + mock := &ChainReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/solana/client/client.go b/pkg/solana/client/client.go index 18c0e4bfe..f9f6715b0 100644 --- a/pkg/solana/client/client.go +++ b/pkg/solana/client/client.go @@ -23,7 +23,6 @@ const ( MainnetGenesisHash = "5eykt4UsFv8P8NJdTREpY1vzqKqZKvdpKuc147dw2N9d" ) -//go:generate mockery --name ReaderWriter --output ./mocks/ type ReaderWriter interface { Writer Reader diff --git a/pkg/solana/client/mocks/ReaderWriter.go b/pkg/solana/client/mocks/ReaderWriter.go deleted file mode 100644 index f4d514459..000000000 --- a/pkg/solana/client/mocks/ReaderWriter.go +++ /dev/null @@ -1,384 +0,0 @@ -// Code generated by mockery v2.43.2. DO NOT EDIT. - -package mocks - -import ( - context "context" - - rpc "github.com/gagliardetto/solana-go/rpc" - multinode "github.com/smartcontractkit/chainlink-solana/pkg/solana/client/multinode" - mock "github.com/stretchr/testify/mock" - - solana "github.com/gagliardetto/solana-go" -) - -// ReaderWriter is an autogenerated mock type for the ReaderWriter type -type ReaderWriter struct { - mock.Mock -} - -// Balance provides a mock function with given fields: ctx, addr -func (_m *ReaderWriter) Balance(ctx context.Context, addr solana.PublicKey) (uint64, error) { - ret := _m.Called(ctx, addr) - - if len(ret) == 0 { - panic("no return value specified for Balance") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey) (uint64, error)); ok { - return rf(ctx, addr) - } - if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey) uint64); ok { - r0 = rf(ctx, addr) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey) error); ok { - r1 = rf(ctx, addr) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ChainID provides a 
mock function with given fields: ctx -func (_m *ReaderWriter) ChainID(ctx context.Context) (multinode.StringID, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for ChainID") - } - - var r0 multinode.StringID - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (multinode.StringID, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) multinode.StringID); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(multinode.StringID) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetAccountInfoWithOpts provides a mock function with given fields: ctx, addr, opts -func (_m *ReaderWriter) GetAccountInfoWithOpts(ctx context.Context, addr solana.PublicKey, opts *rpc.GetAccountInfoOpts) (*rpc.GetAccountInfoResult, error) { - ret := _m.Called(ctx, addr, opts) - - if len(ret) == 0 { - panic("no return value specified for GetAccountInfoWithOpts") - } - - var r0 *rpc.GetAccountInfoResult - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, *rpc.GetAccountInfoOpts) (*rpc.GetAccountInfoResult, error)); ok { - return rf(ctx, addr, opts) - } - if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, *rpc.GetAccountInfoOpts) *rpc.GetAccountInfoResult); ok { - r0 = rf(ctx, addr, opts) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*rpc.GetAccountInfoResult) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey, *rpc.GetAccountInfoOpts) error); ok { - r1 = rf(ctx, addr, opts) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetBlock provides a mock function with given fields: ctx, slot -func (_m *ReaderWriter) GetBlock(ctx context.Context, slot uint64) (*rpc.GetBlockResult, error) { - ret := _m.Called(ctx, slot) - - if len(ret) == 0 { - panic("no return value specified for GetBlock") - } - - var r0 *rpc.GetBlockResult - var r1 error - 
if rf, ok := ret.Get(0).(func(context.Context, uint64) (*rpc.GetBlockResult, error)); ok { - return rf(ctx, slot) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64) *rpc.GetBlockResult); ok { - r0 = rf(ctx, slot) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*rpc.GetBlockResult) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { - r1 = rf(ctx, slot) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetBlocksWithLimit provides a mock function with given fields: ctx, startSlot, limit -func (_m *ReaderWriter) GetBlocksWithLimit(ctx context.Context, startSlot uint64, limit uint64) (*rpc.BlocksResult, error) { - ret := _m.Called(ctx, startSlot, limit) - - if len(ret) == 0 { - panic("no return value specified for GetBlocksWithLimit") - } - - var r0 *rpc.BlocksResult - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) (*rpc.BlocksResult, error)); ok { - return rf(ctx, startSlot, limit) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) *rpc.BlocksResult); ok { - r0 = rf(ctx, startSlot, limit) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*rpc.BlocksResult) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { - r1 = rf(ctx, startSlot, limit) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetFeeForMessage provides a mock function with given fields: ctx, msg -func (_m *ReaderWriter) GetFeeForMessage(ctx context.Context, msg string) (uint64, error) { - ret := _m.Called(ctx, msg) - - if len(ret) == 0 { - panic("no return value specified for GetFeeForMessage") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) (uint64, error)); ok { - return rf(ctx, msg) - } - if rf, ok := ret.Get(0).(func(context.Context, string) uint64); ok { - r0 = rf(ctx, msg) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { - r1 = rf(ctx, 
msg) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetLatestBlock provides a mock function with given fields: ctx -func (_m *ReaderWriter) GetLatestBlock(ctx context.Context) (*rpc.GetBlockResult, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GetLatestBlock") - } - - var r0 *rpc.GetBlockResult - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*rpc.GetBlockResult, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *rpc.GetBlockResult); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*rpc.GetBlockResult) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// LatestBlockhash provides a mock function with given fields: ctx -func (_m *ReaderWriter) LatestBlockhash(ctx context.Context) (*rpc.GetLatestBlockhashResult, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for LatestBlockhash") - } - - var r0 *rpc.GetLatestBlockhashResult - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*rpc.GetLatestBlockhashResult, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *rpc.GetLatestBlockhashResult); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*rpc.GetLatestBlockhashResult) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SendTx provides a mock function with given fields: ctx, tx -func (_m *ReaderWriter) SendTx(ctx context.Context, tx *solana.Transaction) (solana.Signature, error) { - ret := _m.Called(ctx, tx) - - if len(ret) == 0 { - panic("no return value specified for SendTx") - } - - var r0 solana.Signature - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *solana.Transaction) (solana.Signature, error)); ok { - 
return rf(ctx, tx) - } - if rf, ok := ret.Get(0).(func(context.Context, *solana.Transaction) solana.Signature); ok { - r0 = rf(ctx, tx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(solana.Signature) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *solana.Transaction) error); ok { - r1 = rf(ctx, tx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SignatureStatuses provides a mock function with given fields: ctx, sigs -func (_m *ReaderWriter) SignatureStatuses(ctx context.Context, sigs []solana.Signature) ([]*rpc.SignatureStatusesResult, error) { - ret := _m.Called(ctx, sigs) - - if len(ret) == 0 { - panic("no return value specified for SignatureStatuses") - } - - var r0 []*rpc.SignatureStatusesResult - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []solana.Signature) ([]*rpc.SignatureStatusesResult, error)); ok { - return rf(ctx, sigs) - } - if rf, ok := ret.Get(0).(func(context.Context, []solana.Signature) []*rpc.SignatureStatusesResult); ok { - r0 = rf(ctx, sigs) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*rpc.SignatureStatusesResult) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, []solana.Signature) error); ok { - r1 = rf(ctx, sigs) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SimulateTx provides a mock function with given fields: ctx, tx, opts -func (_m *ReaderWriter) SimulateTx(ctx context.Context, tx *solana.Transaction, opts *rpc.SimulateTransactionOpts) (*rpc.SimulateTransactionResult, error) { - ret := _m.Called(ctx, tx, opts) - - if len(ret) == 0 { - panic("no return value specified for SimulateTx") - } - - var r0 *rpc.SimulateTransactionResult - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *solana.Transaction, *rpc.SimulateTransactionOpts) (*rpc.SimulateTransactionResult, error)); ok { - return rf(ctx, tx, opts) - } - if rf, ok := ret.Get(0).(func(context.Context, *solana.Transaction, *rpc.SimulateTransactionOpts) *rpc.SimulateTransactionResult); 
ok { - r0 = rf(ctx, tx, opts) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*rpc.SimulateTransactionResult) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *solana.Transaction, *rpc.SimulateTransactionOpts) error); ok { - r1 = rf(ctx, tx, opts) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SlotHeight provides a mock function with given fields: ctx -func (_m *ReaderWriter) SlotHeight(ctx context.Context) (uint64, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SlotHeight") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// NewReaderWriter creates a new instance of ReaderWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewReaderWriter(t interface { - mock.TestingT - Cleanup(func()) -}) *ReaderWriter { - mock := &ReaderWriter{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/pkg/solana/client/mocks/reader_writer.go b/pkg/solana/client/mocks/reader_writer.go new file mode 100644 index 000000000..86285fdf5 --- /dev/null +++ b/pkg/solana/client/mocks/reader_writer.go @@ -0,0 +1,739 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + rpc "github.com/gagliardetto/solana-go/rpc" + multinode "github.com/smartcontractkit/chainlink-solana/pkg/solana/client/multinode" + mock "github.com/stretchr/testify/mock" + + solana "github.com/gagliardetto/solana-go" +) + +// ReaderWriter is an autogenerated mock type for the ReaderWriter type +type ReaderWriter struct { + mock.Mock +} + +type ReaderWriter_Expecter struct { + mock *mock.Mock +} + +func (_m *ReaderWriter) EXPECT() *ReaderWriter_Expecter { + return &ReaderWriter_Expecter{mock: &_m.Mock} +} + +// Balance provides a mock function with given fields: ctx, addr +func (_m *ReaderWriter) Balance(ctx context.Context, addr solana.PublicKey) (uint64, error) { + ret := _m.Called(ctx, addr) + + if len(ret) == 0 { + panic("no return value specified for Balance") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey) (uint64, error)); ok { + return rf(ctx, addr) + } + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey) uint64); ok { + r0 = rf(ctx, addr) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey) error); ok { + r1 = rf(ctx, addr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReaderWriter_Balance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Balance' +type ReaderWriter_Balance_Call struct { + *mock.Call +} + +// Balance is a helper method to define mock.On call +// - ctx context.Context +// - addr solana.PublicKey +func (_e *ReaderWriter_Expecter) Balance(ctx interface{}, addr interface{}) *ReaderWriter_Balance_Call { + return &ReaderWriter_Balance_Call{Call: _e.mock.On("Balance", ctx, addr)} +} + +func (_c *ReaderWriter_Balance_Call) Run(run func(ctx context.Context, addr solana.PublicKey)) *ReaderWriter_Balance_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(solana.PublicKey)) + 
}) + return _c +} + +func (_c *ReaderWriter_Balance_Call) Return(_a0 uint64, _a1 error) *ReaderWriter_Balance_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReaderWriter_Balance_Call) RunAndReturn(run func(context.Context, solana.PublicKey) (uint64, error)) *ReaderWriter_Balance_Call { + _c.Call.Return(run) + return _c +} + +// ChainID provides a mock function with given fields: ctx +func (_m *ReaderWriter) ChainID(ctx context.Context) (multinode.StringID, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 multinode.StringID + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (multinode.StringID, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) multinode.StringID); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(multinode.StringID) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReaderWriter_ChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainID' +type ReaderWriter_ChainID_Call struct { + *mock.Call +} + +// ChainID is a helper method to define mock.On call +// - ctx context.Context +func (_e *ReaderWriter_Expecter) ChainID(ctx interface{}) *ReaderWriter_ChainID_Call { + return &ReaderWriter_ChainID_Call{Call: _e.mock.On("ChainID", ctx)} +} + +func (_c *ReaderWriter_ChainID_Call) Run(run func(ctx context.Context)) *ReaderWriter_ChainID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ReaderWriter_ChainID_Call) Return(_a0 multinode.StringID, _a1 error) *ReaderWriter_ChainID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReaderWriter_ChainID_Call) RunAndReturn(run func(context.Context) (multinode.StringID, error)) *ReaderWriter_ChainID_Call { + _c.Call.Return(run) + return _c +} + +// GetAccountInfoWithOpts 
provides a mock function with given fields: ctx, addr, opts +func (_m *ReaderWriter) GetAccountInfoWithOpts(ctx context.Context, addr solana.PublicKey, opts *rpc.GetAccountInfoOpts) (*rpc.GetAccountInfoResult, error) { + ret := _m.Called(ctx, addr, opts) + + if len(ret) == 0 { + panic("no return value specified for GetAccountInfoWithOpts") + } + + var r0 *rpc.GetAccountInfoResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, *rpc.GetAccountInfoOpts) (*rpc.GetAccountInfoResult, error)); ok { + return rf(ctx, addr, opts) + } + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, *rpc.GetAccountInfoOpts) *rpc.GetAccountInfoResult); ok { + r0 = rf(ctx, addr, opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.GetAccountInfoResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey, *rpc.GetAccountInfoOpts) error); ok { + r1 = rf(ctx, addr, opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReaderWriter_GetAccountInfoWithOpts_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAccountInfoWithOpts' +type ReaderWriter_GetAccountInfoWithOpts_Call struct { + *mock.Call +} + +// GetAccountInfoWithOpts is a helper method to define mock.On call +// - ctx context.Context +// - addr solana.PublicKey +// - opts *rpc.GetAccountInfoOpts +func (_e *ReaderWriter_Expecter) GetAccountInfoWithOpts(ctx interface{}, addr interface{}, opts interface{}) *ReaderWriter_GetAccountInfoWithOpts_Call { + return &ReaderWriter_GetAccountInfoWithOpts_Call{Call: _e.mock.On("GetAccountInfoWithOpts", ctx, addr, opts)} +} + +func (_c *ReaderWriter_GetAccountInfoWithOpts_Call) Run(run func(ctx context.Context, addr solana.PublicKey, opts *rpc.GetAccountInfoOpts)) *ReaderWriter_GetAccountInfoWithOpts_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(solana.PublicKey), args[2].(*rpc.GetAccountInfoOpts)) + }) + return _c 
+} + +func (_c *ReaderWriter_GetAccountInfoWithOpts_Call) Return(_a0 *rpc.GetAccountInfoResult, _a1 error) *ReaderWriter_GetAccountInfoWithOpts_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReaderWriter_GetAccountInfoWithOpts_Call) RunAndReturn(run func(context.Context, solana.PublicKey, *rpc.GetAccountInfoOpts) (*rpc.GetAccountInfoResult, error)) *ReaderWriter_GetAccountInfoWithOpts_Call { + _c.Call.Return(run) + return _c +} + +// GetBlock provides a mock function with given fields: ctx, slot +func (_m *ReaderWriter) GetBlock(ctx context.Context, slot uint64) (*rpc.GetBlockResult, error) { + ret := _m.Called(ctx, slot) + + if len(ret) == 0 { + panic("no return value specified for GetBlock") + } + + var r0 *rpc.GetBlockResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*rpc.GetBlockResult, error)); ok { + return rf(ctx, slot) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *rpc.GetBlockResult); ok { + r0 = rf(ctx, slot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.GetBlockResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, slot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReaderWriter_GetBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlock' +type ReaderWriter_GetBlock_Call struct { + *mock.Call +} + +// GetBlock is a helper method to define mock.On call +// - ctx context.Context +// - slot uint64 +func (_e *ReaderWriter_Expecter) GetBlock(ctx interface{}, slot interface{}) *ReaderWriter_GetBlock_Call { + return &ReaderWriter_GetBlock_Call{Call: _e.mock.On("GetBlock", ctx, slot)} +} + +func (_c *ReaderWriter_GetBlock_Call) Run(run func(ctx context.Context, slot uint64)) *ReaderWriter_GetBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *ReaderWriter_GetBlock_Call) Return(_a0 
*rpc.GetBlockResult, _a1 error) *ReaderWriter_GetBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReaderWriter_GetBlock_Call) RunAndReturn(run func(context.Context, uint64) (*rpc.GetBlockResult, error)) *ReaderWriter_GetBlock_Call { + _c.Call.Return(run) + return _c +} + +// GetBlocksWithLimit provides a mock function with given fields: ctx, startSlot, limit +func (_m *ReaderWriter) GetBlocksWithLimit(ctx context.Context, startSlot uint64, limit uint64) (*rpc.BlocksResult, error) { + ret := _m.Called(ctx, startSlot, limit) + + if len(ret) == 0 { + panic("no return value specified for GetBlocksWithLimit") + } + + var r0 *rpc.BlocksResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) (*rpc.BlocksResult, error)); ok { + return rf(ctx, startSlot, limit) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) *rpc.BlocksResult); ok { + r0 = rf(ctx, startSlot, limit) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.BlocksResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { + r1 = rf(ctx, startSlot, limit) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReaderWriter_GetBlocksWithLimit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlocksWithLimit' +type ReaderWriter_GetBlocksWithLimit_Call struct { + *mock.Call +} + +// GetBlocksWithLimit is a helper method to define mock.On call +// - ctx context.Context +// - startSlot uint64 +// - limit uint64 +func (_e *ReaderWriter_Expecter) GetBlocksWithLimit(ctx interface{}, startSlot interface{}, limit interface{}) *ReaderWriter_GetBlocksWithLimit_Call { + return &ReaderWriter_GetBlocksWithLimit_Call{Call: _e.mock.On("GetBlocksWithLimit", ctx, startSlot, limit)} +} + +func (_c *ReaderWriter_GetBlocksWithLimit_Call) Run(run func(ctx context.Context, startSlot uint64, limit uint64)) *ReaderWriter_GetBlocksWithLimit_Call { + _c.Call.Run(func(args 
mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) + }) + return _c +} + +func (_c *ReaderWriter_GetBlocksWithLimit_Call) Return(_a0 *rpc.BlocksResult, _a1 error) *ReaderWriter_GetBlocksWithLimit_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReaderWriter_GetBlocksWithLimit_Call) RunAndReturn(run func(context.Context, uint64, uint64) (*rpc.BlocksResult, error)) *ReaderWriter_GetBlocksWithLimit_Call { + _c.Call.Return(run) + return _c +} + +// GetFeeForMessage provides a mock function with given fields: ctx, msg +func (_m *ReaderWriter) GetFeeForMessage(ctx context.Context, msg string) (uint64, error) { + ret := _m.Called(ctx, msg) + + if len(ret) == 0 { + panic("no return value specified for GetFeeForMessage") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (uint64, error)); ok { + return rf(ctx, msg) + } + if rf, ok := ret.Get(0).(func(context.Context, string) uint64); ok { + r0 = rf(ctx, msg) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, msg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReaderWriter_GetFeeForMessage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFeeForMessage' +type ReaderWriter_GetFeeForMessage_Call struct { + *mock.Call +} + +// GetFeeForMessage is a helper method to define mock.On call +// - ctx context.Context +// - msg string +func (_e *ReaderWriter_Expecter) GetFeeForMessage(ctx interface{}, msg interface{}) *ReaderWriter_GetFeeForMessage_Call { + return &ReaderWriter_GetFeeForMessage_Call{Call: _e.mock.On("GetFeeForMessage", ctx, msg)} +} + +func (_c *ReaderWriter_GetFeeForMessage_Call) Run(run func(ctx context.Context, msg string)) *ReaderWriter_GetFeeForMessage_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c 
*ReaderWriter_GetFeeForMessage_Call) Return(_a0 uint64, _a1 error) *ReaderWriter_GetFeeForMessage_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReaderWriter_GetFeeForMessage_Call) RunAndReturn(run func(context.Context, string) (uint64, error)) *ReaderWriter_GetFeeForMessage_Call { + _c.Call.Return(run) + return _c +} + +// GetLatestBlock provides a mock function with given fields: ctx +func (_m *ReaderWriter) GetLatestBlock(ctx context.Context) (*rpc.GetBlockResult, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLatestBlock") + } + + var r0 *rpc.GetBlockResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*rpc.GetBlockResult, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *rpc.GetBlockResult); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.GetBlockResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReaderWriter_GetLatestBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestBlock' +type ReaderWriter_GetLatestBlock_Call struct { + *mock.Call +} + +// GetLatestBlock is a helper method to define mock.On call +// - ctx context.Context +func (_e *ReaderWriter_Expecter) GetLatestBlock(ctx interface{}) *ReaderWriter_GetLatestBlock_Call { + return &ReaderWriter_GetLatestBlock_Call{Call: _e.mock.On("GetLatestBlock", ctx)} +} + +func (_c *ReaderWriter_GetLatestBlock_Call) Run(run func(ctx context.Context)) *ReaderWriter_GetLatestBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ReaderWriter_GetLatestBlock_Call) Return(_a0 *rpc.GetBlockResult, _a1 error) *ReaderWriter_GetLatestBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReaderWriter_GetLatestBlock_Call) RunAndReturn(run 
func(context.Context) (*rpc.GetBlockResult, error)) *ReaderWriter_GetLatestBlock_Call { + _c.Call.Return(run) + return _c +} + +// LatestBlockhash provides a mock function with given fields: ctx +func (_m *ReaderWriter) LatestBlockhash(ctx context.Context) (*rpc.GetLatestBlockhashResult, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for LatestBlockhash") + } + + var r0 *rpc.GetLatestBlockhashResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*rpc.GetLatestBlockhashResult, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *rpc.GetLatestBlockhashResult); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.GetLatestBlockhashResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReaderWriter_LatestBlockhash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestBlockhash' +type ReaderWriter_LatestBlockhash_Call struct { + *mock.Call +} + +// LatestBlockhash is a helper method to define mock.On call +// - ctx context.Context +func (_e *ReaderWriter_Expecter) LatestBlockhash(ctx interface{}) *ReaderWriter_LatestBlockhash_Call { + return &ReaderWriter_LatestBlockhash_Call{Call: _e.mock.On("LatestBlockhash", ctx)} +} + +func (_c *ReaderWriter_LatestBlockhash_Call) Run(run func(ctx context.Context)) *ReaderWriter_LatestBlockhash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ReaderWriter_LatestBlockhash_Call) Return(_a0 *rpc.GetLatestBlockhashResult, _a1 error) *ReaderWriter_LatestBlockhash_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReaderWriter_LatestBlockhash_Call) RunAndReturn(run func(context.Context) (*rpc.GetLatestBlockhashResult, error)) *ReaderWriter_LatestBlockhash_Call { + _c.Call.Return(run) + return _c +} + 
+// SendTx provides a mock function with given fields: ctx, tx +func (_m *ReaderWriter) SendTx(ctx context.Context, tx *solana.Transaction) (solana.Signature, error) { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SendTx") + } + + var r0 solana.Signature + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *solana.Transaction) (solana.Signature, error)); ok { + return rf(ctx, tx) + } + if rf, ok := ret.Get(0).(func(context.Context, *solana.Transaction) solana.Signature); ok { + r0 = rf(ctx, tx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(solana.Signature) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *solana.Transaction) error); ok { + r1 = rf(ctx, tx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReaderWriter_SendTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendTx' +type ReaderWriter_SendTx_Call struct { + *mock.Call +} + +// SendTx is a helper method to define mock.On call +// - ctx context.Context +// - tx *solana.Transaction +func (_e *ReaderWriter_Expecter) SendTx(ctx interface{}, tx interface{}) *ReaderWriter_SendTx_Call { + return &ReaderWriter_SendTx_Call{Call: _e.mock.On("SendTx", ctx, tx)} +} + +func (_c *ReaderWriter_SendTx_Call) Run(run func(ctx context.Context, tx *solana.Transaction)) *ReaderWriter_SendTx_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*solana.Transaction)) + }) + return _c +} + +func (_c *ReaderWriter_SendTx_Call) Return(_a0 solana.Signature, _a1 error) *ReaderWriter_SendTx_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReaderWriter_SendTx_Call) RunAndReturn(run func(context.Context, *solana.Transaction) (solana.Signature, error)) *ReaderWriter_SendTx_Call { + _c.Call.Return(run) + return _c +} + +// SignatureStatuses provides a mock function with given fields: ctx, sigs +func (_m *ReaderWriter) SignatureStatuses(ctx context.Context, 
sigs []solana.Signature) ([]*rpc.SignatureStatusesResult, error) { + ret := _m.Called(ctx, sigs) + + if len(ret) == 0 { + panic("no return value specified for SignatureStatuses") + } + + var r0 []*rpc.SignatureStatusesResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []solana.Signature) ([]*rpc.SignatureStatusesResult, error)); ok { + return rf(ctx, sigs) + } + if rf, ok := ret.Get(0).(func(context.Context, []solana.Signature) []*rpc.SignatureStatusesResult); ok { + r0 = rf(ctx, sigs) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*rpc.SignatureStatusesResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []solana.Signature) error); ok { + r1 = rf(ctx, sigs) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReaderWriter_SignatureStatuses_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SignatureStatuses' +type ReaderWriter_SignatureStatuses_Call struct { + *mock.Call +} + +// SignatureStatuses is a helper method to define mock.On call +// - ctx context.Context +// - sigs []solana.Signature +func (_e *ReaderWriter_Expecter) SignatureStatuses(ctx interface{}, sigs interface{}) *ReaderWriter_SignatureStatuses_Call { + return &ReaderWriter_SignatureStatuses_Call{Call: _e.mock.On("SignatureStatuses", ctx, sigs)} +} + +func (_c *ReaderWriter_SignatureStatuses_Call) Run(run func(ctx context.Context, sigs []solana.Signature)) *ReaderWriter_SignatureStatuses_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]solana.Signature)) + }) + return _c +} + +func (_c *ReaderWriter_SignatureStatuses_Call) Return(_a0 []*rpc.SignatureStatusesResult, _a1 error) *ReaderWriter_SignatureStatuses_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReaderWriter_SignatureStatuses_Call) RunAndReturn(run func(context.Context, []solana.Signature) ([]*rpc.SignatureStatusesResult, error)) *ReaderWriter_SignatureStatuses_Call { + _c.Call.Return(run) 
+ return _c +} + +// SimulateTx provides a mock function with given fields: ctx, tx, opts +func (_m *ReaderWriter) SimulateTx(ctx context.Context, tx *solana.Transaction, opts *rpc.SimulateTransactionOpts) (*rpc.SimulateTransactionResult, error) { + ret := _m.Called(ctx, tx, opts) + + if len(ret) == 0 { + panic("no return value specified for SimulateTx") + } + + var r0 *rpc.SimulateTransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *solana.Transaction, *rpc.SimulateTransactionOpts) (*rpc.SimulateTransactionResult, error)); ok { + return rf(ctx, tx, opts) + } + if rf, ok := ret.Get(0).(func(context.Context, *solana.Transaction, *rpc.SimulateTransactionOpts) *rpc.SimulateTransactionResult); ok { + r0 = rf(ctx, tx, opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.SimulateTransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *solana.Transaction, *rpc.SimulateTransactionOpts) error); ok { + r1 = rf(ctx, tx, opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReaderWriter_SimulateTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SimulateTx' +type ReaderWriter_SimulateTx_Call struct { + *mock.Call +} + +// SimulateTx is a helper method to define mock.On call +// - ctx context.Context +// - tx *solana.Transaction +// - opts *rpc.SimulateTransactionOpts +func (_e *ReaderWriter_Expecter) SimulateTx(ctx interface{}, tx interface{}, opts interface{}) *ReaderWriter_SimulateTx_Call { + return &ReaderWriter_SimulateTx_Call{Call: _e.mock.On("SimulateTx", ctx, tx, opts)} +} + +func (_c *ReaderWriter_SimulateTx_Call) Run(run func(ctx context.Context, tx *solana.Transaction, opts *rpc.SimulateTransactionOpts)) *ReaderWriter_SimulateTx_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*solana.Transaction), args[2].(*rpc.SimulateTransactionOpts)) + }) + return _c +} + +func (_c *ReaderWriter_SimulateTx_Call) 
Return(_a0 *rpc.SimulateTransactionResult, _a1 error) *ReaderWriter_SimulateTx_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReaderWriter_SimulateTx_Call) RunAndReturn(run func(context.Context, *solana.Transaction, *rpc.SimulateTransactionOpts) (*rpc.SimulateTransactionResult, error)) *ReaderWriter_SimulateTx_Call { + _c.Call.Return(run) + return _c +} + +// SlotHeight provides a mock function with given fields: ctx +func (_m *ReaderWriter) SlotHeight(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SlotHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReaderWriter_SlotHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SlotHeight' +type ReaderWriter_SlotHeight_Call struct { + *mock.Call +} + +// SlotHeight is a helper method to define mock.On call +// - ctx context.Context +func (_e *ReaderWriter_Expecter) SlotHeight(ctx interface{}) *ReaderWriter_SlotHeight_Call { + return &ReaderWriter_SlotHeight_Call{Call: _e.mock.On("SlotHeight", ctx)} +} + +func (_c *ReaderWriter_SlotHeight_Call) Run(run func(ctx context.Context)) *ReaderWriter_SlotHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ReaderWriter_SlotHeight_Call) Return(_a0 uint64, _a1 error) *ReaderWriter_SlotHeight_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReaderWriter_SlotHeight_Call) RunAndReturn(run func(context.Context) (uint64, error)) *ReaderWriter_SlotHeight_Call { + _c.Call.Return(run) + return _c +} + +// NewReaderWriter creates a 
new instance of ReaderWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReaderWriter(t interface { + mock.TestingT + Cleanup(func()) +}) *ReaderWriter { + mock := &ReaderWriter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/solana/config/config.go b/pkg/solana/config/config.go index 28d7ac5fb..7700b7a64 100644 --- a/pkg/solana/config/config.go +++ b/pkg/solana/config/config.go @@ -35,7 +35,6 @@ var defaultConfigSet = Chain{ EstimateComputeUnitLimit: ptr(false), // set to false to disable compute unit limit estimation } -//go:generate mockery --name Config --output ./mocks/ --case=underscore --filename config.go type Config interface { BalancePollPeriod() time.Duration ConfirmPollPeriod() time.Duration diff --git a/pkg/solana/config/mocks/config.go b/pkg/solana/config/mocks/config.go index feef5c3c6..6f9ab913d 100644 --- a/pkg/solana/config/mocks/config.go +++ b/pkg/solana/config/mocks/config.go @@ -14,6 +14,14 @@ type Config struct { mock.Mock } +type Config_Expecter struct { + mock *mock.Mock +} + +func (_m *Config) EXPECT() *Config_Expecter { + return &Config_Expecter{mock: &_m.Mock} +} + // BalancePollPeriod provides a mock function with given fields: func (_m *Config) BalancePollPeriod() time.Duration { ret := _m.Called() @@ -32,6 +40,33 @@ func (_m *Config) BalancePollPeriod() time.Duration { return r0 } +// Config_BalancePollPeriod_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BalancePollPeriod' +type Config_BalancePollPeriod_Call struct { + *mock.Call +} + +// BalancePollPeriod is a helper method to define mock.On call +func (_e *Config_Expecter) BalancePollPeriod() *Config_BalancePollPeriod_Call { + return &Config_BalancePollPeriod_Call{Call: _e.mock.On("BalancePollPeriod")} +} + +func (_c *Config_BalancePollPeriod_Call) 
Run(run func()) *Config_BalancePollPeriod_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_BalancePollPeriod_Call) Return(_a0 time.Duration) *Config_BalancePollPeriod_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_BalancePollPeriod_Call) RunAndReturn(run func() time.Duration) *Config_BalancePollPeriod_Call { + _c.Call.Return(run) + return _c +} + // BlockHistoryPollPeriod provides a mock function with given fields: func (_m *Config) BlockHistoryPollPeriod() time.Duration { ret := _m.Called() @@ -50,6 +85,33 @@ func (_m *Config) BlockHistoryPollPeriod() time.Duration { return r0 } +// Config_BlockHistoryPollPeriod_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockHistoryPollPeriod' +type Config_BlockHistoryPollPeriod_Call struct { + *mock.Call +} + +// BlockHistoryPollPeriod is a helper method to define mock.On call +func (_e *Config_Expecter) BlockHistoryPollPeriod() *Config_BlockHistoryPollPeriod_Call { + return &Config_BlockHistoryPollPeriod_Call{Call: _e.mock.On("BlockHistoryPollPeriod")} +} + +func (_c *Config_BlockHistoryPollPeriod_Call) Run(run func()) *Config_BlockHistoryPollPeriod_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_BlockHistoryPollPeriod_Call) Return(_a0 time.Duration) *Config_BlockHistoryPollPeriod_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_BlockHistoryPollPeriod_Call) RunAndReturn(run func() time.Duration) *Config_BlockHistoryPollPeriod_Call { + _c.Call.Return(run) + return _c +} + // BlockHistorySize provides a mock function with given fields: func (_m *Config) BlockHistorySize() uint64 { ret := _m.Called() @@ -68,6 +130,33 @@ func (_m *Config) BlockHistorySize() uint64 { return r0 } +// Config_BlockHistorySize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockHistorySize' +type Config_BlockHistorySize_Call struct { 
+ *mock.Call +} + +// BlockHistorySize is a helper method to define mock.On call +func (_e *Config_Expecter) BlockHistorySize() *Config_BlockHistorySize_Call { + return &Config_BlockHistorySize_Call{Call: _e.mock.On("BlockHistorySize")} +} + +func (_c *Config_BlockHistorySize_Call) Run(run func()) *Config_BlockHistorySize_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_BlockHistorySize_Call) Return(_a0 uint64) *Config_BlockHistorySize_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_BlockHistorySize_Call) RunAndReturn(run func() uint64) *Config_BlockHistorySize_Call { + _c.Call.Return(run) + return _c +} + // Commitment provides a mock function with given fields: func (_m *Config) Commitment() rpc.CommitmentType { ret := _m.Called() @@ -86,6 +175,33 @@ func (_m *Config) Commitment() rpc.CommitmentType { return r0 } +// Config_Commitment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Commitment' +type Config_Commitment_Call struct { + *mock.Call +} + +// Commitment is a helper method to define mock.On call +func (_e *Config_Expecter) Commitment() *Config_Commitment_Call { + return &Config_Commitment_Call{Call: _e.mock.On("Commitment")} +} + +func (_c *Config_Commitment_Call) Run(run func()) *Config_Commitment_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_Commitment_Call) Return(_a0 rpc.CommitmentType) *Config_Commitment_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_Commitment_Call) RunAndReturn(run func() rpc.CommitmentType) *Config_Commitment_Call { + _c.Call.Return(run) + return _c +} + // ComputeUnitLimitDefault provides a mock function with given fields: func (_m *Config) ComputeUnitLimitDefault() uint32 { ret := _m.Called() @@ -104,6 +220,33 @@ func (_m *Config) ComputeUnitLimitDefault() uint32 { return r0 } +// Config_ComputeUnitLimitDefault_Call is a *mock.Call that shadows Run/Return 
methods with type explicit version for method 'ComputeUnitLimitDefault' +type Config_ComputeUnitLimitDefault_Call struct { + *mock.Call +} + +// ComputeUnitLimitDefault is a helper method to define mock.On call +func (_e *Config_Expecter) ComputeUnitLimitDefault() *Config_ComputeUnitLimitDefault_Call { + return &Config_ComputeUnitLimitDefault_Call{Call: _e.mock.On("ComputeUnitLimitDefault")} +} + +func (_c *Config_ComputeUnitLimitDefault_Call) Run(run func()) *Config_ComputeUnitLimitDefault_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_ComputeUnitLimitDefault_Call) Return(_a0 uint32) *Config_ComputeUnitLimitDefault_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_ComputeUnitLimitDefault_Call) RunAndReturn(run func() uint32) *Config_ComputeUnitLimitDefault_Call { + _c.Call.Return(run) + return _c +} + // ComputeUnitPriceDefault provides a mock function with given fields: func (_m *Config) ComputeUnitPriceDefault() uint64 { ret := _m.Called() @@ -122,6 +265,33 @@ func (_m *Config) ComputeUnitPriceDefault() uint64 { return r0 } +// Config_ComputeUnitPriceDefault_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ComputeUnitPriceDefault' +type Config_ComputeUnitPriceDefault_Call struct { + *mock.Call +} + +// ComputeUnitPriceDefault is a helper method to define mock.On call +func (_e *Config_Expecter) ComputeUnitPriceDefault() *Config_ComputeUnitPriceDefault_Call { + return &Config_ComputeUnitPriceDefault_Call{Call: _e.mock.On("ComputeUnitPriceDefault")} +} + +func (_c *Config_ComputeUnitPriceDefault_Call) Run(run func()) *Config_ComputeUnitPriceDefault_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_ComputeUnitPriceDefault_Call) Return(_a0 uint64) *Config_ComputeUnitPriceDefault_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_ComputeUnitPriceDefault_Call) RunAndReturn(run func() uint64) 
*Config_ComputeUnitPriceDefault_Call { + _c.Call.Return(run) + return _c +} + // ComputeUnitPriceMax provides a mock function with given fields: func (_m *Config) ComputeUnitPriceMax() uint64 { ret := _m.Called() @@ -140,6 +310,33 @@ func (_m *Config) ComputeUnitPriceMax() uint64 { return r0 } +// Config_ComputeUnitPriceMax_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ComputeUnitPriceMax' +type Config_ComputeUnitPriceMax_Call struct { + *mock.Call +} + +// ComputeUnitPriceMax is a helper method to define mock.On call +func (_e *Config_Expecter) ComputeUnitPriceMax() *Config_ComputeUnitPriceMax_Call { + return &Config_ComputeUnitPriceMax_Call{Call: _e.mock.On("ComputeUnitPriceMax")} +} + +func (_c *Config_ComputeUnitPriceMax_Call) Run(run func()) *Config_ComputeUnitPriceMax_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_ComputeUnitPriceMax_Call) Return(_a0 uint64) *Config_ComputeUnitPriceMax_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_ComputeUnitPriceMax_Call) RunAndReturn(run func() uint64) *Config_ComputeUnitPriceMax_Call { + _c.Call.Return(run) + return _c +} + // ComputeUnitPriceMin provides a mock function with given fields: func (_m *Config) ComputeUnitPriceMin() uint64 { ret := _m.Called() @@ -158,6 +355,33 @@ func (_m *Config) ComputeUnitPriceMin() uint64 { return r0 } +// Config_ComputeUnitPriceMin_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ComputeUnitPriceMin' +type Config_ComputeUnitPriceMin_Call struct { + *mock.Call +} + +// ComputeUnitPriceMin is a helper method to define mock.On call +func (_e *Config_Expecter) ComputeUnitPriceMin() *Config_ComputeUnitPriceMin_Call { + return &Config_ComputeUnitPriceMin_Call{Call: _e.mock.On("ComputeUnitPriceMin")} +} + +func (_c *Config_ComputeUnitPriceMin_Call) Run(run func()) *Config_ComputeUnitPriceMin_Call { + _c.Call.Run(func(args mock.Arguments) { 
+ run() + }) + return _c +} + +func (_c *Config_ComputeUnitPriceMin_Call) Return(_a0 uint64) *Config_ComputeUnitPriceMin_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_ComputeUnitPriceMin_Call) RunAndReturn(run func() uint64) *Config_ComputeUnitPriceMin_Call { + _c.Call.Return(run) + return _c +} + // ConfirmPollPeriod provides a mock function with given fields: func (_m *Config) ConfirmPollPeriod() time.Duration { ret := _m.Called() @@ -176,6 +400,33 @@ func (_m *Config) ConfirmPollPeriod() time.Duration { return r0 } +// Config_ConfirmPollPeriod_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ConfirmPollPeriod' +type Config_ConfirmPollPeriod_Call struct { + *mock.Call +} + +// ConfirmPollPeriod is a helper method to define mock.On call +func (_e *Config_Expecter) ConfirmPollPeriod() *Config_ConfirmPollPeriod_Call { + return &Config_ConfirmPollPeriod_Call{Call: _e.mock.On("ConfirmPollPeriod")} +} + +func (_c *Config_ConfirmPollPeriod_Call) Run(run func()) *Config_ConfirmPollPeriod_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_ConfirmPollPeriod_Call) Return(_a0 time.Duration) *Config_ConfirmPollPeriod_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_ConfirmPollPeriod_Call) RunAndReturn(run func() time.Duration) *Config_ConfirmPollPeriod_Call { + _c.Call.Return(run) + return _c +} + // EstimateComputeUnitLimit provides a mock function with given fields: func (_m *Config) EstimateComputeUnitLimit() bool { ret := _m.Called() @@ -194,6 +445,33 @@ func (_m *Config) EstimateComputeUnitLimit() bool { return r0 } +// Config_EstimateComputeUnitLimit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EstimateComputeUnitLimit' +type Config_EstimateComputeUnitLimit_Call struct { + *mock.Call +} + +// EstimateComputeUnitLimit is a helper method to define mock.On call +func (_e *Config_Expecter) 
EstimateComputeUnitLimit() *Config_EstimateComputeUnitLimit_Call { + return &Config_EstimateComputeUnitLimit_Call{Call: _e.mock.On("EstimateComputeUnitLimit")} +} + +func (_c *Config_EstimateComputeUnitLimit_Call) Run(run func()) *Config_EstimateComputeUnitLimit_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_EstimateComputeUnitLimit_Call) Return(_a0 bool) *Config_EstimateComputeUnitLimit_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_EstimateComputeUnitLimit_Call) RunAndReturn(run func() bool) *Config_EstimateComputeUnitLimit_Call { + _c.Call.Return(run) + return _c +} + // FeeBumpPeriod provides a mock function with given fields: func (_m *Config) FeeBumpPeriod() time.Duration { ret := _m.Called() @@ -212,6 +490,33 @@ func (_m *Config) FeeBumpPeriod() time.Duration { return r0 } +// Config_FeeBumpPeriod_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FeeBumpPeriod' +type Config_FeeBumpPeriod_Call struct { + *mock.Call +} + +// FeeBumpPeriod is a helper method to define mock.On call +func (_e *Config_Expecter) FeeBumpPeriod() *Config_FeeBumpPeriod_Call { + return &Config_FeeBumpPeriod_Call{Call: _e.mock.On("FeeBumpPeriod")} +} + +func (_c *Config_FeeBumpPeriod_Call) Run(run func()) *Config_FeeBumpPeriod_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_FeeBumpPeriod_Call) Return(_a0 time.Duration) *Config_FeeBumpPeriod_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_FeeBumpPeriod_Call) RunAndReturn(run func() time.Duration) *Config_FeeBumpPeriod_Call { + _c.Call.Return(run) + return _c +} + // FeeEstimatorMode provides a mock function with given fields: func (_m *Config) FeeEstimatorMode() string { ret := _m.Called() @@ -230,6 +535,33 @@ func (_m *Config) FeeEstimatorMode() string { return r0 } +// Config_FeeEstimatorMode_Call is a *mock.Call that shadows Run/Return methods with type explicit 
version for method 'FeeEstimatorMode' +type Config_FeeEstimatorMode_Call struct { + *mock.Call +} + +// FeeEstimatorMode is a helper method to define mock.On call +func (_e *Config_Expecter) FeeEstimatorMode() *Config_FeeEstimatorMode_Call { + return &Config_FeeEstimatorMode_Call{Call: _e.mock.On("FeeEstimatorMode")} +} + +func (_c *Config_FeeEstimatorMode_Call) Run(run func()) *Config_FeeEstimatorMode_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_FeeEstimatorMode_Call) Return(_a0 string) *Config_FeeEstimatorMode_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_FeeEstimatorMode_Call) RunAndReturn(run func() string) *Config_FeeEstimatorMode_Call { + _c.Call.Return(run) + return _c +} + // MaxRetries provides a mock function with given fields: func (_m *Config) MaxRetries() *uint { ret := _m.Called() @@ -250,6 +582,33 @@ func (_m *Config) MaxRetries() *uint { return r0 } +// Config_MaxRetries_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MaxRetries' +type Config_MaxRetries_Call struct { + *mock.Call +} + +// MaxRetries is a helper method to define mock.On call +func (_e *Config_Expecter) MaxRetries() *Config_MaxRetries_Call { + return &Config_MaxRetries_Call{Call: _e.mock.On("MaxRetries")} +} + +func (_c *Config_MaxRetries_Call) Run(run func()) *Config_MaxRetries_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_MaxRetries_Call) Return(_a0 *uint) *Config_MaxRetries_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_MaxRetries_Call) RunAndReturn(run func() *uint) *Config_MaxRetries_Call { + _c.Call.Return(run) + return _c +} + // OCR2CachePollPeriod provides a mock function with given fields: func (_m *Config) OCR2CachePollPeriod() time.Duration { ret := _m.Called() @@ -268,6 +627,33 @@ func (_m *Config) OCR2CachePollPeriod() time.Duration { return r0 } +// Config_OCR2CachePollPeriod_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'OCR2CachePollPeriod' +type Config_OCR2CachePollPeriod_Call struct { + *mock.Call +} + +// OCR2CachePollPeriod is a helper method to define mock.On call +func (_e *Config_Expecter) OCR2CachePollPeriod() *Config_OCR2CachePollPeriod_Call { + return &Config_OCR2CachePollPeriod_Call{Call: _e.mock.On("OCR2CachePollPeriod")} +} + +func (_c *Config_OCR2CachePollPeriod_Call) Run(run func()) *Config_OCR2CachePollPeriod_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_OCR2CachePollPeriod_Call) Return(_a0 time.Duration) *Config_OCR2CachePollPeriod_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_OCR2CachePollPeriod_Call) RunAndReturn(run func() time.Duration) *Config_OCR2CachePollPeriod_Call { + _c.Call.Return(run) + return _c +} + // OCR2CacheTTL provides a mock function with given fields: func (_m *Config) OCR2CacheTTL() time.Duration { ret := _m.Called() @@ -286,6 +672,33 @@ func (_m *Config) OCR2CacheTTL() time.Duration { return r0 } +// Config_OCR2CacheTTL_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OCR2CacheTTL' +type Config_OCR2CacheTTL_Call struct { + *mock.Call +} + +// OCR2CacheTTL is a helper method to define mock.On call +func (_e *Config_Expecter) OCR2CacheTTL() *Config_OCR2CacheTTL_Call { + return &Config_OCR2CacheTTL_Call{Call: _e.mock.On("OCR2CacheTTL")} +} + +func (_c *Config_OCR2CacheTTL_Call) Run(run func()) *Config_OCR2CacheTTL_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_OCR2CacheTTL_Call) Return(_a0 time.Duration) *Config_OCR2CacheTTL_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_OCR2CacheTTL_Call) RunAndReturn(run func() time.Duration) *Config_OCR2CacheTTL_Call { + _c.Call.Return(run) + return _c +} + // SkipPreflight provides a mock function with given fields: func (_m *Config) SkipPreflight() 
bool { ret := _m.Called() @@ -304,6 +717,33 @@ func (_m *Config) SkipPreflight() bool { return r0 } +// Config_SkipPreflight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SkipPreflight' +type Config_SkipPreflight_Call struct { + *mock.Call +} + +// SkipPreflight is a helper method to define mock.On call +func (_e *Config_Expecter) SkipPreflight() *Config_SkipPreflight_Call { + return &Config_SkipPreflight_Call{Call: _e.mock.On("SkipPreflight")} +} + +func (_c *Config_SkipPreflight_Call) Run(run func()) *Config_SkipPreflight_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_SkipPreflight_Call) Return(_a0 bool) *Config_SkipPreflight_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_SkipPreflight_Call) RunAndReturn(run func() bool) *Config_SkipPreflight_Call { + _c.Call.Return(run) + return _c +} + // TxConfirmTimeout provides a mock function with given fields: func (_m *Config) TxConfirmTimeout() time.Duration { ret := _m.Called() @@ -322,6 +762,33 @@ func (_m *Config) TxConfirmTimeout() time.Duration { return r0 } +// Config_TxConfirmTimeout_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TxConfirmTimeout' +type Config_TxConfirmTimeout_Call struct { + *mock.Call +} + +// TxConfirmTimeout is a helper method to define mock.On call +func (_e *Config_Expecter) TxConfirmTimeout() *Config_TxConfirmTimeout_Call { + return &Config_TxConfirmTimeout_Call{Call: _e.mock.On("TxConfirmTimeout")} +} + +func (_c *Config_TxConfirmTimeout_Call) Run(run func()) *Config_TxConfirmTimeout_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_TxConfirmTimeout_Call) Return(_a0 time.Duration) *Config_TxConfirmTimeout_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_TxConfirmTimeout_Call) RunAndReturn(run func() time.Duration) *Config_TxConfirmTimeout_Call { + _c.Call.Return(run) + 
return _c +} + // TxRetentionTimeout provides a mock function with given fields: func (_m *Config) TxRetentionTimeout() time.Duration { ret := _m.Called() @@ -340,6 +807,33 @@ func (_m *Config) TxRetentionTimeout() time.Duration { return r0 } +// Config_TxRetentionTimeout_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TxRetentionTimeout' +type Config_TxRetentionTimeout_Call struct { + *mock.Call +} + +// TxRetentionTimeout is a helper method to define mock.On call +func (_e *Config_Expecter) TxRetentionTimeout() *Config_TxRetentionTimeout_Call { + return &Config_TxRetentionTimeout_Call{Call: _e.mock.On("TxRetentionTimeout")} +} + +func (_c *Config_TxRetentionTimeout_Call) Run(run func()) *Config_TxRetentionTimeout_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_TxRetentionTimeout_Call) Return(_a0 time.Duration) *Config_TxRetentionTimeout_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_TxRetentionTimeout_Call) RunAndReturn(run func() time.Duration) *Config_TxRetentionTimeout_Call { + _c.Call.Return(run) + return _c +} + // TxRetryTimeout provides a mock function with given fields: func (_m *Config) TxRetryTimeout() time.Duration { ret := _m.Called() @@ -358,6 +852,33 @@ func (_m *Config) TxRetryTimeout() time.Duration { return r0 } +// Config_TxRetryTimeout_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TxRetryTimeout' +type Config_TxRetryTimeout_Call struct { + *mock.Call +} + +// TxRetryTimeout is a helper method to define mock.On call +func (_e *Config_Expecter) TxRetryTimeout() *Config_TxRetryTimeout_Call { + return &Config_TxRetryTimeout_Call{Call: _e.mock.On("TxRetryTimeout")} +} + +func (_c *Config_TxRetryTimeout_Call) Run(run func()) *Config_TxRetryTimeout_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_TxRetryTimeout_Call) Return(_a0 time.Duration) 
*Config_TxRetryTimeout_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_TxRetryTimeout_Call) RunAndReturn(run func() time.Duration) *Config_TxRetryTimeout_Call { + _c.Call.Return(run) + return _c +} + // TxTimeout provides a mock function with given fields: func (_m *Config) TxTimeout() time.Duration { ret := _m.Called() @@ -376,6 +897,33 @@ func (_m *Config) TxTimeout() time.Duration { return r0 } +// Config_TxTimeout_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TxTimeout' +type Config_TxTimeout_Call struct { + *mock.Call +} + +// TxTimeout is a helper method to define mock.On call +func (_e *Config_Expecter) TxTimeout() *Config_TxTimeout_Call { + return &Config_TxTimeout_Call{Call: _e.mock.On("TxTimeout")} +} + +func (_c *Config_TxTimeout_Call) Run(run func()) *Config_TxTimeout_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Config_TxTimeout_Call) Return(_a0 time.Duration) *Config_TxTimeout_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Config_TxTimeout_Call) RunAndReturn(run func() time.Duration) *Config_TxTimeout_Call { + _c.Call.Return(run) + return _c +} + // NewConfig creates a new instance of Config. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewConfig(t interface { diff --git a/pkg/solana/fees/estimator.go b/pkg/solana/fees/estimator.go index aded6f4a6..f09fec95b 100644 --- a/pkg/solana/fees/estimator.go +++ b/pkg/solana/fees/estimator.go @@ -2,7 +2,6 @@ package fees import "context" -//go:generate mockery --name Estimator --output ./mocks/ type Estimator interface { Start(context.Context) error Close() error diff --git a/pkg/solana/fees/mocks/Estimator.go b/pkg/solana/fees/mocks/Estimator.go index a61b811a7..06d6a53ae 100644 --- a/pkg/solana/fees/mocks/Estimator.go +++ b/pkg/solana/fees/mocks/Estimator.go @@ -13,6 +13,14 @@ type Estimator struct { mock.Mock } +type Estimator_Expecter struct { + mock *mock.Mock +} + +func (_m *Estimator) EXPECT() *Estimator_Expecter { + return &Estimator_Expecter{mock: &_m.Mock} +} + // BaseComputeUnitPrice provides a mock function with given fields: func (_m *Estimator) BaseComputeUnitPrice() uint64 { ret := _m.Called() @@ -31,6 +39,33 @@ func (_m *Estimator) BaseComputeUnitPrice() uint64 { return r0 } +// Estimator_BaseComputeUnitPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BaseComputeUnitPrice' +type Estimator_BaseComputeUnitPrice_Call struct { + *mock.Call +} + +// BaseComputeUnitPrice is a helper method to define mock.On call +func (_e *Estimator_Expecter) BaseComputeUnitPrice() *Estimator_BaseComputeUnitPrice_Call { + return &Estimator_BaseComputeUnitPrice_Call{Call: _e.mock.On("BaseComputeUnitPrice")} +} + +func (_c *Estimator_BaseComputeUnitPrice_Call) Run(run func()) *Estimator_BaseComputeUnitPrice_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Estimator_BaseComputeUnitPrice_Call) Return(_a0 uint64) *Estimator_BaseComputeUnitPrice_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Estimator_BaseComputeUnitPrice_Call) RunAndReturn(run func() uint64) *Estimator_BaseComputeUnitPrice_Call { + _c.Call.Return(run) + return _c +} + // Close provides a mock 
function with given fields: func (_m *Estimator) Close() error { ret := _m.Called() @@ -49,6 +84,33 @@ func (_m *Estimator) Close() error { return r0 } +// Estimator_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type Estimator_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *Estimator_Expecter) Close() *Estimator_Close_Call { + return &Estimator_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *Estimator_Close_Call) Run(run func()) *Estimator_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Estimator_Close_Call) Return(_a0 error) *Estimator_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Estimator_Close_Call) RunAndReturn(run func() error) *Estimator_Close_Call { + _c.Call.Return(run) + return _c +} + // Start provides a mock function with given fields: _a0 func (_m *Estimator) Start(_a0 context.Context) error { ret := _m.Called(_a0) @@ -67,6 +129,34 @@ func (_m *Estimator) Start(_a0 context.Context) error { return r0 } +// Estimator_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type Estimator_Start_Call struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +// - _a0 context.Context +func (_e *Estimator_Expecter) Start(_a0 interface{}) *Estimator_Start_Call { + return &Estimator_Start_Call{Call: _e.mock.On("Start", _a0)} +} + +func (_c *Estimator_Start_Call) Run(run func(_a0 context.Context)) *Estimator_Start_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Estimator_Start_Call) Return(_a0 error) *Estimator_Start_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Estimator_Start_Call) RunAndReturn(run func(context.Context) error) *Estimator_Start_Call { + _c.Call.Return(run) + return _c +} + // NewEstimator creates a new instance of 
Estimator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewEstimator(t interface { diff --git a/pkg/solana/txm/mocks/simple_keystore.go b/pkg/solana/txm/mocks/simple_keystore.go index 1c0bd6562..655869cd7 100644 --- a/pkg/solana/txm/mocks/simple_keystore.go +++ b/pkg/solana/txm/mocks/simple_keystore.go @@ -13,6 +13,14 @@ type SimpleKeystore struct { mock.Mock } +type SimpleKeystore_Expecter struct { + mock *mock.Mock +} + +func (_m *SimpleKeystore) EXPECT() *SimpleKeystore_Expecter { + return &SimpleKeystore_Expecter{mock: &_m.Mock} +} + // Accounts provides a mock function with given fields: ctx func (_m *SimpleKeystore) Accounts(ctx context.Context) ([]string, error) { ret := _m.Called(ctx) @@ -43,6 +51,34 @@ func (_m *SimpleKeystore) Accounts(ctx context.Context) ([]string, error) { return r0, r1 } +// SimpleKeystore_Accounts_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Accounts' +type SimpleKeystore_Accounts_Call struct { + *mock.Call +} + +// Accounts is a helper method to define mock.On call +// - ctx context.Context +func (_e *SimpleKeystore_Expecter) Accounts(ctx interface{}) *SimpleKeystore_Accounts_Call { + return &SimpleKeystore_Accounts_Call{Call: _e.mock.On("Accounts", ctx)} +} + +func (_c *SimpleKeystore_Accounts_Call) Run(run func(ctx context.Context)) *SimpleKeystore_Accounts_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *SimpleKeystore_Accounts_Call) Return(accounts []string, err error) *SimpleKeystore_Accounts_Call { + _c.Call.Return(accounts, err) + return _c +} + +func (_c *SimpleKeystore_Accounts_Call) RunAndReturn(run func(context.Context) ([]string, error)) *SimpleKeystore_Accounts_Call { + _c.Call.Return(run) + return _c +} + // Sign provides a mock function with given fields: ctx, account, data func (_m 
*SimpleKeystore) Sign(ctx context.Context, account string, data []byte) ([]byte, error) { ret := _m.Called(ctx, account, data) @@ -73,6 +109,36 @@ func (_m *SimpleKeystore) Sign(ctx context.Context, account string, data []byte) return r0, r1 } +// SimpleKeystore_Sign_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sign' +type SimpleKeystore_Sign_Call struct { + *mock.Call +} + +// Sign is a helper method to define mock.On call +// - ctx context.Context +// - account string +// - data []byte +func (_e *SimpleKeystore_Expecter) Sign(ctx interface{}, account interface{}, data interface{}) *SimpleKeystore_Sign_Call { + return &SimpleKeystore_Sign_Call{Call: _e.mock.On("Sign", ctx, account, data)} +} + +func (_c *SimpleKeystore_Sign_Call) Run(run func(ctx context.Context, account string, data []byte)) *SimpleKeystore_Sign_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].([]byte)) + }) + return _c +} + +func (_c *SimpleKeystore_Sign_Call) Return(signature []byte, err error) *SimpleKeystore_Sign_Call { + _c.Call.Return(signature, err) + return _c +} + +func (_c *SimpleKeystore_Sign_Call) RunAndReturn(run func(context.Context, string, []byte) ([]byte, error)) *SimpleKeystore_Sign_Call { + _c.Call.Return(run) + return _c +} + // NewSimpleKeystore creates a new instance of SimpleKeystore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewSimpleKeystore(t interface { diff --git a/pkg/solana/txm/txm.go b/pkg/solana/txm/txm.go index e34c99cef..13b7fcfdc 100644 --- a/pkg/solana/txm/txm.go +++ b/pkg/solana/txm/txm.go @@ -38,7 +38,6 @@ const ( var _ services.Service = (*Txm)(nil) -//go:generate mockery --name SimpleKeystore --output ./mocks/ --case=underscore --filename simple_keystore.go type SimpleKeystore interface { Sign(ctx context.Context, account string, data []byte) (signature []byte, err error) Accounts(ctx context.Context) (accounts []string, err error) diff --git a/scripts/build-contract-artifacts-action.sh b/scripts/build-contract-artifacts-action.sh index 4aa2781b3..379628b43 100755 --- a/scripts/build-contract-artifacts-action.sh +++ b/scripts/build-contract-artifacts-action.sh @@ -11,8 +11,8 @@ CONTRACTS=${REPO}/contracts # install go apt-get update apt-get install -y wget -wget https://golang.org/dl/go1.21.7.linux-amd64.tar.gz -tar -xvf go1.21.7.linux-amd64.tar.gz +wget https://golang.org/dl/go1.22.8.linux-amd64.tar.gz +tar -xvf go1.22.8.linux-amd64.tar.gz mv go /usr/local export PATH=/usr/local/go/bin:$PATH export GOPATH=$HOME/go From 4f60a790983af3f5a4b6fb854d05da59e32aa1bf Mon Sep 17 00:00:00 2001 From: "app-token-issuer-infra-releng[bot]" <120227048+app-token-issuer-infra-releng[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 09:58:15 +0000 Subject: [PATCH 10/12] [automated] bump solana image to v2.0.16 (#934) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- integration-tests/testconfig/default.toml | 2 +- scripts/install-solana-ci.sh | 2 +- scripts/setup-localnet/localnet.sh | 2 +- solana.nix | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/integration-tests/testconfig/default.toml b/integration-tests/testconfig/default.toml index e8cc0a535..386f1d7a9 100644 --- a/integration-tests/testconfig/default.toml +++ b/integration-tests/testconfig/default.toml @@ -38,7 +38,7 @@ inside_k8 = false network = 
"localnet" user = "default" stateful_db = false -devnet_image = "anzaxyz/agave:v2.0.15" +devnet_image = "anzaxyz/agave:v2.0.16" [OCR2] node_count = 6 diff --git a/scripts/install-solana-ci.sh b/scripts/install-solana-ci.sh index 8f62e30f9..146f53a69 100755 --- a/scripts/install-solana-ci.sh +++ b/scripts/install-solana-ci.sh @@ -2,5 +2,5 @@ set -euxo pipefail -sh -c "$(curl -sSfL https://release.anza.xyz/v2.0.15/install)" +sh -c "$(curl -sSfL https://release.anza.xyz/v2.0.16/install)" echo "PATH=$HOME/.local/share/solana/install/active_release/bin:$PATH" >> $GITHUB_ENV diff --git a/scripts/setup-localnet/localnet.sh b/scripts/setup-localnet/localnet.sh index 31a005ea1..04a6c6538 100755 --- a/scripts/setup-localnet/localnet.sh +++ b/scripts/setup-localnet/localnet.sh @@ -6,7 +6,7 @@ cpu_struct="linux"; # Clean up first bash "$(dirname -- "$0";)/localnet.down.sh" -container_version=v2.0.15 +container_version=v2.0.16 container_name="chainlink-solana.test-validator" echo "Starting $container_name@$container_version" diff --git a/solana.nix b/solana.nix index 401e015d7..fb137adfd 100644 --- a/solana.nix +++ b/solana.nix @@ -5,7 +5,7 @@ # Solana integration let - version = "v2.0.15"; + version = "v2.0.16"; getBinDerivation = { name, @@ -37,14 +37,14 @@ let name = "solana-cli-x86_64-linux"; filename = "solana-release-x86_64-unknown-linux-gnu.tar.bz2"; ### BEGIN_LINUX_SHA256 ### - sha256 = "sha256-Hd8qhNExur6CSHF7S1ZzRSGMZrJW9FHU3JzVnShvkLI="; + sha256 = "sha256-Wq8Ep4Dvs7GpiB6y8LCpw+43jRCuhWgBvxDu7c+2dao="; ### END_LINUX_SHA256 ### }; aarch64-apple-darwin = getBinDerivation { name = "solana-cli-aarch64-apple-darwin"; filename = "solana-release-aarch64-apple-darwin.tar.bz2"; ### BEGIN_DARWIN_SHA256 ### - sha256 = "sha256-BglUcvkGx+D0Has9/BqE1WWQ8PNfdOlc75OM5/jFn7E="; + sha256 = "sha256-Blgit1LdL9ykyAErX22xC7AqH+s4WNJ2oVt9HUIzVlk="; ### END_DARWIN_SHA256 ### }; }; From 4047dd3796f1b8a6dcf6a633cb6961d43041e6fa Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 21 Nov 
2024 16:35:28 -0500 Subject: [PATCH 11/12] Update NodeStates map (#937) --- go.mod | 2 +- go.sum | 4 ++-- integration-tests/go.mod | 2 +- integration-tests/go.sum | 4 ++-- pkg/solana/client/multinode/multi_node.go | 8 ++++---- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index 85c6898ee..57024c04c 100644 --- a/go.mod +++ b/go.mod @@ -84,7 +84,7 @@ require ( github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.11.1 // indirect github.com/riferrei/srclient v0.5.4 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect github.com/santhosh-tekuri/jsonschema/v5 v5.2.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect diff --git a/go.sum b/go.sum index e73004db8..cb99a152f 100644 --- a/go.sum +++ b/go.sum @@ -418,8 +418,8 @@ github.com/riferrei/srclient v0.5.4/go.mod h1:vbkLmWcgYa7JgfPvuy/+K8fTS0p1bApqad github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= diff --git 
a/integration-tests/go.mod b/integration-tests/go.mod index bbe5fade1..46970166c 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -367,7 +367,7 @@ require ( github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect diff --git a/integration-tests/go.sum b/integration-tests/go.sum index 23ab321c5..16df3feac 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1318,8 +1318,8 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= diff --git a/pkg/solana/client/multinode/multi_node.go b/pkg/solana/client/multinode/multi_node.go index 92a65912b..fa05a75cd 100644 --- a/pkg/solana/client/multinode/multi_node.go +++ b/pkg/solana/client/multinode/multi_node.go @@ 
-130,13 +130,13 @@ func (c *MultiNode[CHAIN_ID, RPC]) DoAll(ctx context.Context, do func(ctx contex return err } -func (c *MultiNode[CHAIN_ID, RPC]) NodeStates() map[string]NodeState { - states := map[string]NodeState{} +func (c *MultiNode[CHAIN_ID, RPC]) NodeStates() map[string]string { + states := map[string]string{} for _, n := range c.primaryNodes { - states[n.String()] = n.State() + states[n.Name()] = n.State().String() } for _, n := range c.sendOnlyNodes { - states[n.String()] = n.State() + states[n.Name()] = n.State().String() } return states } From c9ef4b36eb7b806cb3fed4a6e418f48db0305b85 Mon Sep 17 00:00:00 2001 From: Awbrey Hughlett Date: Fri, 22 Nov 2024 09:49:42 -0500 Subject: [PATCH 12/12] event loader with event watch and backfill (#911) * event loader with event watch and backfill * increase test coverage for log parsing * fix: handle new contracts (#940) --------- Co-authored-by: Aaron Lu <50029043+aalu1418@users.noreply.github.com> --- .mockery.yaml | 5 +- contracts/Anchor.toml | 12 +- contracts/Cargo.lock | 7 + .../generated/log_read_test/CreateLog.go | 146 ++ .../generated/log_read_test/CreateLog_test.go | 32 + contracts/generated/log_read_test/accounts.go | 3 + .../generated/log_read_test/instructions.go | 117 + .../generated/log_read_test/testing_utils.go | 20 + contracts/generated/log_read_test/types.go | 3 + contracts/pnpm-lock.yaml | 1991 +++++++++-------- contracts/programs/log-read-test/Cargo.toml | 19 + contracts/programs/log-read-test/Xargo.toml | 2 + contracts/programs/log-read-test/src/event.rs | 7 + contracts/programs/log-read-test/src/lib.rs | 25 + integration-tests/common/test_common.go | 5 +- integration-tests/smoke/event_loader_test.go | 296 +++ pkg/solana/client/test_helpers.go | 29 +- pkg/solana/logpoller/job.go | 147 ++ pkg/solana/logpoller/loader.go | 281 +++ pkg/solana/logpoller/loader_test.go | 366 +++ pkg/solana/logpoller/log_data_parser.go | 143 ++ pkg/solana/logpoller/log_data_parser_test.go | 203 ++ 
pkg/solana/logpoller/mocks/rpc_client.go | 280 +++ pkg/solana/logpoller/worker.go | 368 +++ pkg/solana/logpoller/worker_test.go | 251 +++ 25 files changed, 3862 insertions(+), 896 deletions(-) create mode 100644 contracts/generated/log_read_test/CreateLog.go create mode 100644 contracts/generated/log_read_test/CreateLog_test.go create mode 100644 contracts/generated/log_read_test/accounts.go create mode 100644 contracts/generated/log_read_test/instructions.go create mode 100644 contracts/generated/log_read_test/testing_utils.go create mode 100644 contracts/generated/log_read_test/types.go create mode 100644 contracts/programs/log-read-test/Cargo.toml create mode 100644 contracts/programs/log-read-test/Xargo.toml create mode 100644 contracts/programs/log-read-test/src/event.rs create mode 100644 contracts/programs/log-read-test/src/lib.rs create mode 100644 integration-tests/smoke/event_loader_test.go create mode 100644 pkg/solana/logpoller/job.go create mode 100644 pkg/solana/logpoller/loader.go create mode 100644 pkg/solana/logpoller/loader_test.go create mode 100644 pkg/solana/logpoller/log_data_parser.go create mode 100644 pkg/solana/logpoller/log_data_parser_test.go create mode 100644 pkg/solana/logpoller/mocks/rpc_client.go create mode 100644 pkg/solana/logpoller/worker.go create mode 100644 pkg/solana/logpoller/worker_test.go diff --git a/.mockery.yaml b/.mockery.yaml index 1ef8d4a73..1df96bfec 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -36,4 +36,7 @@ packages: SimpleKeystore: config: filename: simple_keystore.go - case: underscore \ No newline at end of file + case: underscore + github.com/smartcontractkit/chainlink-solana/pkg/solana/logpoller: + interfaces: + RPCClient: diff --git a/contracts/Anchor.toml b/contracts/Anchor.toml index 3611e1c0b..78a2222ad 100644 --- a/contracts/Anchor.toml +++ b/contracts/Anchor.toml @@ -1,12 +1,17 @@ +[toolchain] anchor_version = "0.29.0" +[features] +seeds = false +skip-lint = false + [registry] url = 
"https://anchor.projectserum.com" [provider] cluster = "localnet" -# wallet = "~/.config/solana/id.json" wallet = "id.json" +# wallet = "~/.config/solana/id.json" [scripts] test = "pnpm run test" @@ -21,6 +26,7 @@ test = "pnpm run test" # TODO: add pubkeys [programs.localnet] -ocr_2 = "cjg3oHmg9uuPsP8D6g29NWvhySJkdYdAo9D25PRbKXJ" # need to rename the idl to satisfy anchor.js... -store = "HEvSKofvBgfaexv23kMabbYqxasxU3mQ4ibBMEmJWHny" access_controller = "9xi644bRR8birboDGdTiwBq3C7VEeR7VuamRYYXCubUW" +log-read-test = "J1zQwrBNBngz26jRPNWsUSZMHJwBwpkoDitXRV95LdK4" +ocr_2 = "cjg3oHmg9uuPsP8D6g29NWvhySJkdYdAo9D25PRbKXJ" # need to rename the idl to satisfy anchor.js... +store = "HEvSKofvBgfaexv23kMabbYqxasxU3mQ4ibBMEmJWHny" \ No newline at end of file diff --git a/contracts/Cargo.lock b/contracts/Cargo.lock index ba4b5e1d7..953b2b81e 100644 --- a/contracts/Cargo.lock +++ b/contracts/Cargo.lock @@ -1214,6 +1214,13 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +[[package]] +name = "log-read-test" +version = "0.1.0" +dependencies = [ + "anchor-lang", +] + [[package]] name = "memchr" version = "2.7.1" diff --git a/contracts/generated/log_read_test/CreateLog.go b/contracts/generated/log_read_test/CreateLog.go new file mode 100644 index 000000000..45b0d3317 --- /dev/null +++ b/contracts/generated/log_read_test/CreateLog.go @@ -0,0 +1,146 @@ +// Code generated by https://github.com/gagliardetto/anchor-go. DO NOT EDIT. + +package log_read_test + +import ( + "errors" + ag_binary "github.com/gagliardetto/binary" + ag_solanago "github.com/gagliardetto/solana-go" + ag_format "github.com/gagliardetto/solana-go/text/format" + ag_treeout "github.com/gagliardetto/treeout" +) + +// CreateLog is the `createLog` instruction. 
+type CreateLog struct { + Value *uint64 + + // [0] = [SIGNER] authority + // + // [1] = [] systemProgram + ag_solanago.AccountMetaSlice `bin:"-" borsh_skip:"true"` +} + +// NewCreateLogInstructionBuilder creates a new `CreateLog` instruction builder. +func NewCreateLogInstructionBuilder() *CreateLog { + nd := &CreateLog{ + AccountMetaSlice: make(ag_solanago.AccountMetaSlice, 2), + } + return nd +} + +// SetValue sets the "value" parameter. +func (inst *CreateLog) SetValue(value uint64) *CreateLog { + inst.Value = &value + return inst +} + +// SetAuthorityAccount sets the "authority" account. +func (inst *CreateLog) SetAuthorityAccount(authority ag_solanago.PublicKey) *CreateLog { + inst.AccountMetaSlice[0] = ag_solanago.Meta(authority).SIGNER() + return inst +} + +// GetAuthorityAccount gets the "authority" account. +func (inst *CreateLog) GetAuthorityAccount() *ag_solanago.AccountMeta { + return inst.AccountMetaSlice[0] +} + +// SetSystemProgramAccount sets the "systemProgram" account. +func (inst *CreateLog) SetSystemProgramAccount(systemProgram ag_solanago.PublicKey) *CreateLog { + inst.AccountMetaSlice[1] = ag_solanago.Meta(systemProgram) + return inst +} + +// GetSystemProgramAccount gets the "systemProgram" account. +func (inst *CreateLog) GetSystemProgramAccount() *ag_solanago.AccountMeta { + return inst.AccountMetaSlice[1] +} + +func (inst CreateLog) Build() *Instruction { + return &Instruction{BaseVariant: ag_binary.BaseVariant{ + Impl: inst, + TypeID: Instruction_CreateLog, + }} +} + +// ValidateAndBuild validates the instruction parameters and accounts; +// if there is a validation error, it returns the error. +// Otherwise, it builds and returns the instruction. 
+func (inst CreateLog) ValidateAndBuild() (*Instruction, error) { + if err := inst.Validate(); err != nil { + return nil, err + } + return inst.Build(), nil +} + +func (inst *CreateLog) Validate() error { + // Check whether all (required) parameters are set: + { + if inst.Value == nil { + return errors.New("Value parameter is not set") + } + } + + // Check whether all (required) accounts are set: + { + if inst.AccountMetaSlice[0] == nil { + return errors.New("accounts.Authority is not set") + } + if inst.AccountMetaSlice[1] == nil { + return errors.New("accounts.SystemProgram is not set") + } + } + return nil +} + +func (inst *CreateLog) EncodeToTree(parent ag_treeout.Branches) { + parent.Child(ag_format.Program(ProgramName, ProgramID)). + // + ParentFunc(func(programBranch ag_treeout.Branches) { + programBranch.Child(ag_format.Instruction("CreateLog")). + // + ParentFunc(func(instructionBranch ag_treeout.Branches) { + + // Parameters of the instruction: + instructionBranch.Child("Params[len=1]").ParentFunc(func(paramsBranch ag_treeout.Branches) { + paramsBranch.Child(ag_format.Param("Value", *inst.Value)) + }) + + // Accounts of the instruction: + instructionBranch.Child("Accounts[len=2]").ParentFunc(func(accountsBranch ag_treeout.Branches) { + accountsBranch.Child(ag_format.Meta(" authority", inst.AccountMetaSlice[0])) + accountsBranch.Child(ag_format.Meta("systemProgram", inst.AccountMetaSlice[1])) + }) + }) + }) +} + +func (obj CreateLog) MarshalWithEncoder(encoder *ag_binary.Encoder) (err error) { + // Serialize `Value` param: + err = encoder.Encode(obj.Value) + if err != nil { + return err + } + return nil +} +func (obj *CreateLog) UnmarshalWithDecoder(decoder *ag_binary.Decoder) (err error) { + // Deserialize `Value`: + err = decoder.Decode(&obj.Value) + if err != nil { + return err + } + return nil +} + +// NewCreateLogInstruction declares a new CreateLog instruction with the provided parameters and accounts. 
+func NewCreateLogInstruction( + // Parameters: + value uint64, + // Accounts: + authority ag_solanago.PublicKey, + systemProgram ag_solanago.PublicKey) *CreateLog { + return NewCreateLogInstructionBuilder(). + SetValue(value). + SetAuthorityAccount(authority). + SetSystemProgramAccount(systemProgram) +} diff --git a/contracts/generated/log_read_test/CreateLog_test.go b/contracts/generated/log_read_test/CreateLog_test.go new file mode 100644 index 000000000..e15524426 --- /dev/null +++ b/contracts/generated/log_read_test/CreateLog_test.go @@ -0,0 +1,32 @@ +// Code generated by https://github.com/gagliardetto/anchor-go. DO NOT EDIT. + +package log_read_test + +import ( + "bytes" + ag_gofuzz "github.com/gagliardetto/gofuzz" + ag_require "github.com/stretchr/testify/require" + "strconv" + "testing" +) + +func TestEncodeDecode_CreateLog(t *testing.T) { + fu := ag_gofuzz.New().NilChance(0) + for i := 0; i < 1; i++ { + t.Run("CreateLog"+strconv.Itoa(i), func(t *testing.T) { + { + params := new(CreateLog) + fu.Fuzz(params) + params.AccountMetaSlice = nil + buf := new(bytes.Buffer) + err := encodeT(*params, buf) + ag_require.NoError(t, err) + got := new(CreateLog) + err = decodeT(got, buf.Bytes()) + got.AccountMetaSlice = nil + ag_require.NoError(t, err) + ag_require.Equal(t, params, got) + } + }) + } +} diff --git a/contracts/generated/log_read_test/accounts.go b/contracts/generated/log_read_test/accounts.go new file mode 100644 index 000000000..981b967ad --- /dev/null +++ b/contracts/generated/log_read_test/accounts.go @@ -0,0 +1,3 @@ +// Code generated by https://github.com/gagliardetto/anchor-go. DO NOT EDIT. + +package log_read_test diff --git a/contracts/generated/log_read_test/instructions.go b/contracts/generated/log_read_test/instructions.go new file mode 100644 index 000000000..919528bbf --- /dev/null +++ b/contracts/generated/log_read_test/instructions.go @@ -0,0 +1,117 @@ +// Code generated by https://github.com/gagliardetto/anchor-go. DO NOT EDIT. 
+ +package log_read_test + +import ( + "bytes" + "fmt" + ag_spew "github.com/davecgh/go-spew/spew" + ag_binary "github.com/gagliardetto/binary" + ag_solanago "github.com/gagliardetto/solana-go" + ag_text "github.com/gagliardetto/solana-go/text" + ag_treeout "github.com/gagliardetto/treeout" +) + +var ProgramID ag_solanago.PublicKey + +func SetProgramID(pubkey ag_solanago.PublicKey) { + ProgramID = pubkey + ag_solanago.RegisterInstructionDecoder(ProgramID, registryDecodeInstruction) +} + +const ProgramName = "LogReadTest" + +func init() { + if !ProgramID.IsZero() { + ag_solanago.RegisterInstructionDecoder(ProgramID, registryDecodeInstruction) + } +} + +var ( + Instruction_CreateLog = ag_binary.TypeID([8]byte{215, 95, 248, 114, 153, 204, 208, 48}) +) + +// InstructionIDToName returns the name of the instruction given its ID. +func InstructionIDToName(id ag_binary.TypeID) string { + switch id { + case Instruction_CreateLog: + return "CreateLog" + default: + return "" + } +} + +type Instruction struct { + ag_binary.BaseVariant +} + +func (inst *Instruction) EncodeToTree(parent ag_treeout.Branches) { + if enToTree, ok := inst.Impl.(ag_text.EncodableToTree); ok { + enToTree.EncodeToTree(parent) + } else { + parent.Child(ag_spew.Sdump(inst)) + } +} + +var InstructionImplDef = ag_binary.NewVariantDefinition( + ag_binary.AnchorTypeIDEncoding, + []ag_binary.VariantType{ + { + "create_log", (*CreateLog)(nil), + }, + }, +) + +func (inst *Instruction) ProgramID() ag_solanago.PublicKey { + return ProgramID +} + +func (inst *Instruction) Accounts() (out []*ag_solanago.AccountMeta) { + return inst.Impl.(ag_solanago.AccountsGettable).GetAccounts() +} + +func (inst *Instruction) Data() ([]byte, error) { + buf := new(bytes.Buffer) + if err := ag_binary.NewBorshEncoder(buf).Encode(inst); err != nil { + return nil, fmt.Errorf("unable to encode instruction: %w", err) + } + return buf.Bytes(), nil +} + +func (inst *Instruction) TextEncode(encoder *ag_text.Encoder, option *ag_text.Option) 
error { + return encoder.Encode(inst.Impl, option) +} + +func (inst *Instruction) UnmarshalWithDecoder(decoder *ag_binary.Decoder) error { + return inst.BaseVariant.UnmarshalBinaryVariant(decoder, InstructionImplDef) +} + +func (inst *Instruction) MarshalWithEncoder(encoder *ag_binary.Encoder) error { + err := encoder.WriteBytes(inst.TypeID.Bytes(), false) + if err != nil { + return fmt.Errorf("unable to write variant type: %w", err) + } + return encoder.Encode(inst.Impl) +} + +func registryDecodeInstruction(accounts []*ag_solanago.AccountMeta, data []byte) (interface{}, error) { + inst, err := DecodeInstruction(accounts, data) + if err != nil { + return nil, err + } + return inst, nil +} + +func DecodeInstruction(accounts []*ag_solanago.AccountMeta, data []byte) (*Instruction, error) { + inst := new(Instruction) + if err := ag_binary.NewBorshDecoder(data).Decode(inst); err != nil { + return nil, fmt.Errorf("unable to decode instruction: %w", err) + } + if v, ok := inst.Impl.(ag_solanago.AccountsSettable); ok { + err := v.SetAccounts(accounts) + if err != nil { + return nil, fmt.Errorf("unable to set accounts for instruction: %w", err) + } + } + return inst, nil +} diff --git a/contracts/generated/log_read_test/testing_utils.go b/contracts/generated/log_read_test/testing_utils.go new file mode 100644 index 000000000..963931602 --- /dev/null +++ b/contracts/generated/log_read_test/testing_utils.go @@ -0,0 +1,20 @@ +// Code generated by https://github.com/gagliardetto/anchor-go. DO NOT EDIT. 
+ +package log_read_test + +import ( + "bytes" + "fmt" + ag_binary "github.com/gagliardetto/binary" +) + +func encodeT(data interface{}, buf *bytes.Buffer) error { + if err := ag_binary.NewBorshEncoder(buf).Encode(data); err != nil { + return fmt.Errorf("unable to encode instruction: %w", err) + } + return nil +} + +func decodeT(dst interface{}, data []byte) error { + return ag_binary.NewBorshDecoder(data).Decode(dst) +} diff --git a/contracts/generated/log_read_test/types.go b/contracts/generated/log_read_test/types.go new file mode 100644 index 000000000..981b967ad --- /dev/null +++ b/contracts/generated/log_read_test/types.go @@ -0,0 +1,3 @@ +// Code generated by https://github.com/gagliardetto/anchor-go. DO NOT EDIT. + +package log_read_test diff --git a/contracts/pnpm-lock.yaml b/contracts/pnpm-lock.yaml index 86fd48c66..860108de1 100644 --- a/contracts/pnpm-lock.yaml +++ b/contracts/pnpm-lock.yaml @@ -1,77 +1,899 @@ -lockfileVersion: '6.0' +lockfileVersion: '9.0' settings: autoInstallPeers: true excludeLinksFromLockfile: false -dependencies: - '@chainlink/solana-sdk': - specifier: link:../ts - version: link:../ts - '@coral-xyz/anchor': - specifier: ^0.29.0 - version: 0.29.0 - '@solana/spl-token': - specifier: ^0.3.5 - version: 0.3.11(@solana/web3.js@1.92.3)(fastestsmallesttextencoderdecoder@1.0.22) - '@solana/web3.js': - specifier: ^1.50.1 <=1.92.3 - version: 1.92.3 - '@types/chai': - specifier: ^4.2.22 - version: 4.3.12 - '@types/mocha': - specifier: ^9.0.0 - version: 9.1.1 - '@types/node': - specifier: ^14.14.37 - version: 14.18.63 - '@types/secp256k1': - specifier: ^4.0.3 - version: 4.0.6 - bn.js: - specifier: ^5.2.0 - version: 5.2.1 - borsh: - specifier: ^0.7.0 - version: 0.7.0 - chai: - specifier: ^4.3.4 - version: 4.4.1 - ethereum-cryptography: - specifier: ^0.1.3 - version: 0.1.3 - mocha: - specifier: ^9.0.0 - version: 9.2.2 - prettier: - specifier: ^2.5.1 - version: 2.8.8 - rpc-websockets: - specifier: <=7.10.0 - version: 7.10.0 - secp256k1: - 
specifier: ^4.0.2 - version: 4.0.3 - ts-mocha: - specifier: ^8.0.0 - version: 8.0.0(mocha@9.2.2) - typescript: - specifier: ^4.5.4 - version: 4.9.5 +importers: + + .: + dependencies: + '@chainlink/solana-sdk': + specifier: link:../ts + version: link:../ts + '@coral-xyz/anchor': + specifier: ^0.29.0 + version: 0.29.0 + '@solana/spl-token': + specifier: ^0.3.5 + version: 0.3.11(@solana/web3.js@1.92.3)(fastestsmallesttextencoderdecoder@1.0.22) + '@solana/web3.js': + specifier: ^1.50.1 <=1.92.3 + version: 1.92.3 + '@types/chai': + specifier: ^4.2.22 + version: 4.3.12 + '@types/mocha': + specifier: ^9.0.0 + version: 9.1.1 + '@types/node': + specifier: ^14.14.37 + version: 14.18.63 + '@types/secp256k1': + specifier: ^4.0.3 + version: 4.0.6 + bn.js: + specifier: ^5.2.0 + version: 5.2.1 + borsh: + specifier: ^0.7.0 + version: 0.7.0 + chai: + specifier: ^4.3.4 + version: 4.4.1 + ethereum-cryptography: + specifier: ^0.1.3 + version: 0.1.3 + mocha: + specifier: ^9.0.0 + version: 9.2.2 + prettier: + specifier: ^2.5.1 + version: 2.8.8 + rpc-websockets: + specifier: <=7.10.0 + version: 7.10.0 + secp256k1: + specifier: ^4.0.2 + version: 4.0.3 + ts-mocha: + specifier: ^8.0.0 + version: 8.0.0(mocha@9.2.2) + typescript: + specifier: ^4.5.4 + version: 4.9.5 packages: - /@babel/runtime@7.25.6: + '@babel/runtime@7.25.6': resolution: {integrity: sha512-VBj9MYyDb9tuLq7yzqjgzt6Q+IBQLrGZfdjOekyEirZPHxXWoTSGUTMrpsfi58Up73d13NfYLv8HT9vmznjzhQ==} engines: {node: '>=6.9.0'} + + '@coral-xyz/anchor@0.29.0': + resolution: {integrity: sha512-eny6QNG0WOwqV0zQ7cs/b1tIuzZGmP7U7EcH+ogt4Gdbl8HDmIYVMh/9aTmYZPaFWjtUaI8qSn73uYEXWfATdA==} + engines: {node: '>=11'} + + '@coral-xyz/borsh@0.29.0': + resolution: {integrity: sha512-s7VFVa3a0oqpkuRloWVPdCK7hMbAMY270geZOGfCnaqexrP5dTIpbEHL33req6IYPPJ0hYa71cdvJ1h6V55/oQ==} + engines: {node: '>=10'} + peerDependencies: + '@solana/web3.js': ^1.68.0 + + '@noble/curves@1.6.0': + resolution: {integrity: 
sha512-TlaHRXDehJuRNR9TfZDNQ45mMEd5dwUwmicsafcIX4SsNiqnCHKjE/1alYPd/lDRVhxdhUAlv8uEhMCI5zjIJQ==} + engines: {node: ^14.21.3 || >=16} + + '@noble/hashes@1.5.0': + resolution: {integrity: sha512-1j6kQFb7QRru7eKN3ZDvRcP13rugwdxZqCjbiAVZfIJwgj2A65UmT4TgARXGlXgnRkORLTDTrO19ZErt7+QXgA==} + engines: {node: ^14.21.3 || >=16} + + '@solana/buffer-layout-utils@0.2.0': + resolution: {integrity: sha512-szG4sxgJGktbuZYDg2FfNmkMi0DYQoVjN2h7ta1W1hPrwzarcFLBq9UpX1UjNXsNpT9dn+chgprtWGioUAr4/g==} + engines: {node: '>= 10'} + + '@solana/buffer-layout@4.0.1': + resolution: {integrity: sha512-E1ImOIAD1tBZFRdjeM4/pzTiTApC0AOBGwyAMS4fwIodCWArzJ3DWdoh8cKxeFM2fElkxBh2Aqts1BPC373rHA==} + engines: {node: '>=5.10'} + + '@solana/codecs-core@2.0.0-experimental.8618508': + resolution: {integrity: sha512-JCz7mKjVKtfZxkuDtwMAUgA7YvJcA2BwpZaA1NOLcted4OMC4Prwa3DUe3f3181ixPYaRyptbF0Ikq2MbDkYEA==} + + '@solana/codecs-data-structures@2.0.0-experimental.8618508': + resolution: {integrity: sha512-sLpjL9sqzaDdkloBPV61Rht1tgaKq98BCtIKRuyscIrmVPu3wu0Bavk2n/QekmUzaTsj7K1pVSniM0YqCdnEBw==} + + '@solana/codecs-numbers@2.0.0-experimental.8618508': + resolution: {integrity: sha512-EXQKfzFr3CkKKNzKSZPOOOzchXsFe90TVONWsSnVkonO9z+nGKALE0/L9uBmIFGgdzhhU9QQVFvxBMclIDJo2Q==} + + '@solana/codecs-strings@2.0.0-experimental.8618508': + resolution: {integrity: sha512-b2yhinr1+oe+JDmnnsV0641KQqqDG8AQ16Z/x7GVWO+AWHMpRlHWVXOq8U1yhPMA4VXxl7i+D+C6ql0VGFp0GA==} + peerDependencies: + fastestsmallesttextencoderdecoder: ^1.0.22 + + '@solana/options@2.0.0-experimental.8618508': + resolution: {integrity: sha512-fy/nIRAMC3QHvnKi63KEd86Xr/zFBVxNW4nEpVEU2OT0gCEKwHY4Z55YHf7XujhyuM3PNpiBKg/YYw5QlRU4vg==} + + '@solana/spl-token-metadata@0.1.2': + resolution: {integrity: sha512-hJYnAJNkDrtkE2Q41YZhCpeOGU/0JgRFXbtrtOuGGeKc3pkEUHB9DDoxZAxx+XRno13GozUleyBi0qypz4c3bw==} + engines: {node: '>=16'} + peerDependencies: + '@solana/web3.js': ^1.87.6 + + '@solana/spl-token@0.3.11': + resolution: {integrity: 
sha512-bvohO3rIMSVL24Pb+I4EYTJ6cL82eFpInEXD/I8K8upOGjpqHsKUoAempR/RnUlI1qSFNyFlWJfu6MNUgfbCQQ==} + engines: {node: '>=16'} + peerDependencies: + '@solana/web3.js': ^1.88.0 + + '@solana/spl-type-length-value@0.1.0': + resolution: {integrity: sha512-JBMGB0oR4lPttOZ5XiUGyvylwLQjt1CPJa6qQ5oM+MBCndfjz2TKKkw0eATlLLcYmq1jBVsNlJ2cD6ns2GR7lA==} + engines: {node: '>=16'} + + '@solana/web3.js@1.92.3': + resolution: {integrity: sha512-NVBWvb9zdJIAx6X+caXaIICCEQfQaQ8ygykCjJW4u2z/sIKcvPj3ZIIllnx0MWMc3IxGq15ozGYDOQIMbwUcHw==} + + '@solana/web3.js@1.95.3': + resolution: {integrity: sha512-O6rPUN0w2fkNqx/Z3QJMB9L225Ex10PRDH8bTaIUPZXMPV0QP8ZpPvjQnXK+upUczlRgzHzd6SjKIha1p+I6og==} + + '@swc/helpers@0.5.13': + resolution: {integrity: sha512-UoKGxQ3r5kYI9dALKJapMmuK+1zWM/H17Z1+iwnNmzcJRnfFuevZs375TA5rW31pu4BS4NoSy1fRsexDXfWn5w==} + + '@types/chai@4.3.12': + resolution: {integrity: sha512-zNKDHG/1yxm8Il6uCCVsm+dRdEsJlFoDu73X17y09bId6UwoYww+vFBsAcRzl8knM1sab3Dp1VRikFQwDOtDDw==} + + '@types/connect@3.4.38': + resolution: {integrity: sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==} + + '@types/json5@0.0.29': + resolution: {integrity: sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==} + + '@types/mocha@9.1.1': + resolution: {integrity: sha512-Z61JK7DKDtdKTWwLeElSEBcWGRLY8g95ic5FoQqI9CMx0ns/Ghep3B4DfcEimiKMvtamNVULVNKEsiwV3aQmXw==} + + '@types/node@12.20.55': + resolution: {integrity: sha512-J8xLz7q2OFulZ2cyGTLE1TbbZcjpno7FaN6zdJNrgAdrJ+DZzh/uFR6YrTb4C+nXakvud8Q4+rbhoIWlYQbUFQ==} + + '@types/node@14.18.63': + resolution: {integrity: sha512-fAtCfv4jJg+ExtXhvCkCqUKZ+4ok/JQk01qDKhL5BDDoS3AxKXhV5/MAVUZyQnSEd2GT92fkgZl0pz0Q0AzcIQ==} + + '@types/node@22.6.0': + resolution: {integrity: sha512-QyR8d5bmq+eR72TwQDfujwShHMcIrWIYsaQFtXRE58MHPTEKUNxjxvl0yS0qPMds5xbSDWtp7ZpvGFtd7dfMdQ==} + + '@types/pbkdf2@3.1.2': + resolution: {integrity: 
sha512-uRwJqmiXmh9++aSu1VNEn3iIxWOhd8AHXNSdlaLfdAAdSTY9jYVeGWnzejM3dvrkbqE3/hyQkQQ29IFATEGlew==} + + '@types/secp256k1@4.0.6': + resolution: {integrity: sha512-hHxJU6PAEUn0TP4S/ZOzuTUvJWuZ6eIKeNKb5RBpODvSl6hp1Wrw4s7ATY50rklRCScUDpHzVA/DQdSjJ3UoYQ==} + + '@types/uuid@8.3.4': + resolution: {integrity: sha512-c/I8ZRb51j+pYGAu5CrFMRxqZ2ke4y2grEBO5AUjgSkSk+qT2Ea+OdWElz/OiMf5MNpn2b17kuVBwZLQJXzihw==} + + '@types/ws@7.4.7': + resolution: {integrity: sha512-JQbbmxZTZehdc2iszGKs5oC3NFnjeay7mtAWrdt7qNtAVK0g19muApzAy4bm9byz79xa2ZnO/BOBC2R8RC5Lww==} + + '@types/ws@8.5.12': + resolution: {integrity: sha512-3tPRkv1EtkDpzlgyKyI8pGsGZAGPEaXeu0DOj5DI25Ja91bdAYddYHbADRYVrZMRbfW+1l5YwXVDKohDJNQxkQ==} + + '@ungap/promise-all-settled@1.1.2': + resolution: {integrity: sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==} + + JSONStream@1.3.5: + resolution: {integrity: sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==} + hasBin: true + + agentkeepalive@4.5.0: + resolution: {integrity: sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==} + engines: {node: '>= 8.0.0'} + + ansi-colors@4.1.1: + resolution: {integrity: sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==} + engines: {node: '>=6'} + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + + anymatch@3.1.3: + resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} + engines: {node: '>= 8'} + + argparse@2.0.1: + resolution: {integrity: 
sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + arrify@1.0.1: + resolution: {integrity: sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==} + engines: {node: '>=0.10.0'} + + assertion-error@1.1.0: + resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==} + + balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + base-x@3.0.10: + resolution: {integrity: sha512-7d0s06rR9rYaIWHkpfLIFICM/tkSVdoPC9qYAQRpxn9DdKNWNsKC0uk++akckyLq16Tx2WIinnZ6WRriAt6njQ==} + + base64-js@1.5.1: + resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} + + bigint-buffer@1.1.5: + resolution: {integrity: sha512-trfYco6AoZ+rKhKnxA0hgX0HAbVP/s808/EuDSe2JDzUnCp/xAsli35Orvk67UrTEcwuxZqYZDmfA2RXJgxVvA==} + engines: {node: '>= 10.0.0'} + + bignumber.js@9.1.2: + resolution: {integrity: sha512-2/mKyZH9K85bzOEfhXDBFZTGd1CTs+5IHpeFQo9luiBG7hghdC851Pj2WAhb6E3R6b9tZj/XKhbg4fum+Kepug==} + + binary-extensions@2.3.0: + resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==} + engines: {node: '>=8'} + + bindings@1.5.0: + resolution: {integrity: sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==} + + blakejs@1.2.1: + resolution: {integrity: sha512-QXUSXI3QVc/gJME0dBpXrag1kbzOqCjCX8/b54ntNyW6sjtoqxqRk3LTmXzaJoh71zMsDCjM+47jS7XiwN/+fQ==} + + bn.js@4.12.0: + resolution: {integrity: sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==} + + bn.js@5.2.1: + resolution: {integrity: sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==} + + borsh@0.7.0: + resolution: {integrity: 
sha512-CLCsZGIBCFnPtkNnieW/a8wmreDmfUtjU2m9yHrzPXIlNbqVs0AQrSatSG6vdNYUqdc83tkQi2eHfF98ubzQLA==} + + brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + + braces@3.0.2: + resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==} + engines: {node: '>=8'} + + brorand@1.1.0: + resolution: {integrity: sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w==} + + browser-stdout@1.3.1: + resolution: {integrity: sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==} + + browserify-aes@1.2.0: + resolution: {integrity: sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==} + + bs58@4.0.1: + resolution: {integrity: sha512-Ok3Wdf5vOIlBrgCvTq96gBkJw+JUEzdBgyaza5HLtPm7yTHkjRy8+JzNyHF7BHa0bNWOQIp3m5YF0nnFcOIKLw==} + + bs58check@2.1.2: + resolution: {integrity: sha512-0TS1jicxdU09dwJMNZtVAfzPi6Q6QeN0pM1Fkzrjn+XYHvzMKPU3pHVpva+769iNVSfIYWf7LJ6WR+BuuMf8cA==} + + buffer-from@1.1.2: + resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} + + buffer-layout@1.2.2: + resolution: {integrity: sha512-kWSuLN694+KTk8SrYvCqwP2WcgQjoRCiF5b4QDvkkz8EmgD+aWAIceGFKMIAdmF/pH+vpgNV3d3kAKorcdAmWA==} + engines: {node: '>=4.5'} + + buffer-xor@1.0.3: + resolution: {integrity: sha512-571s0T7nZWK6vB67HI5dyUF7wXiNcfaPPPTl6zYCNApANjIvYJTg7hlud/+cJpdAhS7dVzqMLmfhfHR3rAcOjQ==} + + buffer@6.0.3: + resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} + + bufferutil@4.0.8: + resolution: {integrity: sha512-4T53u4PdgsXqKaIctwF8ifXlRTTmEPJ8iEPWFdGZvcf7sbwYo6FKFEX9eNNAnzFZ7EzJAQ3CJeOtCRA4rDp7Pw==} + engines: {node: '>=6.14.2'} + + camelcase@6.3.0: + resolution: {integrity: 
sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} + engines: {node: '>=10'} + + chai@4.4.1: + resolution: {integrity: sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g==} + engines: {node: '>=4'} + + chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + + check-error@1.0.3: + resolution: {integrity: sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==} + + chokidar@3.5.3: + resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==} + engines: {node: '>= 8.10.0'} + + cipher-base@1.0.4: + resolution: {integrity: sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==} + + cliui@7.0.4: + resolution: {integrity: sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + commander@2.20.3: + resolution: {integrity: sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==} + + concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + + create-hash@1.2.0: + resolution: {integrity: sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==} + + create-hmac@1.1.7: + resolution: {integrity: sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==} + + cross-fetch@3.1.8: + resolution: 
{integrity: sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg==} + + crypto-hash@1.3.0: + resolution: {integrity: sha512-lyAZ0EMyjDkVvz8WOeVnuCPvKVBXcMv1l5SVqO1yC7PzTwrD/pPje/BIRbWhMoPe436U+Y2nD7f5bFx0kt+Sbg==} + engines: {node: '>=8'} + + debug@4.3.3: + resolution: {integrity: sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + decamelize@4.0.0: + resolution: {integrity: sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==} + engines: {node: '>=10'} + + deep-eql@4.1.3: + resolution: {integrity: sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==} + engines: {node: '>=6'} + + delay@5.0.0: + resolution: {integrity: sha512-ReEBKkIfe4ya47wlPYf/gu5ib6yUG0/Aez0JQZQz94kiWtRQvZIQbTiehsnwHvLSWJnQdhVeqYue7Id1dKr0qw==} + engines: {node: '>=10'} + + diff@3.5.0: + resolution: {integrity: sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==} + engines: {node: '>=0.3.1'} + + diff@5.0.0: + resolution: {integrity: sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==} + engines: {node: '>=0.3.1'} + + dot-case@3.0.4: + resolution: {integrity: sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==} + + elliptic@6.5.5: + resolution: {integrity: sha512-7EjbcmUm17NQFu4Pmgmq2olYMj8nwMnpcddByChSUjArp8F5DQWcIcpriwO4ZToLNAJig0yiyjswfyGNje/ixw==} + + emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + + es6-promise@4.2.8: + resolution: {integrity: sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==} + + es6-promisify@5.0.0: + 
resolution: {integrity: sha512-C+d6UdsYDk0lMebHNR4S2NybQMMngAOnOwYBQjTOiv0MkoJMP0Myw2mgpDLBcpfCmRLxyFqYhS/CfOENq4SJhQ==} + + escalade@3.1.2: + resolution: {integrity: sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==} + engines: {node: '>=6'} + + escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + + ethereum-cryptography@0.1.3: + resolution: {integrity: sha512-w8/4x1SGGzc+tO97TASLja6SLd3fRIK2tLVcV2Gx4IB21hE19atll5Cq9o3d0ZmAYC/8aw0ipieTSiekAea4SQ==} + + eventemitter3@4.0.7: + resolution: {integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==} + + eventemitter3@5.0.1: + resolution: {integrity: sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==} + + evp_bytestokey@1.0.3: + resolution: {integrity: sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==} + + eyes@0.1.8: + resolution: {integrity: sha512-GipyPsXO1anza0AOZdy69Im7hGFCNB7Y/NGjDlZGJ3GJJLtwNSb2vrzYrTYJRrRloVx7pl+bhUaTB8yiccPvFQ==} + engines: {node: '> 0.1.90'} + + fast-stable-stringify@1.0.0: + resolution: {integrity: sha512-wpYMUmFu5f00Sm0cj2pfivpmawLZ0NKdviQ4w9zJeR8JVtOpOxHmLaJuj0vxvGqMJQWyP/COUkF75/57OKyRag==} + + fastestsmallesttextencoderdecoder@1.0.22: + resolution: {integrity: sha512-Pb8d48e+oIuY4MaM64Cd7OW1gt4nxCHs7/ddPPZ/Ic3sg8yVGM7O9wDvZ7us6ScaUupzM+pfBolwtYhN1IxBIw==} + + file-uri-to-path@1.0.0: + resolution: {integrity: sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==} + + fill-range@7.0.1: + resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==} + engines: {node: '>=8'} + + find-up@5.0.0: + resolution: {integrity: 
sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + + flat@5.0.2: + resolution: {integrity: sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==} + hasBin: true + + fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + get-caller-file@2.0.5: + resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + + get-func-name@2.0.2: + resolution: {integrity: sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==} + + glob-parent@5.1.2: + resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} + engines: {node: '>= 6'} + + glob@7.2.0: + resolution: {integrity: sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==} + deprecated: Glob versions prior to v9 are no longer supported + + growl@1.10.5: + resolution: {integrity: sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==} + engines: {node: '>=4.x'} + + has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + hash-base@3.1.0: + resolution: {integrity: sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==} + engines: {node: '>=4'} + + hash.js@1.1.7: + resolution: {integrity: sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==} + + 
he@1.2.0: + resolution: {integrity: sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==} + hasBin: true + + hmac-drbg@1.0.1: + resolution: {integrity: sha512-Tti3gMqLdZfhOQY1Mzf/AanLiqh1WTiJgEj26ZuYQ9fbkLomzGchCws4FyrSd4VkpBfiNhaE1On+lOz894jvXg==} + + humanize-ms@1.2.1: + resolution: {integrity: sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==} + + ieee754@1.2.1: + resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} + + inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. + + inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + is-binary-path@2.1.0: + resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} + engines: {node: '>=8'} + + is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + + is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + + is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + + is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + + is-plain-obj@2.1.0: + 
resolution: {integrity: sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==} + engines: {node: '>=8'} + + is-unicode-supported@0.1.0: + resolution: {integrity: sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==} + engines: {node: '>=10'} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + isomorphic-ws@4.0.1: + resolution: {integrity: sha512-BhBvN2MBpWTaSHdWRb/bwdZJ1WaehQ2L1KngkCkfLUGF0mAWAT1sQUQacEmQ0jXkFw/czDXPNQSL5u2/Krsz1w==} + peerDependencies: + ws: '*' + + jayson@4.1.2: + resolution: {integrity: sha512-5nzMWDHy6f+koZOuYsArh2AXs73NfWYVlFyJJuCedr93GpY+Ku8qq10ropSXVfHK+H0T6paA88ww+/dV+1fBNA==} + engines: {node: '>=8'} + hasBin: true + + js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} + hasBin: true + + json-stringify-safe@5.0.1: + resolution: {integrity: sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==} + + json5@1.0.2: + resolution: {integrity: sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==} + hasBin: true + + jsonparse@1.3.1: + resolution: {integrity: sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==} + engines: {'0': node >= 0.2.0} + + keccak@3.0.4: + resolution: {integrity: sha512-3vKuW0jV8J3XNTzvfyicFR5qvxrSAGl7KIhvgOu5cmWwM7tZRj3fMbj/pfIf4be7aznbc+prBWGjywox/g2Y6Q==} + engines: {node: '>=10.0.0'} + + locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + + log-symbols@4.1.0: + resolution: {integrity: sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==} + engines: {node: '>=10'} + + loupe@2.3.7: 
+ resolution: {integrity: sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==} + + lower-case@2.0.2: + resolution: {integrity: sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==} + + make-error@1.3.6: + resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} + + md5.js@1.3.5: + resolution: {integrity: sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==} + + minimalistic-assert@1.0.1: + resolution: {integrity: sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==} + + minimalistic-crypto-utils@1.0.1: + resolution: {integrity: sha512-JIYlbt6g8i5jKfJ3xz7rF0LXmv2TkDxBLUkiBeZ7bAx4GnnNMr8xFpGnOxn6GhTEHx3SjRrZEoU+j04prX1ktg==} + + minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + + minimatch@4.2.1: + resolution: {integrity: sha512-9Uq1ChtSZO+Mxa/CL1eGizn2vRn3MlLgzhT0Iz8zaY8NdvxvB0d5QdPFmCKf7JKA9Lerx5vRrnwO03jsSfGG9g==} + engines: {node: '>=10'} + + minimist@1.2.8: + resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} + + mkdirp@0.5.6: + resolution: {integrity: sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==} + hasBin: true + + mocha@9.2.2: + resolution: {integrity: sha512-L6XC3EdwT6YrIk0yXpavvLkn8h+EU+Y5UcCHKECyMbdUIxyMuZj4bX4U9e1nvnvUUvQVsV2VHQr5zLdcUkhW/g==} + engines: {node: '>= 12.0.0'} + hasBin: true + + ms@2.1.2: + resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + nanoid@3.3.1: + resolution: {integrity: 
sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + no-case@3.0.4: + resolution: {integrity: sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==} + + node-addon-api@2.0.2: + resolution: {integrity: sha512-Ntyt4AIXyaLIuMHF6IOoTakB3K+RWxwtsHNRxllEoA6vPwP9o4866g6YWDLUdnucilZhmkxiHwHr11gAENw+QA==} + + node-fetch@2.7.0: + resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} + engines: {node: 4.x || >=6.0.0} + peerDependencies: + encoding: ^0.1.0 + peerDependenciesMeta: + encoding: + optional: true + + node-gyp-build@4.8.2: + resolution: {integrity: sha512-IRUxE4BVsHWXkV/SFOut4qTlagw2aM8T5/vnTsmrHJvVoKueJHRc/JaFND7QDDc61kLYUJ6qlZM3sqTSyx2dTw==} + hasBin: true + + normalize-path@3.0.0: + resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + engines: {node: '>=0.10.0'} + + once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + + p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + + p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + + pako@2.1.0: + resolution: {integrity: sha512-w+eufiZ1WuJYgPXbV/PO3NCMEc3xqylkKHzp8bxp1uW4qaSNQUkwmLLEc3kKsfz8lpV1F8Ht3U1Cm+9Srog2ug==} + + path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + + path-is-absolute@1.0.1: + resolution: {integrity: 
sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + + pathval@1.1.1: + resolution: {integrity: sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==} + + pbkdf2@3.1.2: + resolution: {integrity: sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA==} + engines: {node: '>=0.12'} + + picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + + prettier@2.8.8: + resolution: {integrity: sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==} + engines: {node: '>=10.13.0'} + hasBin: true + + randombytes@2.1.0: + resolution: {integrity: sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==} + + readable-stream@3.6.2: + resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==} + engines: {node: '>= 6'} + + readdirp@3.6.0: + resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} + engines: {node: '>=8.10.0'} + + regenerator-runtime@0.14.1: + resolution: {integrity: sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==} + + require-directory@2.1.1: + resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: '>=0.10.0'} + + ripemd160@2.0.2: + resolution: {integrity: sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==} + + rpc-websockets@7.10.0: + resolution: {integrity: sha512-cemZ6RiDtYZpPiBzYijdOrkQQzmBCmug0E9SdRH2gIUNT15ql4mwCYWIp0VnSZq6Qrw/JkGUygp4PrK1y9KfwQ==} + + rpc-websockets@8.0.1: + resolution: {integrity: 
sha512-PptrPRK40uQvifq5sCcObmqInVcZXhy+RRrirzdE5KUPvDI47y1wPvfckD2QzqngOU9xaPW/dT+G+b+wj6M1MQ==} + + rpc-websockets@9.0.2: + resolution: {integrity: sha512-YzggvfItxMY3Lwuax5rC18inhbjJv9Py7JXRHxTIi94JOLrqBsSsUUc5bbl5W6c11tXhdfpDPK0KzBhoGe8jjw==} + + safe-buffer@5.2.1: + resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} + + scrypt-js@3.0.1: + resolution: {integrity: sha512-cdwTTnqPu0Hyvf5in5asVdZocVDTNRmR7XEcJuIzMjJeSHybHl7vpB66AzwTaIg6CLSbtjcxc8fqcySfnTkccA==} + + secp256k1@4.0.3: + resolution: {integrity: sha512-NLZVf+ROMxwtEj3Xa562qgv2BK5e2WNmXPiOdVIPLgs6lyTzMvBq0aWTYMI5XCP9jZMVKOcqZLw/Wc4vDkuxhA==} + engines: {node: '>=10.0.0'} + + serialize-javascript@6.0.0: + resolution: {integrity: sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==} + + setimmediate@1.0.5: + resolution: {integrity: sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==} + + sha.js@2.4.11: + resolution: {integrity: sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==} + hasBin: true + + snake-case@3.0.4: + resolution: {integrity: sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==} + + source-map-support@0.5.21: + resolution: {integrity: sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==} + + source-map@0.6.1: + resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} + engines: {node: '>=0.10.0'} + + string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + + string_decoder@1.3.0: + resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==} + + strip-ansi@6.0.1: + resolution: 
{integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + strip-bom@3.0.0: + resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==} + engines: {node: '>=4'} + + strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + + superstruct@0.15.5: + resolution: {integrity: sha512-4AOeU+P5UuE/4nOUkmcQdW5y7i9ndt1cQd/3iUe+LTz3RxESf/W/5lg4B74HbDMMv8PHnPnGCQFH45kBcrQYoQ==} + + superstruct@1.0.4: + resolution: {integrity: sha512-7JpaAoX2NGyoFlI9NBh66BQXGONc+uE+MRS5i2iOBKuS4e+ccgMDjATgZldkah+33DakBxDHiss9kvUcGAO8UQ==} + engines: {node: '>=14.0.0'} + + superstruct@2.0.2: + resolution: {integrity: sha512-uV+TFRZdXsqXTL2pRvujROjdZQ4RAlBUS5BTh9IGm+jTqQntYThciG/qu57Gs69yjnVUSqdxF9YLmSnpupBW9A==} + engines: {node: '>=14.0.0'} + + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + + supports-color@8.1.1: + resolution: {integrity: sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==} + engines: {node: '>=10'} + + text-encoding-utf-8@1.0.2: + resolution: {integrity: sha512-8bw4MY9WjdsD2aMtO0OzOCY3pXGYNx2d2FfHRVUKkiCPDWjKuOlhLVASS+pD7VkLTVjW268LYJHwsnPFlBpbAg==} + + through@2.3.8: + resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==} + + to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + + toml@3.0.0: + resolution: {integrity: sha512-y/mWCZinnvxjTKYhJ+pYxwD0mRLVvOtdS2Awbgxln6iEnt4rk0yBxeSBHkGJcPucRiG0e55mwWp+g/05rsrd6w==} + + tr46@0.0.3: + resolution: {integrity: 
sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==} + + ts-mocha@8.0.0: + resolution: {integrity: sha512-Kou1yxTlubLnD5C3unlCVO7nh0HERTezjoVhVw/M5S1SqoUec0WgllQvPk3vzPMc6by8m6xD1uR1yRf8lnVUbA==} + engines: {node: '>= 6.X.X'} + hasBin: true + peerDependencies: + mocha: ^3.X.X || ^4.X.X || ^5.X.X || ^6.X.X || ^7.X.X || ^8.X.X + + ts-node@7.0.1: + resolution: {integrity: sha512-BVwVbPJRspzNh2yfslyT1PSbl5uIk03EZlb493RKHN4qej/D06n1cEhjlOJG69oFsE7OT8XjpTUcYf6pKTLMhw==} + engines: {node: '>=4.2.0'} + hasBin: true + + tsconfig-paths@3.15.0: + resolution: {integrity: sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg==} + + tslib@2.7.0: + resolution: {integrity: sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==} + + type-detect@4.0.8: + resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==} + engines: {node: '>=4'} + + typescript@4.9.5: + resolution: {integrity: sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==} + engines: {node: '>=4.2.0'} + hasBin: true + + undici-types@6.19.8: + resolution: {integrity: sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==} + + utf-8-validate@5.0.10: + resolution: {integrity: sha512-Z6czzLq4u8fPOyx7TU6X3dvUZVvoJmxSQ+IcrlmagKhilxlhZgxPK6C5Jqbkw1IDUmFTM+cz9QDnnLTwDz/2gQ==} + engines: {node: '>=6.14.2'} + + util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + uuid@8.3.2: + resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==} + hasBin: true + + webidl-conversions@3.0.1: + resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} + + 
whatwg-url@5.0.0: + resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + workerpool@6.2.0: + resolution: {integrity: sha512-Rsk5qQHJ9eowMH28Jwhe8HEbmdYDX4lwoMWshiCXugjtHqMD9ZbiqSDLxcsfdqsETPzVUtX5s1Z5kStiIM6l4A==} + + wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + + wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + ws@7.5.10: + resolution: {integrity: sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==} + engines: {node: '>=8.3.0'} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: ^5.0.2 + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + + ws@8.18.0: + resolution: {integrity: sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==} + engines: {node: '>=10.0.0'} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: '>=5.0.2' + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + + y18n@5.0.8: + resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} + engines: {node: '>=10'} + + yargs-parser@20.2.4: + resolution: {integrity: sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==} + engines: {node: '>=10'} + + yargs-parser@20.2.9: + resolution: {integrity: sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==} + engines: {node: '>=10'} + + yargs-unparser@2.0.0: + resolution: {integrity: 
sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==} + engines: {node: '>=10'} + + yargs@16.2.0: + resolution: {integrity: sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==} + engines: {node: '>=10'} + + yn@2.0.0: + resolution: {integrity: sha512-uTv8J/wiWTgUTg+9vLTi//leUl5vDQS6uii/emeTb2ssY7vl6QWf2fFbIIGjnhjvbdKlU0ed7QPgY1htTC86jQ==} + engines: {node: '>=4'} + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + +snapshots: + + '@babel/runtime@7.25.6': dependencies: regenerator-runtime: 0.14.1 - dev: false - /@coral-xyz/anchor@0.29.0: - resolution: {integrity: sha512-eny6QNG0WOwqV0zQ7cs/b1tIuzZGmP7U7EcH+ogt4Gdbl8HDmIYVMh/9aTmYZPaFWjtUaI8qSn73uYEXWfATdA==} - engines: {node: '>=11'} + '@coral-xyz/anchor@0.29.0': dependencies: '@coral-xyz/borsh': 0.29.0(@solana/web3.js@1.95.3) '@noble/hashes': 1.5.0 @@ -91,34 +913,20 @@ packages: - bufferutil - encoding - utf-8-validate - dev: false - /@coral-xyz/borsh@0.29.0(@solana/web3.js@1.95.3): - resolution: {integrity: sha512-s7VFVa3a0oqpkuRloWVPdCK7hMbAMY270geZOGfCnaqexrP5dTIpbEHL33req6IYPPJ0hYa71cdvJ1h6V55/oQ==} - engines: {node: '>=10'} - peerDependencies: - '@solana/web3.js': ^1.68.0 + '@coral-xyz/borsh@0.29.0(@solana/web3.js@1.95.3)': dependencies: '@solana/web3.js': 1.95.3 bn.js: 5.2.1 buffer-layout: 1.2.2 - dev: false - /@noble/curves@1.6.0: - resolution: {integrity: sha512-TlaHRXDehJuRNR9TfZDNQ45mMEd5dwUwmicsafcIX4SsNiqnCHKjE/1alYPd/lDRVhxdhUAlv8uEhMCI5zjIJQ==} - engines: {node: ^14.21.3 || >=16} + '@noble/curves@1.6.0': dependencies: '@noble/hashes': 1.5.0 - dev: false - /@noble/hashes@1.5.0: - resolution: {integrity: sha512-1j6kQFb7QRru7eKN3ZDvRcP13rugwdxZqCjbiAVZfIJwgj2A65UmT4TgARXGlXgnRkORLTDTrO19ZErt7+QXgA==} - engines: {node: ^14.21.3 || >=16} - dev: false + '@noble/hashes@1.5.0': {} - 
/@solana/buffer-layout-utils@0.2.0: - resolution: {integrity: sha512-szG4sxgJGktbuZYDg2FfNmkMi0DYQoVjN2h7ta1W1hPrwzarcFLBq9UpX1UjNXsNpT9dn+chgprtWGioUAr4/g==} - engines: {node: '>= 10'} + '@solana/buffer-layout-utils@0.2.0': dependencies: '@solana/buffer-layout': 4.0.1 '@solana/web3.js': 1.95.3 @@ -128,54 +936,34 @@ packages: - bufferutil - encoding - utf-8-validate - dev: false - /@solana/buffer-layout@4.0.1: - resolution: {integrity: sha512-E1ImOIAD1tBZFRdjeM4/pzTiTApC0AOBGwyAMS4fwIodCWArzJ3DWdoh8cKxeFM2fElkxBh2Aqts1BPC373rHA==} - engines: {node: '>=5.10'} + '@solana/buffer-layout@4.0.1': dependencies: buffer: 6.0.3 - dev: false - /@solana/codecs-core@2.0.0-experimental.8618508: - resolution: {integrity: sha512-JCz7mKjVKtfZxkuDtwMAUgA7YvJcA2BwpZaA1NOLcted4OMC4Prwa3DUe3f3181ixPYaRyptbF0Ikq2MbDkYEA==} - dev: false + '@solana/codecs-core@2.0.0-experimental.8618508': {} - /@solana/codecs-data-structures@2.0.0-experimental.8618508: - resolution: {integrity: sha512-sLpjL9sqzaDdkloBPV61Rht1tgaKq98BCtIKRuyscIrmVPu3wu0Bavk2n/QekmUzaTsj7K1pVSniM0YqCdnEBw==} + '@solana/codecs-data-structures@2.0.0-experimental.8618508': dependencies: '@solana/codecs-core': 2.0.0-experimental.8618508 '@solana/codecs-numbers': 2.0.0-experimental.8618508 - dev: false - /@solana/codecs-numbers@2.0.0-experimental.8618508: - resolution: {integrity: sha512-EXQKfzFr3CkKKNzKSZPOOOzchXsFe90TVONWsSnVkonO9z+nGKALE0/L9uBmIFGgdzhhU9QQVFvxBMclIDJo2Q==} + '@solana/codecs-numbers@2.0.0-experimental.8618508': dependencies: '@solana/codecs-core': 2.0.0-experimental.8618508 - dev: false - /@solana/codecs-strings@2.0.0-experimental.8618508(fastestsmallesttextencoderdecoder@1.0.22): - resolution: {integrity: sha512-b2yhinr1+oe+JDmnnsV0641KQqqDG8AQ16Z/x7GVWO+AWHMpRlHWVXOq8U1yhPMA4VXxl7i+D+C6ql0VGFp0GA==} - peerDependencies: - fastestsmallesttextencoderdecoder: ^1.0.22 + '@solana/codecs-strings@2.0.0-experimental.8618508(fastestsmallesttextencoderdecoder@1.0.22)': dependencies: '@solana/codecs-core': 
2.0.0-experimental.8618508 '@solana/codecs-numbers': 2.0.0-experimental.8618508 fastestsmallesttextencoderdecoder: 1.0.22 - dev: false - /@solana/options@2.0.0-experimental.8618508: - resolution: {integrity: sha512-fy/nIRAMC3QHvnKi63KEd86Xr/zFBVxNW4nEpVEU2OT0gCEKwHY4Z55YHf7XujhyuM3PNpiBKg/YYw5QlRU4vg==} + '@solana/options@2.0.0-experimental.8618508': dependencies: '@solana/codecs-core': 2.0.0-experimental.8618508 '@solana/codecs-numbers': 2.0.0-experimental.8618508 - dev: false - /@solana/spl-token-metadata@0.1.2(@solana/web3.js@1.92.3)(fastestsmallesttextencoderdecoder@1.0.22): - resolution: {integrity: sha512-hJYnAJNkDrtkE2Q41YZhCpeOGU/0JgRFXbtrtOuGGeKc3pkEUHB9DDoxZAxx+XRno13GozUleyBi0qypz4c3bw==} - engines: {node: '>=16'} - peerDependencies: - '@solana/web3.js': ^1.87.6 + '@solana/spl-token-metadata@0.1.2(@solana/web3.js@1.92.3)(fastestsmallesttextencoderdecoder@1.0.22)': dependencies: '@solana/codecs-core': 2.0.0-experimental.8618508 '@solana/codecs-data-structures': 2.0.0-experimental.8618508 @@ -186,13 +974,8 @@ packages: '@solana/web3.js': 1.92.3 transitivePeerDependencies: - fastestsmallesttextencoderdecoder - dev: false - /@solana/spl-token@0.3.11(@solana/web3.js@1.92.3)(fastestsmallesttextencoderdecoder@1.0.22): - resolution: {integrity: sha512-bvohO3rIMSVL24Pb+I4EYTJ6cL82eFpInEXD/I8K8upOGjpqHsKUoAempR/RnUlI1qSFNyFlWJfu6MNUgfbCQQ==} - engines: {node: '>=16'} - peerDependencies: - '@solana/web3.js': ^1.88.0 + '@solana/spl-token@0.3.11(@solana/web3.js@1.92.3)(fastestsmallesttextencoderdecoder@1.0.22)': dependencies: '@solana/buffer-layout': 4.0.1 '@solana/buffer-layout-utils': 0.2.0 @@ -204,17 +987,12 @@ packages: - encoding - fastestsmallesttextencoderdecoder - utf-8-validate - dev: false - /@solana/spl-type-length-value@0.1.0: - resolution: {integrity: sha512-JBMGB0oR4lPttOZ5XiUGyvylwLQjt1CPJa6qQ5oM+MBCndfjz2TKKkw0eATlLLcYmq1jBVsNlJ2cD6ns2GR7lA==} - engines: {node: '>=16'} + '@solana/spl-type-length-value@0.1.0': dependencies: buffer: 6.0.3 - dev: false - 
/@solana/web3.js@1.92.3: - resolution: {integrity: sha512-NVBWvb9zdJIAx6X+caXaIICCEQfQaQ8ygykCjJW4u2z/sIKcvPj3ZIIllnx0MWMc3IxGq15ozGYDOQIMbwUcHw==} + '@solana/web3.js@1.92.3': dependencies: '@babel/runtime': 7.25.6 '@noble/curves': 1.6.0 @@ -235,10 +1013,8 @@ packages: - bufferutil - encoding - utf-8-validate - dev: false - /@solana/web3.js@1.95.3: - resolution: {integrity: sha512-O6rPUN0w2fkNqx/Z3QJMB9L225Ex10PRDH8bTaIUPZXMPV0QP8ZpPvjQnXK+upUczlRgzHzd6SjKIha1p+I6og==} + '@solana/web3.js@1.95.3': dependencies: '@babel/runtime': 7.25.6 '@noble/curves': 1.6.0 @@ -259,214 +1035,124 @@ packages: - bufferutil - encoding - utf-8-validate - dev: false - /@swc/helpers@0.5.13: - resolution: {integrity: sha512-UoKGxQ3r5kYI9dALKJapMmuK+1zWM/H17Z1+iwnNmzcJRnfFuevZs375TA5rW31pu4BS4NoSy1fRsexDXfWn5w==} + '@swc/helpers@0.5.13': dependencies: tslib: 2.7.0 - dev: false - /@types/chai@4.3.12: - resolution: {integrity: sha512-zNKDHG/1yxm8Il6uCCVsm+dRdEsJlFoDu73X17y09bId6UwoYww+vFBsAcRzl8knM1sab3Dp1VRikFQwDOtDDw==} - dev: false + '@types/chai@4.3.12': {} - /@types/connect@3.4.38: - resolution: {integrity: sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==} + '@types/connect@3.4.38': dependencies: '@types/node': 22.6.0 - dev: false - /@types/json5@0.0.29: - resolution: {integrity: sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==} - requiresBuild: true - dev: false + '@types/json5@0.0.29': optional: true - /@types/mocha@9.1.1: - resolution: {integrity: sha512-Z61JK7DKDtdKTWwLeElSEBcWGRLY8g95ic5FoQqI9CMx0ns/Ghep3B4DfcEimiKMvtamNVULVNKEsiwV3aQmXw==} - dev: false + '@types/mocha@9.1.1': {} - /@types/node@12.20.55: - resolution: {integrity: sha512-J8xLz7q2OFulZ2cyGTLE1TbbZcjpno7FaN6zdJNrgAdrJ+DZzh/uFR6YrTb4C+nXakvud8Q4+rbhoIWlYQbUFQ==} - dev: false + '@types/node@12.20.55': {} - /@types/node@14.18.63: - resolution: {integrity: 
sha512-fAtCfv4jJg+ExtXhvCkCqUKZ+4ok/JQk01qDKhL5BDDoS3AxKXhV5/MAVUZyQnSEd2GT92fkgZl0pz0Q0AzcIQ==} - dev: false + '@types/node@14.18.63': {} - /@types/node@22.6.0: - resolution: {integrity: sha512-QyR8d5bmq+eR72TwQDfujwShHMcIrWIYsaQFtXRE58MHPTEKUNxjxvl0yS0qPMds5xbSDWtp7ZpvGFtd7dfMdQ==} + '@types/node@22.6.0': dependencies: undici-types: 6.19.8 - dev: false - /@types/pbkdf2@3.1.2: - resolution: {integrity: sha512-uRwJqmiXmh9++aSu1VNEn3iIxWOhd8AHXNSdlaLfdAAdSTY9jYVeGWnzejM3dvrkbqE3/hyQkQQ29IFATEGlew==} + '@types/pbkdf2@3.1.2': dependencies: '@types/node': 22.6.0 - dev: false - /@types/secp256k1@4.0.6: - resolution: {integrity: sha512-hHxJU6PAEUn0TP4S/ZOzuTUvJWuZ6eIKeNKb5RBpODvSl6hp1Wrw4s7ATY50rklRCScUDpHzVA/DQdSjJ3UoYQ==} + '@types/secp256k1@4.0.6': dependencies: '@types/node': 22.6.0 - dev: false - /@types/uuid@8.3.4: - resolution: {integrity: sha512-c/I8ZRb51j+pYGAu5CrFMRxqZ2ke4y2grEBO5AUjgSkSk+qT2Ea+OdWElz/OiMf5MNpn2b17kuVBwZLQJXzihw==} - dev: false + '@types/uuid@8.3.4': {} - /@types/ws@7.4.7: - resolution: {integrity: sha512-JQbbmxZTZehdc2iszGKs5oC3NFnjeay7mtAWrdt7qNtAVK0g19muApzAy4bm9byz79xa2ZnO/BOBC2R8RC5Lww==} + '@types/ws@7.4.7': dependencies: '@types/node': 22.6.0 - dev: false - /@types/ws@8.5.12: - resolution: {integrity: sha512-3tPRkv1EtkDpzlgyKyI8pGsGZAGPEaXeu0DOj5DI25Ja91bdAYddYHbADRYVrZMRbfW+1l5YwXVDKohDJNQxkQ==} + '@types/ws@8.5.12': dependencies: '@types/node': 22.6.0 - dev: false - /@ungap/promise-all-settled@1.1.2: - resolution: {integrity: sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==} - dev: false + '@ungap/promise-all-settled@1.1.2': {} - /JSONStream@1.3.5: - resolution: {integrity: sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==} - hasBin: true + JSONStream@1.3.5: dependencies: jsonparse: 1.3.1 through: 2.3.8 - dev: false - /agentkeepalive@4.5.0: - resolution: {integrity: 
sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==} - engines: {node: '>= 8.0.0'} + agentkeepalive@4.5.0: dependencies: humanize-ms: 1.2.1 - dev: false - /ansi-colors@4.1.1: - resolution: {integrity: sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==} - engines: {node: '>=6'} - dev: false + ansi-colors@4.1.1: {} - /ansi-regex@5.0.1: - resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} - engines: {node: '>=8'} - dev: false + ansi-regex@5.0.1: {} - /ansi-styles@4.3.0: - resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} - engines: {node: '>=8'} + ansi-styles@4.3.0: dependencies: color-convert: 2.0.1 - dev: false - /anymatch@3.1.3: - resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} - engines: {node: '>= 8'} + anymatch@3.1.3: dependencies: normalize-path: 3.0.0 picomatch: 2.3.1 - dev: false - /argparse@2.0.1: - resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} - dev: false + argparse@2.0.1: {} - /arrify@1.0.1: - resolution: {integrity: sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==} - engines: {node: '>=0.10.0'} - dev: false + arrify@1.0.1: {} - /assertion-error@1.1.0: - resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==} - dev: false + assertion-error@1.1.0: {} - /balanced-match@1.0.2: - resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} - dev: false + balanced-match@1.0.2: {} - /base-x@3.0.10: - resolution: {integrity: sha512-7d0s06rR9rYaIWHkpfLIFICM/tkSVdoPC9qYAQRpxn9DdKNWNsKC0uk++akckyLq16Tx2WIinnZ6WRriAt6njQ==} + 
base-x@3.0.10: dependencies: safe-buffer: 5.2.1 - dev: false - /base64-js@1.5.1: - resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} - dev: false + base64-js@1.5.1: {} - /bigint-buffer@1.1.5: - resolution: {integrity: sha512-trfYco6AoZ+rKhKnxA0hgX0HAbVP/s808/EuDSe2JDzUnCp/xAsli35Orvk67UrTEcwuxZqYZDmfA2RXJgxVvA==} - engines: {node: '>= 10.0.0'} - requiresBuild: true + bigint-buffer@1.1.5: dependencies: bindings: 1.5.0 - dev: false - /bignumber.js@9.1.2: - resolution: {integrity: sha512-2/mKyZH9K85bzOEfhXDBFZTGd1CTs+5IHpeFQo9luiBG7hghdC851Pj2WAhb6E3R6b9tZj/XKhbg4fum+Kepug==} - dev: false + bignumber.js@9.1.2: {} - /binary-extensions@2.3.0: - resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==} - engines: {node: '>=8'} - dev: false + binary-extensions@2.3.0: {} - /bindings@1.5.0: - resolution: {integrity: sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==} + bindings@1.5.0: dependencies: file-uri-to-path: 1.0.0 - dev: false - /blakejs@1.2.1: - resolution: {integrity: sha512-QXUSXI3QVc/gJME0dBpXrag1kbzOqCjCX8/b54ntNyW6sjtoqxqRk3LTmXzaJoh71zMsDCjM+47jS7XiwN/+fQ==} - dev: false + blakejs@1.2.1: {} - /bn.js@4.12.0: - resolution: {integrity: sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==} - dev: false + bn.js@4.12.0: {} - /bn.js@5.2.1: - resolution: {integrity: sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==} - dev: false + bn.js@5.2.1: {} - /borsh@0.7.0: - resolution: {integrity: sha512-CLCsZGIBCFnPtkNnieW/a8wmreDmfUtjU2m9yHrzPXIlNbqVs0AQrSatSG6vdNYUqdc83tkQi2eHfF98ubzQLA==} + borsh@0.7.0: dependencies: bn.js: 5.2.1 bs58: 4.0.1 text-encoding-utf-8: 1.0.2 - dev: false - /brace-expansion@1.1.11: - resolution: {integrity: 
sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + brace-expansion@1.1.11: dependencies: balanced-match: 1.0.2 concat-map: 0.0.1 - dev: false - /braces@3.0.2: - resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==} - engines: {node: '>=8'} + braces@3.0.2: dependencies: fill-range: 7.0.1 - dev: false - /brorand@1.1.0: - resolution: {integrity: sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w==} - dev: false + brorand@1.1.0: {} - /browser-stdout@1.3.1: - resolution: {integrity: sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==} - dev: false + browser-stdout@1.3.1: {} - /browserify-aes@1.2.0: - resolution: {integrity: sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==} + browserify-aes@1.2.0: dependencies: buffer-xor: 1.0.3 cipher-base: 1.0.4 @@ -474,58 +1160,35 @@ packages: evp_bytestokey: 1.0.3 inherits: 2.0.4 safe-buffer: 5.2.1 - dev: false - /bs58@4.0.1: - resolution: {integrity: sha512-Ok3Wdf5vOIlBrgCvTq96gBkJw+JUEzdBgyaza5HLtPm7yTHkjRy8+JzNyHF7BHa0bNWOQIp3m5YF0nnFcOIKLw==} + bs58@4.0.1: dependencies: base-x: 3.0.10 - dev: false - /bs58check@2.1.2: - resolution: {integrity: sha512-0TS1jicxdU09dwJMNZtVAfzPi6Q6QeN0pM1Fkzrjn+XYHvzMKPU3pHVpva+769iNVSfIYWf7LJ6WR+BuuMf8cA==} + bs58check@2.1.2: dependencies: bs58: 4.0.1 create-hash: 1.2.0 safe-buffer: 5.2.1 - dev: false - /buffer-from@1.1.2: - resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} - dev: false + buffer-from@1.1.2: {} - /buffer-layout@1.2.2: - resolution: {integrity: sha512-kWSuLN694+KTk8SrYvCqwP2WcgQjoRCiF5b4QDvkkz8EmgD+aWAIceGFKMIAdmF/pH+vpgNV3d3kAKorcdAmWA==} - engines: {node: '>=4.5'} - dev: false + buffer-layout@1.2.2: {} - /buffer-xor@1.0.3: - resolution: {integrity: 
sha512-571s0T7nZWK6vB67HI5dyUF7wXiNcfaPPPTl6zYCNApANjIvYJTg7hlud/+cJpdAhS7dVzqMLmfhfHR3rAcOjQ==} - dev: false + buffer-xor@1.0.3: {} - /buffer@6.0.3: - resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} + buffer@6.0.3: dependencies: base64-js: 1.5.1 ieee754: 1.2.1 - dev: false - /bufferutil@4.0.8: - resolution: {integrity: sha512-4T53u4PdgsXqKaIctwF8ifXlRTTmEPJ8iEPWFdGZvcf7sbwYo6FKFEX9eNNAnzFZ7EzJAQ3CJeOtCRA4rDp7Pw==} - engines: {node: '>=6.14.2'} - requiresBuild: true + bufferutil@4.0.8: dependencies: node-gyp-build: 4.8.2 - dev: false - /camelcase@6.3.0: - resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} - engines: {node: '>=10'} - dev: false + camelcase@6.3.0: {} - /chai@4.4.1: - resolution: {integrity: sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g==} - engines: {node: '>=4'} + chai@4.4.1: dependencies: assertion-error: 1.1.0 check-error: 1.0.3 @@ -534,25 +1197,17 @@ packages: loupe: 2.3.7 pathval: 1.1.1 type-detect: 4.0.8 - dev: false - /chalk@4.1.2: - resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} - engines: {node: '>=10'} + chalk@4.1.2: dependencies: ansi-styles: 4.3.0 supports-color: 7.2.0 - dev: false - /check-error@1.0.3: - resolution: {integrity: sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==} + check-error@1.0.3: dependencies: get-func-name: 2.0.2 - dev: false - /chokidar@3.5.3: - resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==} - engines: {node: '>= 8.10.0'} + chokidar@3.5.3: dependencies: anymatch: 3.1.3 braces: 3.0.2 @@ -563,54 +1218,37 @@ packages: readdirp: 3.6.0 optionalDependencies: fsevents: 2.3.3 - dev: false - /cipher-base@1.0.4: - resolution: {integrity: 
sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==} + cipher-base@1.0.4: dependencies: inherits: 2.0.4 safe-buffer: 5.2.1 - dev: false - /cliui@7.0.4: - resolution: {integrity: sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==} + cliui@7.0.4: dependencies: string-width: 4.2.3 strip-ansi: 6.0.1 wrap-ansi: 7.0.0 - dev: false - /color-convert@2.0.1: - resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} - engines: {node: '>=7.0.0'} + color-convert@2.0.1: dependencies: color-name: 1.1.4 - dev: false - /color-name@1.1.4: - resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} - dev: false + color-name@1.1.4: {} - /commander@2.20.3: - resolution: {integrity: sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==} - dev: false + commander@2.20.3: {} - /concat-map@0.0.1: - resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} - dev: false + concat-map@0.0.1: {} - /create-hash@1.2.0: - resolution: {integrity: sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==} + create-hash@1.2.0: dependencies: cipher-base: 1.0.4 inherits: 2.0.4 md5.js: 1.3.5 ripemd160: 2.0.2 sha.js: 2.4.11 - dev: false - /create-hmac@1.1.7: - resolution: {integrity: sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==} + create-hmac@1.1.7: dependencies: cipher-base: 1.0.4 create-hash: 1.2.0 @@ -618,70 +1256,38 @@ packages: ripemd160: 2.0.2 safe-buffer: 5.2.1 sha.js: 2.4.11 - dev: false - /cross-fetch@3.1.8: - resolution: {integrity: sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg==} + cross-fetch@3.1.8: dependencies: node-fetch: 2.7.0 
transitivePeerDependencies: - encoding - dev: false - /crypto-hash@1.3.0: - resolution: {integrity: sha512-lyAZ0EMyjDkVvz8WOeVnuCPvKVBXcMv1l5SVqO1yC7PzTwrD/pPje/BIRbWhMoPe436U+Y2nD7f5bFx0kt+Sbg==} - engines: {node: '>=8'} - dev: false + crypto-hash@1.3.0: {} - /debug@4.3.3(supports-color@8.1.1): - resolution: {integrity: sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==} - engines: {node: '>=6.0'} - peerDependencies: - supports-color: '*' - peerDependenciesMeta: - supports-color: - optional: true + debug@4.3.3(supports-color@8.1.1): dependencies: ms: 2.1.2 supports-color: 8.1.1 - dev: false - - /decamelize@4.0.0: - resolution: {integrity: sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==} - engines: {node: '>=10'} - dev: false - /deep-eql@4.1.3: - resolution: {integrity: sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==} - engines: {node: '>=6'} + decamelize@4.0.0: {} + + deep-eql@4.1.3: dependencies: type-detect: 4.0.8 - dev: false - /delay@5.0.0: - resolution: {integrity: sha512-ReEBKkIfe4ya47wlPYf/gu5ib6yUG0/Aez0JQZQz94kiWtRQvZIQbTiehsnwHvLSWJnQdhVeqYue7Id1dKr0qw==} - engines: {node: '>=10'} - dev: false + delay@5.0.0: {} - /diff@3.5.0: - resolution: {integrity: sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==} - engines: {node: '>=0.3.1'} - dev: false + diff@3.5.0: {} - /diff@5.0.0: - resolution: {integrity: sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==} - engines: {node: '>=0.3.1'} - dev: false + diff@5.0.0: {} - /dot-case@3.0.4: - resolution: {integrity: sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==} + dot-case@3.0.4: dependencies: no-case: 3.0.4 tslib: 2.7.0 - dev: false - /elliptic@6.5.5: - resolution: {integrity: 
sha512-7EjbcmUm17NQFu4Pmgmq2olYMj8nwMnpcddByChSUjArp8F5DQWcIcpriwO4ZToLNAJig0yiyjswfyGNje/ixw==} + elliptic@6.5.5: dependencies: bn.js: 4.12.0 brorand: 1.1.0 @@ -690,34 +1296,20 @@ packages: inherits: 2.0.4 minimalistic-assert: 1.0.1 minimalistic-crypto-utils: 1.0.1 - dev: false - /emoji-regex@8.0.0: - resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} - dev: false + emoji-regex@8.0.0: {} - /es6-promise@4.2.8: - resolution: {integrity: sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==} - dev: false + es6-promise@4.2.8: {} - /es6-promisify@5.0.0: - resolution: {integrity: sha512-C+d6UdsYDk0lMebHNR4S2NybQMMngAOnOwYBQjTOiv0MkoJMP0Myw2mgpDLBcpfCmRLxyFqYhS/CfOENq4SJhQ==} + es6-promisify@5.0.0: dependencies: es6-promise: 4.2.8 - dev: false - /escalade@3.1.2: - resolution: {integrity: sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==} - engines: {node: '>=6'} - dev: false + escalade@3.1.2: {} - /escape-string-regexp@4.0.0: - resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} - engines: {node: '>=10'} - dev: false + escape-string-regexp@4.0.0: {} - /ethereum-cryptography@0.1.3: - resolution: {integrity: sha512-w8/4x1SGGzc+tO97TASLja6SLd3fRIK2tLVcV2Gx4IB21hE19atll5Cq9o3d0ZmAYC/8aw0ipieTSiekAea4SQ==} + ethereum-cryptography@0.1.3: dependencies: '@types/pbkdf2': 3.1.2 '@types/secp256k1': 4.0.6 @@ -734,91 +1326,49 @@ packages: scrypt-js: 3.0.1 secp256k1: 4.0.3 setimmediate: 1.0.5 - dev: false - /eventemitter3@4.0.7: - resolution: {integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==} - dev: false + eventemitter3@4.0.7: {} - /eventemitter3@5.0.1: - resolution: {integrity: sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==} - dev: false + 
eventemitter3@5.0.1: {} - /evp_bytestokey@1.0.3: - resolution: {integrity: sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==} + evp_bytestokey@1.0.3: dependencies: md5.js: 1.3.5 safe-buffer: 5.2.1 - dev: false - /eyes@0.1.8: - resolution: {integrity: sha512-GipyPsXO1anza0AOZdy69Im7hGFCNB7Y/NGjDlZGJ3GJJLtwNSb2vrzYrTYJRrRloVx7pl+bhUaTB8yiccPvFQ==} - engines: {node: '> 0.1.90'} - dev: false + eyes@0.1.8: {} - /fast-stable-stringify@1.0.0: - resolution: {integrity: sha512-wpYMUmFu5f00Sm0cj2pfivpmawLZ0NKdviQ4w9zJeR8JVtOpOxHmLaJuj0vxvGqMJQWyP/COUkF75/57OKyRag==} - dev: false + fast-stable-stringify@1.0.0: {} - /fastestsmallesttextencoderdecoder@1.0.22: - resolution: {integrity: sha512-Pb8d48e+oIuY4MaM64Cd7OW1gt4nxCHs7/ddPPZ/Ic3sg8yVGM7O9wDvZ7us6ScaUupzM+pfBolwtYhN1IxBIw==} - dev: false + fastestsmallesttextencoderdecoder@1.0.22: {} - /file-uri-to-path@1.0.0: - resolution: {integrity: sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==} - dev: false + file-uri-to-path@1.0.0: {} - /fill-range@7.0.1: - resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==} - engines: {node: '>=8'} + fill-range@7.0.1: dependencies: to-regex-range: 5.0.1 - dev: false - /find-up@5.0.0: - resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} - engines: {node: '>=10'} + find-up@5.0.0: dependencies: locate-path: 6.0.0 path-exists: 4.0.0 - dev: false - /flat@5.0.2: - resolution: {integrity: sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==} - hasBin: true - dev: false + flat@5.0.2: {} - /fs.realpath@1.0.0: - resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} - dev: false + fs.realpath@1.0.0: {} - /fsevents@2.3.3: - resolution: {integrity: 
sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} - engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} - os: [darwin] - requiresBuild: true - dev: false + fsevents@2.3.3: optional: true - /get-caller-file@2.0.5: - resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} - engines: {node: 6.* || 8.* || >= 10.*} - dev: false + get-caller-file@2.0.5: {} - /get-func-name@2.0.2: - resolution: {integrity: sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==} - dev: false + get-func-name@2.0.2: {} - /glob-parent@5.1.2: - resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} - engines: {node: '>= 6'} + glob-parent@5.1.2: dependencies: is-glob: 4.0.3 - dev: false - /glob@7.2.0: - resolution: {integrity: sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==} - deprecated: Glob versions prior to v9 are no longer supported + glob@7.2.0: dependencies: fs.realpath: 1.0.0 inflight: 1.0.6 @@ -826,124 +1376,68 @@ packages: minimatch: 3.1.2 once: 1.4.0 path-is-absolute: 1.0.1 - dev: false - /growl@1.10.5: - resolution: {integrity: sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==} - engines: {node: '>=4.x'} - dev: false + growl@1.10.5: {} - /has-flag@4.0.0: - resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} - engines: {node: '>=8'} - dev: false + has-flag@4.0.0: {} - /hash-base@3.1.0: - resolution: {integrity: sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==} - engines: {node: '>=4'} + hash-base@3.1.0: dependencies: inherits: 2.0.4 readable-stream: 3.6.2 safe-buffer: 5.2.1 - dev: false - /hash.js@1.1.7: - resolution: {integrity: 
sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==} + hash.js@1.1.7: dependencies: inherits: 2.0.4 minimalistic-assert: 1.0.1 - dev: false - /he@1.2.0: - resolution: {integrity: sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==} - hasBin: true - dev: false + he@1.2.0: {} - /hmac-drbg@1.0.1: - resolution: {integrity: sha512-Tti3gMqLdZfhOQY1Mzf/AanLiqh1WTiJgEj26ZuYQ9fbkLomzGchCws4FyrSd4VkpBfiNhaE1On+lOz894jvXg==} + hmac-drbg@1.0.1: dependencies: hash.js: 1.1.7 minimalistic-assert: 1.0.1 minimalistic-crypto-utils: 1.0.1 - dev: false - /humanize-ms@1.2.1: - resolution: {integrity: sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==} + humanize-ms@1.2.1: dependencies: ms: 2.1.3 - dev: false - /ieee754@1.2.1: - resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} - dev: false + ieee754@1.2.1: {} - /inflight@1.0.6: - resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} - deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. 
+ inflight@1.0.6: dependencies: once: 1.4.0 wrappy: 1.0.2 - dev: false - /inherits@2.0.4: - resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} - dev: false + inherits@2.0.4: {} - /is-binary-path@2.1.0: - resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} - engines: {node: '>=8'} + is-binary-path@2.1.0: dependencies: binary-extensions: 2.3.0 - dev: false - /is-extglob@2.1.1: - resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} - engines: {node: '>=0.10.0'} - dev: false + is-extglob@2.1.1: {} - /is-fullwidth-code-point@3.0.0: - resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} - engines: {node: '>=8'} - dev: false + is-fullwidth-code-point@3.0.0: {} - /is-glob@4.0.3: - resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} - engines: {node: '>=0.10.0'} + is-glob@4.0.3: dependencies: is-extglob: 2.1.1 - dev: false - /is-number@7.0.0: - resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} - engines: {node: '>=0.12.0'} - dev: false + is-number@7.0.0: {} - /is-plain-obj@2.1.0: - resolution: {integrity: sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==} - engines: {node: '>=8'} - dev: false + is-plain-obj@2.1.0: {} - /is-unicode-supported@0.1.0: - resolution: {integrity: sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==} - engines: {node: '>=10'} - dev: false + is-unicode-supported@0.1.0: {} - /isexe@2.0.0: - resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} - dev: false + isexe@2.0.0: {} - 
/isomorphic-ws@4.0.1(ws@7.5.10): - resolution: {integrity: sha512-BhBvN2MBpWTaSHdWRb/bwdZJ1WaehQ2L1KngkCkfLUGF0mAWAT1sQUQacEmQ0jXkFw/czDXPNQSL5u2/Krsz1w==} - peerDependencies: - ws: '*' + isomorphic-ws@4.0.1(ws@7.5.10): dependencies: ws: 7.5.10 - dev: false - /jayson@4.1.2: - resolution: {integrity: sha512-5nzMWDHy6f+koZOuYsArh2AXs73NfWYVlFyJJuCedr93GpY+Ku8qq10ropSXVfHK+H0T6paA88ww+/dV+1fBNA==} - engines: {node: '>=8'} - hasBin: true + jayson@4.1.2: dependencies: '@types/connect': 3.4.38 '@types/node': 12.20.55 @@ -960,118 +1454,70 @@ packages: transitivePeerDependencies: - bufferutil - utf-8-validate - dev: false - /js-yaml@4.1.0: - resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} - hasBin: true + js-yaml@4.1.0: dependencies: argparse: 2.0.1 - dev: false - /json-stringify-safe@5.0.1: - resolution: {integrity: sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==} - dev: false + json-stringify-safe@5.0.1: {} - /json5@1.0.2: - resolution: {integrity: sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==} - hasBin: true - requiresBuild: true + json5@1.0.2: dependencies: minimist: 1.2.8 - dev: false optional: true - /jsonparse@1.3.1: - resolution: {integrity: sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==} - engines: {'0': node >= 0.2.0} - dev: false + jsonparse@1.3.1: {} - /keccak@3.0.4: - resolution: {integrity: sha512-3vKuW0jV8J3XNTzvfyicFR5qvxrSAGl7KIhvgOu5cmWwM7tZRj3fMbj/pfIf4be7aznbc+prBWGjywox/g2Y6Q==} - engines: {node: '>=10.0.0'} - requiresBuild: true + keccak@3.0.4: dependencies: node-addon-api: 2.0.2 node-gyp-build: 4.8.2 readable-stream: 3.6.2 - dev: false - /locate-path@6.0.0: - resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} - engines: {node: '>=10'} + locate-path@6.0.0: 
dependencies: p-locate: 5.0.0 - dev: false - /log-symbols@4.1.0: - resolution: {integrity: sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==} - engines: {node: '>=10'} + log-symbols@4.1.0: dependencies: chalk: 4.1.2 is-unicode-supported: 0.1.0 - dev: false - /loupe@2.3.7: - resolution: {integrity: sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==} + loupe@2.3.7: dependencies: get-func-name: 2.0.2 - dev: false - /lower-case@2.0.2: - resolution: {integrity: sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==} + lower-case@2.0.2: dependencies: tslib: 2.7.0 - dev: false - /make-error@1.3.6: - resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} - dev: false + make-error@1.3.6: {} - /md5.js@1.3.5: - resolution: {integrity: sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==} + md5.js@1.3.5: dependencies: hash-base: 3.1.0 inherits: 2.0.4 safe-buffer: 5.2.1 - dev: false - /minimalistic-assert@1.0.1: - resolution: {integrity: sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==} - dev: false + minimalistic-assert@1.0.1: {} - /minimalistic-crypto-utils@1.0.1: - resolution: {integrity: sha512-JIYlbt6g8i5jKfJ3xz7rF0LXmv2TkDxBLUkiBeZ7bAx4GnnNMr8xFpGnOxn6GhTEHx3SjRrZEoU+j04prX1ktg==} - dev: false + minimalistic-crypto-utils@1.0.1: {} - /minimatch@3.1.2: - resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + minimatch@3.1.2: dependencies: brace-expansion: 1.1.11 - dev: false - /minimatch@4.2.1: - resolution: {integrity: sha512-9Uq1ChtSZO+Mxa/CL1eGizn2vRn3MlLgzhT0Iz8zaY8NdvxvB0d5QdPFmCKf7JKA9Lerx5vRrnwO03jsSfGG9g==} - engines: {node: '>=10'} + minimatch@4.2.1: dependencies: brace-expansion: 1.1.11 - dev: false - 
/minimist@1.2.8: - resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} - dev: false + minimist@1.2.8: {} - /mkdirp@0.5.6: - resolution: {integrity: sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==} - hasBin: true + mkdirp@0.5.6: dependencies: minimist: 1.2.8 - dev: false - /mocha@9.2.2: - resolution: {integrity: sha512-L6XC3EdwT6YrIk0yXpavvLkn8h+EU+Y5UcCHKECyMbdUIxyMuZj4bX4U9e1nvnvUUvQVsV2VHQr5zLdcUkhW/g==} - engines: {node: '>= 12.0.0'} - hasBin: true + mocha@9.2.2: dependencies: '@ungap/promise-all-settled': 1.1.2 ansi-colors: 4.1.1 @@ -1097,155 +1543,84 @@ packages: yargs: 16.2.0 yargs-parser: 20.2.4 yargs-unparser: 2.0.0 - dev: false - /ms@2.1.2: - resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} - dev: false + ms@2.1.2: {} - /ms@2.1.3: - resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} - dev: false + ms@2.1.3: {} - /nanoid@3.3.1: - resolution: {integrity: sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw==} - engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} - hasBin: true - dev: false + nanoid@3.3.1: {} - /no-case@3.0.4: - resolution: {integrity: sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==} + no-case@3.0.4: dependencies: lower-case: 2.0.2 tslib: 2.7.0 - dev: false - /node-addon-api@2.0.2: - resolution: {integrity: sha512-Ntyt4AIXyaLIuMHF6IOoTakB3K+RWxwtsHNRxllEoA6vPwP9o4866g6YWDLUdnucilZhmkxiHwHr11gAENw+QA==} - dev: false + node-addon-api@2.0.2: {} - /node-fetch@2.7.0: - resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} - engines: {node: 4.x || >=6.0.0} - peerDependencies: - encoding: ^0.1.0 - peerDependenciesMeta: - encoding: - 
optional: true + node-fetch@2.7.0: dependencies: whatwg-url: 5.0.0 - dev: false - /node-gyp-build@4.8.2: - resolution: {integrity: sha512-IRUxE4BVsHWXkV/SFOut4qTlagw2aM8T5/vnTsmrHJvVoKueJHRc/JaFND7QDDc61kLYUJ6qlZM3sqTSyx2dTw==} - hasBin: true - dev: false + node-gyp-build@4.8.2: {} - /normalize-path@3.0.0: - resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} - engines: {node: '>=0.10.0'} - dev: false + normalize-path@3.0.0: {} - /once@1.4.0: - resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + once@1.4.0: dependencies: wrappy: 1.0.2 - dev: false - /p-limit@3.1.0: - resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} - engines: {node: '>=10'} + p-limit@3.1.0: dependencies: yocto-queue: 0.1.0 - dev: false - /p-locate@5.0.0: - resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} - engines: {node: '>=10'} + p-locate@5.0.0: dependencies: p-limit: 3.1.0 - dev: false - /pako@2.1.0: - resolution: {integrity: sha512-w+eufiZ1WuJYgPXbV/PO3NCMEc3xqylkKHzp8bxp1uW4qaSNQUkwmLLEc3kKsfz8lpV1F8Ht3U1Cm+9Srog2ug==} - dev: false + pako@2.1.0: {} - /path-exists@4.0.0: - resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} - engines: {node: '>=8'} - dev: false + path-exists@4.0.0: {} - /path-is-absolute@1.0.1: - resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} - engines: {node: '>=0.10.0'} - dev: false + path-is-absolute@1.0.1: {} - /pathval@1.1.1: - resolution: {integrity: sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==} - dev: false + pathval@1.1.1: {} - /pbkdf2@3.1.2: - resolution: {integrity: 
sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA==} - engines: {node: '>=0.12'} + pbkdf2@3.1.2: dependencies: create-hash: 1.2.0 create-hmac: 1.1.7 ripemd160: 2.0.2 safe-buffer: 5.2.1 sha.js: 2.4.11 - dev: false - /picomatch@2.3.1: - resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} - engines: {node: '>=8.6'} - dev: false + picomatch@2.3.1: {} - /prettier@2.8.8: - resolution: {integrity: sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==} - engines: {node: '>=10.13.0'} - hasBin: true - dev: false + prettier@2.8.8: {} - /randombytes@2.1.0: - resolution: {integrity: sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==} + randombytes@2.1.0: dependencies: safe-buffer: 5.2.1 - dev: false - /readable-stream@3.6.2: - resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==} - engines: {node: '>= 6'} + readable-stream@3.6.2: dependencies: inherits: 2.0.4 string_decoder: 1.3.0 util-deprecate: 1.0.2 - dev: false - /readdirp@3.6.0: - resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} - engines: {node: '>=8.10.0'} + readdirp@3.6.0: dependencies: picomatch: 2.3.1 - dev: false - /regenerator-runtime@0.14.1: - resolution: {integrity: sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==} - dev: false + regenerator-runtime@0.14.1: {} - /require-directory@2.1.1: - resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} - engines: {node: '>=0.10.0'} - dev: false + require-directory@2.1.1: {} - /ripemd160@2.0.2: - resolution: {integrity: sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==} + 
ripemd160@2.0.2: dependencies: hash-base: 3.1.0 inherits: 2.0.4 - dev: false - /rpc-websockets@7.10.0: - resolution: {integrity: sha512-cemZ6RiDtYZpPiBzYijdOrkQQzmBCmug0E9SdRH2gIUNT15ql4mwCYWIp0VnSZq6Qrw/JkGUygp4PrK1y9KfwQ==} + rpc-websockets@7.10.0: dependencies: '@babel/runtime': 7.25.6 eventemitter3: 4.0.7 @@ -1254,10 +1629,8 @@ packages: optionalDependencies: bufferutil: 4.0.8 utf-8-validate: 5.0.10 - dev: false - /rpc-websockets@8.0.1: - resolution: {integrity: sha512-PptrPRK40uQvifq5sCcObmqInVcZXhy+RRrirzdE5KUPvDI47y1wPvfckD2QzqngOU9xaPW/dT+G+b+wj6M1MQ==} + rpc-websockets@8.0.1: dependencies: eventemitter3: 4.0.7 uuid: 8.3.2 @@ -1265,10 +1638,8 @@ packages: optionalDependencies: bufferutil: 4.0.8 utf-8-validate: 5.0.10 - dev: false - /rpc-websockets@9.0.2: - resolution: {integrity: sha512-YzggvfItxMY3Lwuax5rC18inhbjJv9Py7JXRHxTIi94JOLrqBsSsUUc5bbl5W6c11tXhdfpDPK0KzBhoGe8jjw==} + rpc-websockets@9.0.2: dependencies: '@swc/helpers': 0.5.13 '@types/uuid': 8.3.4 @@ -1280,165 +1651,93 @@ packages: optionalDependencies: bufferutil: 4.0.8 utf-8-validate: 5.0.10 - dev: false - /safe-buffer@5.2.1: - resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} - dev: false + safe-buffer@5.2.1: {} - /scrypt-js@3.0.1: - resolution: {integrity: sha512-cdwTTnqPu0Hyvf5in5asVdZocVDTNRmR7XEcJuIzMjJeSHybHl7vpB66AzwTaIg6CLSbtjcxc8fqcySfnTkccA==} - dev: false + scrypt-js@3.0.1: {} - /secp256k1@4.0.3: - resolution: {integrity: sha512-NLZVf+ROMxwtEj3Xa562qgv2BK5e2WNmXPiOdVIPLgs6lyTzMvBq0aWTYMI5XCP9jZMVKOcqZLw/Wc4vDkuxhA==} - engines: {node: '>=10.0.0'} - requiresBuild: true + secp256k1@4.0.3: dependencies: elliptic: 6.5.5 node-addon-api: 2.0.2 node-gyp-build: 4.8.2 - dev: false - /serialize-javascript@6.0.0: - resolution: {integrity: sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==} + serialize-javascript@6.0.0: dependencies: randombytes: 2.1.0 - dev: false - 
/setimmediate@1.0.5: - resolution: {integrity: sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==} - dev: false + setimmediate@1.0.5: {} - /sha.js@2.4.11: - resolution: {integrity: sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==} - hasBin: true + sha.js@2.4.11: dependencies: inherits: 2.0.4 safe-buffer: 5.2.1 - dev: false - /snake-case@3.0.4: - resolution: {integrity: sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==} + snake-case@3.0.4: dependencies: dot-case: 3.0.4 tslib: 2.7.0 - dev: false - /source-map-support@0.5.21: - resolution: {integrity: sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==} + source-map-support@0.5.21: dependencies: buffer-from: 1.1.2 source-map: 0.6.1 - dev: false - /source-map@0.6.1: - resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} - engines: {node: '>=0.10.0'} - dev: false + source-map@0.6.1: {} - /string-width@4.2.3: - resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} - engines: {node: '>=8'} + string-width@4.2.3: dependencies: emoji-regex: 8.0.0 is-fullwidth-code-point: 3.0.0 strip-ansi: 6.0.1 - dev: false - /string_decoder@1.3.0: - resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==} + string_decoder@1.3.0: dependencies: safe-buffer: 5.2.1 - dev: false - /strip-ansi@6.0.1: - resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} - engines: {node: '>=8'} + strip-ansi@6.0.1: dependencies: ansi-regex: 5.0.1 - dev: false - /strip-bom@3.0.0: - resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==} - engines: {node: 
'>=4'} - requiresBuild: true - dev: false + strip-bom@3.0.0: optional: true - /strip-json-comments@3.1.1: - resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} - engines: {node: '>=8'} - dev: false + strip-json-comments@3.1.1: {} - /superstruct@0.15.5: - resolution: {integrity: sha512-4AOeU+P5UuE/4nOUkmcQdW5y7i9ndt1cQd/3iUe+LTz3RxESf/W/5lg4B74HbDMMv8PHnPnGCQFH45kBcrQYoQ==} - dev: false + superstruct@0.15.5: {} - /superstruct@1.0.4: - resolution: {integrity: sha512-7JpaAoX2NGyoFlI9NBh66BQXGONc+uE+MRS5i2iOBKuS4e+ccgMDjATgZldkah+33DakBxDHiss9kvUcGAO8UQ==} - engines: {node: '>=14.0.0'} - dev: false + superstruct@1.0.4: {} - /superstruct@2.0.2: - resolution: {integrity: sha512-uV+TFRZdXsqXTL2pRvujROjdZQ4RAlBUS5BTh9IGm+jTqQntYThciG/qu57Gs69yjnVUSqdxF9YLmSnpupBW9A==} - engines: {node: '>=14.0.0'} - dev: false + superstruct@2.0.2: {} - /supports-color@7.2.0: - resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} - engines: {node: '>=8'} + supports-color@7.2.0: dependencies: has-flag: 4.0.0 - dev: false - /supports-color@8.1.1: - resolution: {integrity: sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==} - engines: {node: '>=10'} + supports-color@8.1.1: dependencies: has-flag: 4.0.0 - dev: false - /text-encoding-utf-8@1.0.2: - resolution: {integrity: sha512-8bw4MY9WjdsD2aMtO0OzOCY3pXGYNx2d2FfHRVUKkiCPDWjKuOlhLVASS+pD7VkLTVjW268LYJHwsnPFlBpbAg==} - dev: false + text-encoding-utf-8@1.0.2: {} - /through@2.3.8: - resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==} - dev: false + through@2.3.8: {} - /to-regex-range@5.0.1: - resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} - engines: {node: '>=8.0'} + to-regex-range@5.0.1: dependencies: is-number: 7.0.0 - dev: 
false - /toml@3.0.0: - resolution: {integrity: sha512-y/mWCZinnvxjTKYhJ+pYxwD0mRLVvOtdS2Awbgxln6iEnt4rk0yBxeSBHkGJcPucRiG0e55mwWp+g/05rsrd6w==} - dev: false + toml@3.0.0: {} - /tr46@0.0.3: - resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==} - dev: false + tr46@0.0.3: {} - /ts-mocha@8.0.0(mocha@9.2.2): - resolution: {integrity: sha512-Kou1yxTlubLnD5C3unlCVO7nh0HERTezjoVhVw/M5S1SqoUec0WgllQvPk3vzPMc6by8m6xD1uR1yRf8lnVUbA==} - engines: {node: '>= 6.X.X'} - hasBin: true - peerDependencies: - mocha: ^3.X.X || ^4.X.X || ^5.X.X || ^6.X.X || ^7.X.X || ^8.X.X + ts-mocha@8.0.0(mocha@9.2.2): dependencies: mocha: 9.2.2 ts-node: 7.0.1 optionalDependencies: tsconfig-paths: 3.15.0 - dev: false - /ts-node@7.0.1: - resolution: {integrity: sha512-BVwVbPJRspzNh2yfslyT1PSbl5uIk03EZlb493RKHN4qej/D06n1cEhjlOJG69oFsE7OT8XjpTUcYf6pKTLMhw==} - engines: {node: '>=4.2.0'} - hasBin: true + ts-node@7.0.1: dependencies: arrify: 1.0.1 buffer-from: 1.1.2 @@ -1448,148 +1747,73 @@ packages: mkdirp: 0.5.6 source-map-support: 0.5.21 yn: 2.0.0 - dev: false - /tsconfig-paths@3.15.0: - resolution: {integrity: sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg==} - requiresBuild: true + tsconfig-paths@3.15.0: dependencies: '@types/json5': 0.0.29 json5: 1.0.2 minimist: 1.2.8 strip-bom: 3.0.0 - dev: false optional: true - /tslib@2.7.0: - resolution: {integrity: sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==} - dev: false + tslib@2.7.0: {} - /type-detect@4.0.8: - resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==} - engines: {node: '>=4'} - dev: false + type-detect@4.0.8: {} - /typescript@4.9.5: - resolution: {integrity: sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==} - engines: {node: '>=4.2.0'} - hasBin: true - dev: false + 
typescript@4.9.5: {} - /undici-types@6.19.8: - resolution: {integrity: sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==} - dev: false + undici-types@6.19.8: {} - /utf-8-validate@5.0.10: - resolution: {integrity: sha512-Z6czzLq4u8fPOyx7TU6X3dvUZVvoJmxSQ+IcrlmagKhilxlhZgxPK6C5Jqbkw1IDUmFTM+cz9QDnnLTwDz/2gQ==} - engines: {node: '>=6.14.2'} - requiresBuild: true + utf-8-validate@5.0.10: dependencies: node-gyp-build: 4.8.2 - dev: false - /util-deprecate@1.0.2: - resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} - dev: false + util-deprecate@1.0.2: {} - /uuid@8.3.2: - resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==} - hasBin: true - dev: false + uuid@8.3.2: {} - /webidl-conversions@3.0.1: - resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} - dev: false + webidl-conversions@3.0.1: {} - /whatwg-url@5.0.0: - resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} + whatwg-url@5.0.0: dependencies: tr46: 0.0.3 webidl-conversions: 3.0.1 - dev: false - /which@2.0.2: - resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} - engines: {node: '>= 8'} - hasBin: true + which@2.0.2: dependencies: isexe: 2.0.0 - dev: false - /workerpool@6.2.0: - resolution: {integrity: sha512-Rsk5qQHJ9eowMH28Jwhe8HEbmdYDX4lwoMWshiCXugjtHqMD9ZbiqSDLxcsfdqsETPzVUtX5s1Z5kStiIM6l4A==} - dev: false + workerpool@6.2.0: {} - /wrap-ansi@7.0.0: - resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} - engines: {node: '>=10'} + wrap-ansi@7.0.0: dependencies: ansi-styles: 4.3.0 string-width: 4.2.3 strip-ansi: 6.0.1 - dev: false - /wrappy@1.0.2: - resolution: 
{integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} - dev: false + wrappy@1.0.2: {} - /ws@7.5.10: - resolution: {integrity: sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==} - engines: {node: '>=8.3.0'} - peerDependencies: - bufferutil: ^4.0.1 - utf-8-validate: ^5.0.2 - peerDependenciesMeta: - bufferutil: - optional: true - utf-8-validate: - optional: true - dev: false + ws@7.5.10: {} - /ws@8.18.0(bufferutil@4.0.8)(utf-8-validate@5.0.10): - resolution: {integrity: sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==} - engines: {node: '>=10.0.0'} - peerDependencies: - bufferutil: ^4.0.1 - utf-8-validate: '>=5.0.2' - peerDependenciesMeta: - bufferutil: - optional: true - utf-8-validate: - optional: true + ws@8.18.0(bufferutil@4.0.8)(utf-8-validate@5.0.10): dependencies: bufferutil: 4.0.8 utf-8-validate: 5.0.10 - dev: false - /y18n@5.0.8: - resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} - engines: {node: '>=10'} - dev: false + y18n@5.0.8: {} - /yargs-parser@20.2.4: - resolution: {integrity: sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==} - engines: {node: '>=10'} - dev: false + yargs-parser@20.2.4: {} - /yargs-parser@20.2.9: - resolution: {integrity: sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==} - engines: {node: '>=10'} - dev: false + yargs-parser@20.2.9: {} - /yargs-unparser@2.0.0: - resolution: {integrity: sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==} - engines: {node: '>=10'} + yargs-unparser@2.0.0: dependencies: camelcase: 6.3.0 decamelize: 4.0.0 flat: 5.0.2 is-plain-obj: 2.1.0 - dev: false - /yargs@16.2.0: - resolution: {integrity: 
sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==} - engines: {node: '>=10'} + yargs@16.2.0: dependencies: cliui: 7.0.4 escalade: 3.1.2 @@ -1598,14 +1822,7 @@ packages: string-width: 4.2.3 y18n: 5.0.8 yargs-parser: 20.2.9 - dev: false - /yn@2.0.0: - resolution: {integrity: sha512-uTv8J/wiWTgUTg+9vLTi//leUl5vDQS6uii/emeTb2ssY7vl6QWf2fFbIIGjnhjvbdKlU0ed7QPgY1htTC86jQ==} - engines: {node: '>=4'} - dev: false + yn@2.0.0: {} - /yocto-queue@0.1.0: - resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} - engines: {node: '>=10'} - dev: false + yocto-queue@0.1.0: {} diff --git a/contracts/programs/log-read-test/Cargo.toml b/contracts/programs/log-read-test/Cargo.toml new file mode 100644 index 000000000..611d8884c --- /dev/null +++ b/contracts/programs/log-read-test/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "log-read-test" +version = "0.1.0" +description = "Created with Anchor" +edition = "2021" + +[lib] +crate-type = ["cdylib", "lib"] +name = "log_read_test" + +[features] +no-entrypoint = [] +no-idl = [] +no-log-ix-name = [] +cpi = ["no-entrypoint"] +default = [] + +[dependencies] +anchor-lang = "0.29.0" diff --git a/contracts/programs/log-read-test/Xargo.toml b/contracts/programs/log-read-test/Xargo.toml new file mode 100644 index 000000000..475fb71ed --- /dev/null +++ b/contracts/programs/log-read-test/Xargo.toml @@ -0,0 +1,2 @@ +[target.bpfel-unknown-unknown.dependencies.std] +features = [] diff --git a/contracts/programs/log-read-test/src/event.rs b/contracts/programs/log-read-test/src/event.rs new file mode 100644 index 000000000..64c4e91c0 --- /dev/null +++ b/contracts/programs/log-read-test/src/event.rs @@ -0,0 +1,7 @@ +use anchor_lang::prelude::*; + +#[event] +pub struct TestEvent { + pub str_val: String, + pub u64_value: u64, +} diff --git a/contracts/programs/log-read-test/src/lib.rs b/contracts/programs/log-read-test/src/lib.rs new file mode 
100644 index 000000000..e2acf26db --- /dev/null +++ b/contracts/programs/log-read-test/src/lib.rs @@ -0,0 +1,25 @@ +use anchor_lang::prelude::*; + +declare_id!("J1zQwrBNBngz26jRPNWsUSZMHJwBwpkoDitXRV95LdK4"); + +pub mod event; + +#[program] +pub mod log_read_test { + use super::*; + + pub fn create_log(_ctx: Context, value: u64) -> Result<()> { + emit!(event::TestEvent { + str_val: "Hello, World!".to_string(), + u64_value: value, + }); + + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialization<'info> { + pub authority: Signer<'info>, + pub system_program: Program<'info, System>, +} diff --git a/integration-tests/common/test_common.go b/integration-tests/common/test_common.go index b351ee73d..94775afb0 100644 --- a/integration-tests/common/test_common.go +++ b/integration-tests/common/test_common.go @@ -253,7 +253,10 @@ func (m *OCRv2TestState) UpgradeContracts(baseDir, subDir string) { "store": m.Common.ChainDetails.ProgramAddresses.Store, } val, ok := ids[programName] - require.True(m.Config.T, ok, fmt.Sprintf("unable to find corresponding key (%s) within %+v", programName, ids)) + if !ok { + val = solclient.BuildProgramIDKeypairPath(programName) + log.Warn().Str("Program", programName).Msg(fmt.Sprintf("falling back to path (%s) unable to find corresponding key (%s) within %+v", val, programName, ids)) + } return val } diff --git a/integration-tests/smoke/event_loader_test.go b/integration-tests/smoke/event_loader_test.go new file mode 100644 index 000000000..cd4bc678c --- /dev/null +++ b/integration-tests/smoke/event_loader_test.go @@ -0,0 +1,296 @@ +package smoke + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/base64" + "fmt" + "os" + "path/filepath" + "sync" + "testing" + "time" + + bin "github.com/gagliardetto/binary" + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + "github.com/gagliardetto/solana-go/rpc/ws" + "github.com/gagliardetto/solana-go/text" + "github.com/stretchr/testify/require" + 
"golang.org/x/sync/errgroup" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + + contract "github.com/smartcontractkit/chainlink-solana/contracts/generated/log_read_test" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/logpoller" + + "github.com/smartcontractkit/chainlink-solana/integration-tests/solclient" + "github.com/smartcontractkit/chainlink-solana/integration-tests/utils" +) + +const programPubKey = "J1zQwrBNBngz26jRPNWsUSZMHJwBwpkoDitXRV95LdK4" + +func TestEventLoader(t *testing.T) { + t.Parallel() + + deadline, ok := t.Deadline() + if !ok { + deadline = time.Now().Add(time.Minute) + } + + ctx, cancel := context.WithDeadline(context.Background(), deadline) + defer cancel() + + // Getting the default localnet private key + privateKey, err := solana.PrivateKeyFromBase58(solclient.DefaultPrivateKeysSolValidator[1]) + require.NoError(t, err) + + rpcURL, wsURL := setupTestValidator(t, privateKey.PublicKey().String()) + rpcClient := rpc.New(rpcURL) + wsClient, err := ws.Connect(ctx, wsURL) + require.NoError(t, err) + + defer wsClient.Close() + + require.NoError(t, err) + client.FundTestAccounts(t, []solana.PublicKey{privateKey.PublicKey()}, rpcURL) + + totalLogsToSend := 30 + parser := &printParser{t: t} + sender := newLogSender(t, rpcClient, wsClient) + collector := logpoller.NewEncodedLogCollector( + rpcClient, + parser, + logger.Nop(), + ) + + require.NoError(t, collector.Start(ctx)) + t.Cleanup(func() { + require.NoError(t, collector.Close()) + }) + + go func(ctx context.Context, sender *logSender, privateKey *solana.PrivateKey) { + var idx int + + for { + idx++ + if idx > totalLogsToSend { + return + } + + timer := time.NewTimer(time.Second) + + select { + case <-ctx.Done(): + timer.Stop() + + return + case <-timer.C: + if err := sender.sendLog(ctx, func(_ solana.PublicKey) *solana.PrivateKey { + return 
privateKey + }, privateKey.PublicKey(), uint64(idx)); err != nil { + t.Logf("failed to send log: %s", err) + } + } + + timer.Stop() + } + }(ctx, sender, &privateKey) + + expectedSumOfLogValues := uint64((totalLogsToSend / 2) * (totalLogsToSend + 1)) + + // eventually process all logs + tests.AssertEventually(t, func() bool { + return parser.Sum() == expectedSumOfLogValues + }) +} + +// upgradeAuthority is admin solana.PrivateKey as string +func setupTestValidator(t *testing.T, upgradeAuthority string) (string, string) { + t.Helper() + + soPath := filepath.Join(utils.ContractsDir, "log_read_test.so") + + _, err := os.Stat(soPath) + if err != nil { + t.Log(err.Error()) + t.FailNow() + } + + flags := []string{ + "--warp-slot", "42", + "--upgradeable-program", + programPubKey, + soPath, + upgradeAuthority, + } + + return client.SetupLocalSolNodeWithFlags(t, flags...) +} + +type testEvent struct { + StrVal string + U64Value uint64 +} + +type printParser struct { + t *testing.T + + mu sync.RWMutex + values []uint64 +} + +func (p *printParser) Process(evt logpoller.ProgramEvent) error { + p.t.Helper() + + data, err := base64.StdEncoding.DecodeString(evt.Data) + if err != nil { + return err + } + + sum := sha256.Sum256([]byte("event:TestEvent")) + sig := sum[:8] + + if bytes.Equal(sig, data[:8]) { + var event testEvent + if err := bin.UnmarshalBorsh(&event, data[8:]); err != nil { + return nil + } + + p.mu.Lock() + p.values = append(p.values, event.U64Value) + p.mu.Unlock() + } + + return nil +} + +func (p *printParser) Sum() uint64 { + p.t.Helper() + + p.mu.RLock() + defer p.mu.RUnlock() + + var sum uint64 + + for _, value := range p.values { + sum += value + } + + return sum +} + +type logSender struct { + t *testing.T + client *rpc.Client + wsClient *ws.Client + txErrGroup errgroup.Group +} + +func newLogSender(t *testing.T, client *rpc.Client, wsClient *ws.Client) *logSender { + return &logSender{ + t: t, + client: client, + wsClient: wsClient, + txErrGroup: 
errgroup.Group{}, + } +} + +func (s *logSender) sendLog( + ctx context.Context, + signerFunc func(key solana.PublicKey) *solana.PrivateKey, + payer solana.PublicKey, + value uint64, +) error { + s.t.Helper() + + pubKey, err := solana.PublicKeyFromBase58(programPubKey) + require.NoError(s.t, err) + contract.SetProgramID(pubKey) + + inst, err := contract.NewCreateLogInstruction(value, payer, solana.SystemProgramID).ValidateAndBuild() + if err != nil { + return err + } + + return s.sendInstruction(ctx, inst, signerFunc, payer) +} + +func (s *logSender) sendInstruction( + ctx context.Context, + inst *contract.Instruction, + signerFunc func(key solana.PublicKey) *solana.PrivateKey, + payer solana.PublicKey, +) error { + s.t.Helper() + + recent, err := s.client.GetRecentBlockhash(ctx, rpc.CommitmentFinalized) + if err != nil { + return err + } + + tx, err := solana.NewTransaction( + []solana.Instruction{ + inst, + }, + recent.Value.Blockhash, + solana.TransactionPayer(payer), + ) + if err != nil { + return err + } + + if _, err = tx.EncodeTree(text.NewTreeEncoder(os.Stdout, "Send test log")); err != nil { + return err + } + + if _, err = tx.Sign(signerFunc); err != nil { + return err + } + + sig, err := s.client.SendTransactionWithOpts( + context.Background(), + tx, + rpc.TransactionOpts{ + PreflightCommitment: rpc.CommitmentConfirmed, + }, + ) + + if err != nil { + return err + } + + s.queueTX(sig, rpc.CommitmentConfirmed) + + return nil +} + +func (s *logSender) queueTX(sig solana.Signature, commitment rpc.CommitmentType) { + s.t.Helper() + + s.txErrGroup.Go(func() error { + sub, err := s.wsClient.SignatureSubscribe( + sig, + commitment, + ) + if err != nil { + return err + } + + defer sub.Unsubscribe() + + res, err := sub.Recv() + if err != nil { + return err + } + + if res.Value.Err != nil { + return fmt.Errorf("transaction confirmation failed: %v", res.Value.Err) + } + + return nil + }) +} diff --git a/pkg/solana/client/test_helpers.go 
b/pkg/solana/client/test_helpers.go index 1e766c02a..5bb8b1cde 100644 --- a/pkg/solana/client/test_helpers.go +++ b/pkg/solana/client/test_helpers.go @@ -3,6 +3,7 @@ package client import ( "bytes" "os/exec" + "strconv" "testing" "time" @@ -15,17 +16,34 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" ) -// SetupLocalSolNode sets up a local solana node via solana cli, and returns the url func SetupLocalSolNode(t *testing.T) string { + t.Helper() + + url, _ := SetupLocalSolNodeWithFlags(t) + + return url +} + +// SetupLocalSolNode sets up a local solana node via solana cli, and returns the url +func SetupLocalSolNodeWithFlags(t *testing.T, flags ...string) (string, string) { + t.Helper() + port := utils.MustRandomPort(t) + portInt, _ := strconv.Atoi(port) + faucetPort := utils.MustRandomPort(t) url := "http://127.0.0.1:" + port - cmd := exec.Command("solana-test-validator", + wsURL := "ws://127.0.0.1:" + strconv.Itoa(portInt+1) + + args := append([]string{ "--reset", "--rpc-port", port, "--faucet-port", faucetPort, "--ledger", t.TempDir(), - ) + }, flags...) + + cmd := exec.Command("solana-test-validator", args...) 
+ var stdErr bytes.Buffer cmd.Stderr = &stdErr var stdOut bytes.Buffer @@ -57,10 +75,13 @@ func SetupLocalSolNode(t *testing.T) string { t.Logf("Cmd output: %s\nCmd error: %s\n", stdOut.String(), stdErr.String()) } require.True(t, ready) - return url + + return url, wsURL } func FundTestAccounts(t *testing.T, keys []solana.PublicKey, url string) { + t.Helper() + for i := range keys { account := keys[i].String() _, err := exec.Command("solana", "airdrop", "100", diff --git a/pkg/solana/logpoller/job.go b/pkg/solana/logpoller/job.go new file mode 100644 index 000000000..1d827a85b --- /dev/null +++ b/pkg/solana/logpoller/job.go @@ -0,0 +1,147 @@ +package logpoller + +import ( + "context" + "fmt" + "time" + + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" +) + +// Job is a function that should be run by the worker group. The context provided +// allows the Job to cancel if the worker group is closed. All other life-cycle +// management should be wrapped within the Job. +type Job interface { + String() string + Run(context.Context) error +} + +type retryableJob struct { + name string + count uint8 + when time.Time + job Job +} + +func (j retryableJob) String() string { + return j.job.String() +} + +func (j retryableJob) Run(ctx context.Context) error { + return j.job.Run(ctx) +} + +type eventDetail struct { + blockNumber uint64 + blockHash solana.Hash + trxIdx int + trxSig solana.Signature +} + +// processEventJob is a job that processes a single event. The parser should be a pure function +// such that no network requests are made and no side effects are produced. 
+type processEventJob struct { + parser ProgramEventProcessor + event ProgramEvent +} + +func (j *processEventJob) String() string { + return "processEventJob" +} + +func (j *processEventJob) Run(_ context.Context) error { + return j.parser.Process(j.event) +} + +// getTransactionsFromBlockJob is a job that fetches transaction signatures from a block and loads +// the job queue with getTransactionLogsJobs for each transaction found in the block. +type getTransactionsFromBlockJob struct { + slotNumber uint64 + client RPCClient + parser ProgramEventProcessor + chJobs chan Job +} + +func (j *getTransactionsFromBlockJob) String() string { + return fmt.Sprintf("getTransactionsFromBlockJob for block: %d", j.slotNumber) +} + +func (j *getTransactionsFromBlockJob) Run(ctx context.Context) error { + var excludeRewards bool + + block, err := j.client.GetBlockWithOpts( + ctx, + j.slotNumber, + &rpc.GetBlockOpts{ + Encoding: solana.EncodingBase64, + Commitment: rpc.CommitmentFinalized, + // get the full transaction details + TransactionDetails: rpc.TransactionDetailsFull, + // exclude rewards + Rewards: &excludeRewards, + }, + ) + if err != nil { + return err + } + + blockSigsOnly, err := j.client.GetBlockWithOpts( + ctx, + j.slotNumber, + &rpc.GetBlockOpts{ + Encoding: solana.EncodingBase64, + Commitment: rpc.CommitmentFinalized, + // get the signatures only + TransactionDetails: rpc.TransactionDetailsSignatures, + // exclude rewards + Rewards: &excludeRewards, + }, + ) + if err != nil { + return err + } + + detail := eventDetail{ + blockHash: block.Blockhash, + } + + if block.BlockHeight != nil { + detail.blockNumber = *block.BlockHeight + } + + if len(block.Transactions) != len(blockSigsOnly.Signatures) { + return fmt.Errorf("block %d has %d transactions but %d signatures", j.slotNumber, len(block.Transactions), len(blockSigsOnly.Signatures)) + } + + for idx, trx := range block.Transactions { + detail.trxIdx = idx + if len(blockSigsOnly.Signatures)-1 <= idx { + 
detail.trxSig = blockSigsOnly.Signatures[idx] + } + + messagesToEvents(trx.Meta.LogMessages, j.parser, detail, j.chJobs) + } + + return nil +} + +func messagesToEvents(messages []string, parser ProgramEventProcessor, detail eventDetail, chJobs chan Job) { + var logIdx uint + for _, outputs := range parseProgramLogs(messages) { + for _, event := range outputs.Events { + logIdx++ + + event.BlockNumber = detail.blockNumber + event.BlockHash = detail.blockHash + event.TransactionHash = detail.trxSig + event.TransactionIndex = detail.trxIdx + event.TransactionLogIndex = logIdx + + chJobs <- &processEventJob{ + parser: parser, + event: event, + } + } + } +} diff --git a/pkg/solana/logpoller/loader.go b/pkg/solana/logpoller/loader.go new file mode 100644 index 000000000..56fcef25c --- /dev/null +++ b/pkg/solana/logpoller/loader.go @@ -0,0 +1,281 @@ +package logpoller + +import ( + "context" + "errors" + "sync/atomic" + "time" + + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" +) + +type ProgramEventProcessor interface { + // Process should take a ProgramEvent and parseProgramLogs it based on log signature + // and expected encoding. Only return errors that cannot be handled and + // should exit further transaction processing on the running thread. + // + // Process should be thread safe. 
+ Process(ProgramEvent) error +} + +type RPCClient interface { + GetLatestBlockhash(ctx context.Context, commitment rpc.CommitmentType) (out *rpc.GetLatestBlockhashResult, err error) + GetBlocks(ctx context.Context, startSlot uint64, endSlot *uint64, commitment rpc.CommitmentType) (out rpc.BlocksResult, err error) + GetBlockWithOpts(context.Context, uint64, *rpc.GetBlockOpts) (*rpc.GetBlockResult, error) + GetSignaturesForAddressWithOpts(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) +} + +const ( + DefaultNextSlotPollingInterval = 1_000 * time.Millisecond +) + +type EncodedLogCollector struct { + // service state management + services.Service + engine *services.Engine + + // dependencies and configuration + client RPCClient + parser ProgramEventProcessor + lggr logger.Logger + rpcTimeLimit time.Duration + + // internal state + chSlot chan uint64 + chBlock chan uint64 + chJobs chan Job + workers *WorkerGroup + + highestSlot atomic.Uint64 + highestSlotLoaded atomic.Uint64 + lastSentSlot atomic.Uint64 +} + +func NewEncodedLogCollector( + client RPCClient, + parser ProgramEventProcessor, + lggr logger.Logger, +) *EncodedLogCollector { + c := &EncodedLogCollector{ + client: client, + parser: parser, + chSlot: make(chan uint64), + chBlock: make(chan uint64, 1), + chJobs: make(chan Job, 1), + lggr: lggr, + rpcTimeLimit: 1 * time.Second, + } + + c.Service, c.engine = services.Config{ + Name: "EncodedLogCollector", + NewSubServices: func(lggr logger.Logger) []services.Service { + c.workers = NewWorkerGroup(DefaultWorkerCount, lggr) + + return []services.Service{c.workers} + }, + Start: c.start, + Close: c.close, + }.NewServiceEngine(lggr) + + return c +} + +func (c *EncodedLogCollector) BackfillForAddress(ctx context.Context, address string, fromSlot uint64) error { + pubKey, err := solana.PublicKeyFromBase58(address) + if err != nil { + return err + } + + var ( + lowestSlotRead uint64 + lowestSlotSig 
solana.Signature + ) + + for lowestSlotRead > fromSlot || lowestSlotRead == 0 { + opts := rpc.GetSignaturesForAddressOpts{ + Commitment: rpc.CommitmentFinalized, + MinContextSlot: &fromSlot, + } + + if lowestSlotRead > 0 { + opts.Before = lowestSlotSig + } + + sigs, err := c.client.GetSignaturesForAddressWithOpts(ctx, pubKey, &opts) + if err != nil { + return err + } + + if len(sigs) == 0 { + break + } + + // signatures ordered from newest to oldest, defined in the Solana RPC docs + for _, sig := range sigs { + lowestSlotSig = sig.Signature + + if sig.Slot >= lowestSlotRead && lowestSlotRead != 0 { + continue + } + + lowestSlotRead = sig.Slot + + if err := c.workers.Do(ctx, &getTransactionsFromBlockJob{ + slotNumber: sig.Slot, + client: c.client, + parser: c.parser, + chJobs: c.chJobs, + }); err != nil { + return err + } + } + } + + return nil +} + +func (c *EncodedLogCollector) start(ctx context.Context) error { + c.engine.Go(c.runSlotPolling) + c.engine.Go(c.runSlotProcessing) + c.engine.Go(c.runBlockProcessing) + c.engine.Go(c.runJobProcessing) + + return nil +} + +func (c *EncodedLogCollector) close() error { + return nil +} + +func (c *EncodedLogCollector) runSlotPolling(ctx context.Context) { + for { + timer := time.NewTimer(DefaultNextSlotPollingInterval) + + select { + case <-ctx.Done(): + timer.Stop() + + return + case <-timer.C: + ctxB, cancel := context.WithTimeout(ctx, c.rpcTimeLimit) + + // not to be run as a job, but as a blocking call + result, err := c.client.GetLatestBlockhash(ctxB, rpc.CommitmentFinalized) + if err != nil { + c.lggr.Error("failed to get latest blockhash", "err", err) + cancel() + + continue + } + + cancel() + + // if the slot is not higher than the highest slot, skip it + if c.lastSentSlot.Load() >= result.Context.Slot { + continue + } + + c.lastSentSlot.Store(result.Context.Slot) + + select { + case c.chSlot <- result.Context.Slot: + default: + } + } + + timer.Stop() + } +} + +func (c *EncodedLogCollector) runSlotProcessing(ctx 
context.Context) { + for { + select { + case <-ctx.Done(): + return + case slot := <-c.chSlot: + if c.highestSlot.Load() >= slot { + continue + } + + c.highestSlot.Store(slot) + + // load blocks in slot range + c.loadRange(ctx, c.highestSlotLoaded.Load()+1, slot) + } + } +} + +func (c *EncodedLogCollector) runBlockProcessing(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case block := <-c.chBlock: + if err := c.workers.Do(ctx, &getTransactionsFromBlockJob{ + slotNumber: block, + client: c.client, + parser: c.parser, + chJobs: c.chJobs, + }); err != nil { + c.lggr.Errorf("failed to add job to queue: %s", err) + } + } + } +} + +func (c *EncodedLogCollector) runJobProcessing(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case job := <-c.chJobs: + if err := c.workers.Do(ctx, job); err != nil { + c.lggr.Errorf("failed to add job to queue: %s", err) + } + } + } +} + +func (c *EncodedLogCollector) loadRange(ctx context.Context, start, end uint64) { + if err := c.loadSlotBlocksRange(ctx, start, end); err != nil { + // a retry will happen anyway on the next round of slots + // so the error is handled by doing nothing + c.lggr.Errorw("failed to load slot blocks range", "start", start, "end", end, "err", err) + + return + } + + c.highestSlotLoaded.Store(end) +} + +func (c *EncodedLogCollector) loadSlotBlocksRange(ctx context.Context, start, end uint64) error { + if start >= end { + return errors.New("the start block must come before the end block") + } + + var ( + result rpc.BlocksResult + err error + ) + + rpcCtx, cancel := context.WithTimeout(ctx, c.rpcTimeLimit) + defer cancel() + + if result, err = c.client.GetBlocks(rpcCtx, start, &end, rpc.CommitmentFinalized); err != nil { + return err + } + + for _, block := range result { + select { + case <-ctx.Done(): + return nil + case c.chBlock <- block: + } + } + + return nil +} diff --git a/pkg/solana/logpoller/loader_test.go b/pkg/solana/logpoller/loader_test.go new file 
mode 100644 index 000000000..69a37702b --- /dev/null +++ b/pkg/solana/logpoller/loader_test.go @@ -0,0 +1,366 @@ +package logpoller_test + +import ( + "context" + "crypto/rand" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/logpoller" + mocks "github.com/smartcontractkit/chainlink-solana/pkg/solana/logpoller/mocks" +) + +var ( + messages = []string{ + "Program J1zQwrBNBngz26jRPNWsUSZMHJwBwpkoDitXRV95LdK4 invoke [1]", + "Program log: Instruction: CreateLog", + "Program data: HDQnaQjSWwkNAAAASGVsbG8sIFdvcmxkISoAAAAAAAAA", + "Program J1zQwrBNBngz26jRPNWsUSZMHJwBwpkoDitXRV95LdK4 consumed 1477 of 200000 compute units", + "Program J1zQwrBNBngz26jRPNWsUSZMHJwBwpkoDitXRV95LdK4 success", + } +) + +func TestEncodedLogCollector_StartClose(t *testing.T) { + client := new(mocks.RPCClient) + ctx := tests.Context(t) + + collector := logpoller.NewEncodedLogCollector(client, nil, logger.Nop()) + + assert.NoError(t, collector.Start(ctx)) + assert.NoError(t, collector.Close()) +} + +func TestEncodedLogCollector_ParseSingleEvent(t *testing.T) { + client := new(mocks.RPCClient) + parser := new(testParser) + ctx := tests.Context(t) + + collector := logpoller.NewEncodedLogCollector(client, parser, logger.Nop()) + + require.NoError(t, collector.Start(ctx)) + t.Cleanup(func() { + require.NoError(t, collector.Close()) + }) + + slot := uint64(42) + sig := solana.Signature{2, 1, 4, 2} + blockHeight := uint64(21) + + client.EXPECT().GetLatestBlockhash(mock.Anything, rpc.CommitmentFinalized).Return(&rpc.GetLatestBlockhashResult{ + RPCContext: rpc.RPCContext{ + Context: rpc.Context{ + Slot: slot, + }, + }, + }, nil) + 
+ client.EXPECT().GetBlocks(mock.Anything, uint64(1), mock.MatchedBy(func(val *uint64) bool { + return val != nil && *val == slot + }), mock.Anything).Return(rpc.BlocksResult{slot}, nil) + + client.EXPECT().GetBlockWithOpts(mock.Anything, slot, mock.Anything).Return(&rpc.GetBlockResult{ + Transactions: []rpc.TransactionWithMeta{ + { + Meta: &rpc.TransactionMeta{ + LogMessages: messages, + }, + }, + }, + Signatures: []solana.Signature{sig}, + BlockHeight: &blockHeight, + }, nil).Twice() + + tests.AssertEventually(t, func() bool { + return parser.Called() + }) + + client.AssertExpectations(t) +} + +func TestEncodedLogCollector_BackfillForAddress(t *testing.T) { + client := new(mocks.RPCClient) + parser := new(testParser) + ctx := tests.Context(t) + + collector := logpoller.NewEncodedLogCollector(client, parser, logger.Nop()) + + require.NoError(t, collector.Start(ctx)) + t.Cleanup(func() { + require.NoError(t, collector.Close()) + }) + + pubKey := solana.PublicKey{2, 1, 4, 2} + slots := []uint64{44, 43, 42} + sigs := make([]solana.Signature, len(slots)*2) + blockHeights := []uint64{21, 22, 23, 50} + + for idx := range len(sigs) { + _, _ = rand.Read(sigs[idx][:]) + } + + // GetLatestBlockhash might be called at start-up; make it take some time because the result isn't needed for this test + client.EXPECT().GetLatestBlockhash(mock.Anything, mock.Anything).Return(&rpc.GetLatestBlockhashResult{ + RPCContext: rpc.RPCContext{ + Context: rpc.Context{ + Slot: slots[0], + }, + }, + Value: &rpc.LatestBlockhashResult{ + LastValidBlockHeight: 42, + }, + }, nil).After(2 * time.Second).Maybe() + + client.EXPECT(). + GetSignaturesForAddressWithOpts(mock.Anything, pubKey, mock.MatchedBy(func(opts *rpc.GetSignaturesForAddressOpts) bool { + return opts != nil && opts.Before.String() == solana.Signature{}.String() + })). 
+ Return([]*rpc.TransactionSignature{ + {Slot: slots[0], Signature: sigs[0]}, + {Slot: slots[0], Signature: sigs[1]}, + {Slot: slots[1], Signature: sigs[2]}, + {Slot: slots[1], Signature: sigs[3]}, + {Slot: slots[2], Signature: sigs[4]}, + {Slot: slots[2], Signature: sigs[5]}, + }, nil) + + client.EXPECT().GetSignaturesForAddressWithOpts(mock.Anything, pubKey, mock.Anything).Return([]*rpc.TransactionSignature{}, nil) + + for idx := range len(slots) { + client.EXPECT().GetBlockWithOpts(mock.Anything, slots[idx], mock.Anything).Return(&rpc.GetBlockResult{ + Transactions: []rpc.TransactionWithMeta{ + { + Meta: &rpc.TransactionMeta{ + LogMessages: messages, + }, + }, + { + Meta: &rpc.TransactionMeta{ + LogMessages: messages, + }, + }, + }, + Signatures: []solana.Signature{sigs[idx*2], sigs[(idx*2)+1]}, + BlockHeight: &blockHeights[idx], + }, nil).Twice() + } + + assert.NoError(t, collector.BackfillForAddress(ctx, pubKey.String(), 42)) + + tests.AssertEventually(t, func() bool { + return parser.Count() == 6 + }) + + client.AssertExpectations(t) +} + +func BenchmarkEncodedLogCollector(b *testing.B) { + ctx := tests.Context(b) + + ticker := time.NewTimer(500 * time.Millisecond) + defer ticker.Stop() + + parser := new(testParser) + blockProducer := &testBlockProducer{ + b: b, + nextSlot: 10, + blockSigs: make(map[uint64][]solana.Signature), + sigs: make(map[string]bool), + } + + collector := logpoller.NewEncodedLogCollector(blockProducer, parser, logger.Nop()) + + require.NoError(b, collector.Start(ctx)) + b.Cleanup(func() { + require.NoError(b, collector.Close()) + }) + + b.ReportAllocs() + b.ResetTimer() + +BenchLoop: + for i := 0; i < b.N; i++ { + select { + case <-ticker.C: + blockProducer.incrementSlot() + case <-ctx.Done(): + break BenchLoop + default: + blockProducer.makeEvent() + } + } + + b.ReportMetric(float64(parser.Count())/b.Elapsed().Seconds(), "events/sec") + b.ReportMetric(float64(blockProducer.Count())/b.Elapsed().Seconds(), "rcp_calls/sec") +} + +type 
testBlockProducer struct { + b *testing.B + + mu sync.RWMutex + nextSlot uint64 + blockSigs map[uint64][]solana.Signature + sigs map[string]bool + count uint64 +} + +func (p *testBlockProducer) incrementSlot() { + p.b.Helper() + + p.mu.Lock() + defer p.mu.Unlock() + + p.nextSlot++ + p.blockSigs[p.nextSlot] = make([]solana.Signature, 0, 100) +} + +func (p *testBlockProducer) makeEvent() { + p.b.Helper() + + p.mu.Lock() + defer p.mu.Unlock() + + var sig solana.Signature + + _, _ = rand.Read(sig[:]) + + p.blockSigs[p.nextSlot] = append(p.blockSigs[p.nextSlot], sig) + p.sigs[sig.String()] = true +} + +func (p *testBlockProducer) Count() uint64 { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.count +} + +func (p *testBlockProducer) GetLatestBlockhash(_ context.Context, _ rpc.CommitmentType) (out *rpc.GetLatestBlockhashResult, err error) { + p.b.Helper() + + p.mu.Lock() + p.count++ + p.mu.Unlock() + + p.mu.RLock() + defer p.mu.RUnlock() + + return &rpc.GetLatestBlockhashResult{ + RPCContext: rpc.RPCContext{ + Context: rpc.Context{ + Slot: p.nextSlot, + }, + }, + }, nil +} + +func (p *testBlockProducer) GetBlocks(_ context.Context, startSlot uint64, endSlot *uint64, _ rpc.CommitmentType) (out rpc.BlocksResult, err error) { + p.b.Helper() + + p.mu.Lock() + p.count++ + p.mu.Unlock() + + blocks := make([]uint64, *endSlot-startSlot) + for idx := range blocks { + blocks[idx] = startSlot + uint64(idx) + } + + return rpc.BlocksResult(blocks), nil +} + +func (p *testBlockProducer) GetBlockWithOpts(_ context.Context, block uint64, opts *rpc.GetBlockOpts) (*rpc.GetBlockResult, error) { + p.b.Helper() + + p.mu.Lock() + defer p.mu.Unlock() + + var result rpc.GetBlockResult + + sigs := p.blockSigs[block] + + switch opts.TransactionDetails { + case rpc.TransactionDetailsFull: + result.Transactions = make([]rpc.TransactionWithMeta, len(sigs)) + for idx, sig := range sigs { + delete(p.sigs, sig.String()) + + result.Transactions[idx] = rpc.TransactionWithMeta{ + Slot: block, + Meta: 
&rpc.TransactionMeta{ + LogMessages: messages, + }, + } + } + case rpc.TransactionDetailsSignatures: + result.Signatures = sigs + delete(p.blockSigs, block) + case rpc.TransactionDetailsNone: + fallthrough + default: + } + + p.count++ + result.BlockHeight = &block + + return &result, nil +} + +func (p *testBlockProducer) GetSignaturesForAddressWithOpts(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) { + p.b.Helper() + + return nil, nil +} + +func (p *testBlockProducer) GetTransaction(_ context.Context, sig solana.Signature, _ *rpc.GetTransactionOpts) (*rpc.GetTransactionResult, error) { + p.b.Helper() + + p.mu.Lock() + defer p.mu.Unlock() + + var msgs []string + + p.count++ + _, ok := p.sigs[sig.String()] + if ok { + msgs = messages + } + + delete(p.sigs, sig.String()) + + return &rpc.GetTransactionResult{ + Meta: &rpc.TransactionMeta{ + LogMessages: msgs, + }, + }, nil +} + +type testParser struct { + called atomic.Bool + count atomic.Uint64 +} + +func (p *testParser) Process(event logpoller.ProgramEvent) error { + p.called.Store(true) + p.count.Store(p.count.Load() + 1) + + return nil +} + +func (p *testParser) Called() bool { + return p.called.Load() +} + +func (p *testParser) Count() uint64 { + return p.count.Load() +} diff --git a/pkg/solana/logpoller/log_data_parser.go b/pkg/solana/logpoller/log_data_parser.go new file mode 100644 index 000000000..4cfd04470 --- /dev/null +++ b/pkg/solana/logpoller/log_data_parser.go @@ -0,0 +1,143 @@ +package logpoller + +import ( + "regexp" + "strconv" + "strings" + + "github.com/gagliardetto/solana-go" +) + +var ( + invokeMatcher = regexp.MustCompile(`Program (\w*) invoke \[(\d)\]`) + consumedMatcher = regexp.MustCompile(`Program \w* consumed (\d*) (.*)`) + logMatcher = regexp.MustCompile(`Program log: (.*)`) + dataMatcher = regexp.MustCompile(`Program data: (.*)`) +) + +type BlockData struct { + BlockNumber uint64 + BlockHash solana.Hash + TransactionHash 
solana.Signature + TransactionIndex int + TransactionLogIndex uint +} + +type ProgramLog struct { + BlockData + Text string + Prefix string +} + +type ProgramEvent struct { + BlockData + Prefix string + Data string +} + +type ProgramOutput struct { + Program string + Logs []ProgramLog + Events []ProgramEvent + ComputeUnits uint + Truncated bool + Failed bool + ErrorText string +} + +func prefixBuilder(depth int) string { + return strings.Repeat(">", depth) +} + +func parseProgramLogs(logs []string) []ProgramOutput { + var depth int + + instLogs := []ProgramOutput{} + lastLogIdx := -1 + + for _, log := range logs { + if strings.HasPrefix(log, "Program log:") { + logDataMatches := logMatcher.FindStringSubmatch(log) + + if len(logDataMatches) <= 1 || lastLogIdx < 0 { + continue + } + + // this is a general log + instLogs[lastLogIdx].Logs = append(instLogs[lastLogIdx].Logs, ProgramLog{ + Prefix: prefixBuilder(depth), + Text: logDataMatches[1], + }) + } else if strings.HasPrefix(log, "Program data:") { + if lastLogIdx < 0 { + continue + } + + dataMatches := dataMatcher.FindStringSubmatch(log) + + if len(dataMatches) > 1 { + instLogs[lastLogIdx].Events = append(instLogs[lastLogIdx].Events, ProgramEvent{ + Prefix: prefixBuilder(depth), + Data: dataMatches[1], + }) + } + } else if strings.HasPrefix(log, "Log truncated") { + if lastLogIdx < 0 { + continue + } + + instLogs[lastLogIdx].Truncated = true + } else { + matches := invokeMatcher.FindStringSubmatch(log) + + if len(matches) > 0 { + if depth == 0 { + instLogs = append(instLogs, ProgramOutput{ + Program: matches[1], + }) + + lastLogIdx = len(instLogs) - 1 + } + + depth++ + } else if strings.Contains(log, "success") { + depth-- + } else if strings.Contains(log, "failed") { + if lastLogIdx < 0 { + continue + } + + instLogs[lastLogIdx].Failed = true + + idx := strings.Index(log, ": ") + 2 + + // failed to verify log of previous program so reset depth and print full log + if strings.HasPrefix(log, "failed") { + depth++ + } 
+ + instLogs[lastLogIdx].ErrorText = log[idx:] + + depth-- + } else { + if depth == 0 { + instLogs = append(instLogs, ProgramOutput{}) + lastLogIdx = len(instLogs) - 1 + } + + if lastLogIdx < 0 { + continue + } + + matches := consumedMatcher.FindStringSubmatch(log) + if len(matches) == 3 && depth == 1 { + if val, err := strconv.Atoi(matches[1]); err == nil { + instLogs[lastLogIdx].ComputeUnits = uint(val) //nolint:gosec + } + } + } + } + } + + return instLogs +} diff --git a/pkg/solana/logpoller/log_data_parser_test.go b/pkg/solana/logpoller/log_data_parser_test.go new file mode 100644 index 000000000..49123638c --- /dev/null +++ b/pkg/solana/logpoller/log_data_parser_test.go @@ -0,0 +1,203 @@ +package logpoller + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLogDataParse_Error(t *testing.T) { + t.Parallel() + + // logs include 2 program invocations + logs := []string{ + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success", + "Program cjg3oHmg9uuPsP8D6g29NWvhySJkdYdAo9D25PRbKXJ invoke [1]", + "Program log: AnchorError thrown in programs/ocr2/src/lib.rs:639. Error Code: StaleReport. Error Number: 6003. 
Error Message: Stale report.", + "Program cjg3oHmg9uuPsP8D6g29NWvhySJkdYdAo9D25PRbKXJ consumed 6504 of 199850 compute units", + "Program cjg3oHmg9uuPsP8D6g29NWvhySJkdYdAo9D25PRbKXJ failed: custom program error: 0x1773", + } + + output := parseProgramLogs(logs) + + require.Len(t, output, 2) + + // first program has no logs, no events, no compute units and succeeded + assert.Equal(t, ProgramOutput{ + Program: "ComputeBudget111111111111111111111111111111", + }, output[0]) + + // second program should have one log, no events, 6504 compute units and failed with error message + expected := ProgramOutput{ + Program: "cjg3oHmg9uuPsP8D6g29NWvhySJkdYdAo9D25PRbKXJ", + Logs: []ProgramLog{ + { + Prefix: ">", + Text: "AnchorError thrown in programs/ocr2/src/lib.rs:639. Error Code: StaleReport. Error Number: 6003. Error Message: Stale report.", + }, + }, + ComputeUnits: 6504, + Failed: true, + ErrorText: "custom program error: 0x1773", + } + + assert.Equal(t, expected, output[1]) +} + +func TestLogDataParse_SuccessBasic(t *testing.T) { + t.Parallel() + + // logs include 2 program invocations + logs := []string{ + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success", + "Program SAGE2HAwep459SNq61LHvjxPk4pLPEJLoMETef7f7EE invoke [1]", + "Program log: Instruction: IdleToLoadingBay", + "Program log: Current state: Idle(Idle { sector: [13, 37] })", + "Program SAGE2HAwep459SNq61LHvjxPk4pLPEJLoMETef7f7EE consumed 16850 of 199850 compute units", + "Program SAGE2HAwep459SNq61LHvjxPk4pLPEJLoMETef7f7EE success", + } + + output := parseProgramLogs(logs) + + require.Len(t, output, 2) + + // first program has no logs, no events, no compute units and succeeded + assert.Equal(t, ProgramOutput{ + Program: "ComputeBudget111111111111111111111111111111", + }, output[0]) + + // second program should have one log, no events, 6504 compute units and failed with error message + expected := ProgramOutput{ + Program: 
"SAGE2HAwep459SNq61LHvjxPk4pLPEJLoMETef7f7EE", + Logs: []ProgramLog{ + {Prefix: ">", Text: "Instruction: IdleToLoadingBay"}, + {Prefix: ">", Text: "Current state: Idle(Idle { sector: [13, 37] })"}, + }, + ComputeUnits: 16850, + } + + assert.Equal(t, expected, output[1]) +} + +func TestLogDataParse_SuccessComplex(t *testing.T) { + t.Parallel() + + // example program log output from solana explorer + // tx_sig: 54tfPQgreeturXgQovpB6dBmprhEqaK6JoVCEsVRSBCG9wJrqAnezUWPwEN11PpEE2mAW5dD9xHpSdZD7krafHia + // slot: 302_573_728 + logs := []string{ + // [0] + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success", + // [1] + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success", + // [2] System program + "Program 11111111111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 success", + // [3] Token program + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [1]", + "Program log: Instruction: InitializeAccount", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 3443 of 99550 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + // [4] Associated token program + "Program ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL invoke [1]", + "Program log: Create", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", + "Program log: Instruction: GetAccountDataSize", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 1569 of 89240 compute units", + "Program return: TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA pQAAAAAAAAA=", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program log: Initialize the associated token account", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", + "Program log: 
Instruction: InitializeImmutableOwner", + "Program log: Please upgrade to SPL Token 2022 for immutable owner support", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 1405 of 82653 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", + "Program log: Instruction: InitializeAccount3", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4188 of 78771 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL consumed 21807 of 96107 compute units", + "Program ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL success", + // [5] + "Program 675kPX9MHTjS2zt1qfr1NYHuzeLXfQM9H24wFSUt1Mp8 invoke [1]", + "Program log: ray_log: AwDC6wsAAAAAHxsZjgkAAAACAAAAAAAAAADC6wsAAAAAMW3pEz4AAAD7j2wjcDsAAAXbgGALAAAA", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", + "Program log: Instruction: Transfer", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4736 of 56164 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", + "Program log: Instruction: Transfer", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4645 of 48447 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program 675kPX9MHTjS2zt1qfr1NYHuzeLXfQM9H24wFSUt1Mp8 consumed 31576 of 74300 compute units", + "Program 675kPX9MHTjS2zt1qfr1NYHuzeLXfQM9H24wFSUt1Mp8 success", + // [6] + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [1]", + "Program log: Instruction: CloseAccount", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 2915 of 42724 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + // [7] System program + "Program 11111111111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 
success",
+		// [8]
+		"Program 4pP8eDKACuV7T2rbFPE8CHxGKDYAzSdRsdMsGvz2k4oc invoke [1]",
+		"Program log: Received timestamp: 1732124122",
+		"Program log: Current timestamp: 1732124102",
+		"Program log: The provided timestamp is valid.",
+		"Program 4pP8eDKACuV7T2rbFPE8CHxGKDYAzSdRsdMsGvz2k4oc consumed 1661 of 39659 compute units",
+		"Program 4pP8eDKACuV7T2rbFPE8CHxGKDYAzSdRsdMsGvz2k4oc success",
+		// [9] System program
+		"Program 11111111111111111111111111111111 invoke [1]",
+		"Program 11111111111111111111111111111111 success",
+		// [10]
+		"Program HQ2UUt18uJqKaQFJhgV9zaTdQxUZjNrsKFgoEDquBkcx invoke [1]",
+		"Program log: Powered by bloXroute Trader Api",
+		"Program HQ2UUt18uJqKaQFJhgV9zaTdQxUZjNrsKFgoEDquBkcx consumed 803 of 37848 compute units",
+		"Program HQ2UUt18uJqKaQFJhgV9zaTdQxUZjNrsKFgoEDquBkcx success",
+	}
+
+	output := parseProgramLogs(logs)
+
+	require.Len(t, output, 11)
+
+	// first two programs (outputs 0 and 1) have no logs, no events, no compute units and succeeded
+	// NOTE: `range 2` iterates idx 0 and 1; `range 1` would only have checked output[0]
+	for idx := range 2 {
+		assert.Equal(t, ProgramOutput{
+			Program: "ComputeBudget111111111111111111111111111111",
+		}, output[idx])
+	}
+
+	expectedSystemProgramIdxs := []int{2, 7, 9}
+	for _, idx := range expectedSystemProgramIdxs {
+		assert.Equal(t, ProgramOutput{
+			Program: "11111111111111111111111111111111",
+		}, output[idx])
+	}
+
+	require.Len(t, output[4].Logs, 6)
+}
+
+func TestLogDataParse_Events(t *testing.T) {
+	t.Parallel()
+
+	// example program event output from test contract
+	logs := []string{
+		"Program J1zQwrBNBngz26jRPNWsUSZMHJwBwpkoDitXRV95LdK4 invoke [1]",
+		"Program log: Instruction: CreateLog",
+		"Program data: HDQnaQjSWwkNAAAASGVsbG8sIFdvcmxkISoAAAAAAAAA", // base64 encoded; borsh encoded with identifier
+		"Program J1zQwrBNBngz26jRPNWsUSZMHJwBwpkoDitXRV95LdK4 consumed 1477 of 200000 compute units",
+		"Program J1zQwrBNBngz26jRPNWsUSZMHJwBwpkoDitXRV95LdK4 success",
+	}
+
+	output := parseProgramLogs(logs)
+
+	require.Len(t, output, 1)
+	assert.Len(t, output[0].Events, 1)
+}
diff --git
a/pkg/solana/logpoller/mocks/rpc_client.go b/pkg/solana/logpoller/mocks/rpc_client.go new file mode 100644 index 000000000..851eba9ec --- /dev/null +++ b/pkg/solana/logpoller/mocks/rpc_client.go @@ -0,0 +1,280 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + rpc "github.com/gagliardetto/solana-go/rpc" + + solana "github.com/gagliardetto/solana-go" +) + +// RPCClient is an autogenerated mock type for the RPCClient type +type RPCClient struct { + mock.Mock +} + +type RPCClient_Expecter struct { + mock *mock.Mock +} + +func (_m *RPCClient) EXPECT() *RPCClient_Expecter { + return &RPCClient_Expecter{mock: &_m.Mock} +} + +// GetBlockWithOpts provides a mock function with given fields: _a0, _a1, _a2 +func (_m *RPCClient) GetBlockWithOpts(_a0 context.Context, _a1 uint64, _a2 *rpc.GetBlockOpts) (*rpc.GetBlockResult, error) { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for GetBlockWithOpts") + } + + var r0 *rpc.GetBlockResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, *rpc.GetBlockOpts) (*rpc.GetBlockResult, error)); ok { + return rf(_a0, _a1, _a2) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, *rpc.GetBlockOpts) *rpc.GetBlockResult); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.GetBlockResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, *rpc.GetBlockOpts) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RPCClient_GetBlockWithOpts_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockWithOpts' +type RPCClient_GetBlockWithOpts_Call struct { + *mock.Call +} + +// GetBlockWithOpts is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 uint64 +// - _a2 *rpc.GetBlockOpts +func (_e *RPCClient_Expecter) 
GetBlockWithOpts(_a0 interface{}, _a1 interface{}, _a2 interface{}) *RPCClient_GetBlockWithOpts_Call { + return &RPCClient_GetBlockWithOpts_Call{Call: _e.mock.On("GetBlockWithOpts", _a0, _a1, _a2)} +} + +func (_c *RPCClient_GetBlockWithOpts_Call) Run(run func(_a0 context.Context, _a1 uint64, _a2 *rpc.GetBlockOpts)) *RPCClient_GetBlockWithOpts_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(*rpc.GetBlockOpts)) + }) + return _c +} + +func (_c *RPCClient_GetBlockWithOpts_Call) Return(_a0 *rpc.GetBlockResult, _a1 error) *RPCClient_GetBlockWithOpts_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *RPCClient_GetBlockWithOpts_Call) RunAndReturn(run func(context.Context, uint64, *rpc.GetBlockOpts) (*rpc.GetBlockResult, error)) *RPCClient_GetBlockWithOpts_Call { + _c.Call.Return(run) + return _c +} + +// GetBlocks provides a mock function with given fields: ctx, startSlot, endSlot, commitment +func (_m *RPCClient) GetBlocks(ctx context.Context, startSlot uint64, endSlot *uint64, commitment rpc.CommitmentType) (rpc.BlocksResult, error) { + ret := _m.Called(ctx, startSlot, endSlot, commitment) + + if len(ret) == 0 { + panic("no return value specified for GetBlocks") + } + + var r0 rpc.BlocksResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, *uint64, rpc.CommitmentType) (rpc.BlocksResult, error)); ok { + return rf(ctx, startSlot, endSlot, commitment) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, *uint64, rpc.CommitmentType) rpc.BlocksResult); ok { + r0 = rf(ctx, startSlot, endSlot, commitment) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(rpc.BlocksResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, *uint64, rpc.CommitmentType) error); ok { + r1 = rf(ctx, startSlot, endSlot, commitment) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RPCClient_GetBlocks_Call is a *mock.Call that shadows Run/Return methods with type 
explicit version for method 'GetBlocks' +type RPCClient_GetBlocks_Call struct { + *mock.Call +} + +// GetBlocks is a helper method to define mock.On call +// - ctx context.Context +// - startSlot uint64 +// - endSlot *uint64 +// - commitment rpc.CommitmentType +func (_e *RPCClient_Expecter) GetBlocks(ctx interface{}, startSlot interface{}, endSlot interface{}, commitment interface{}) *RPCClient_GetBlocks_Call { + return &RPCClient_GetBlocks_Call{Call: _e.mock.On("GetBlocks", ctx, startSlot, endSlot, commitment)} +} + +func (_c *RPCClient_GetBlocks_Call) Run(run func(ctx context.Context, startSlot uint64, endSlot *uint64, commitment rpc.CommitmentType)) *RPCClient_GetBlocks_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(*uint64), args[3].(rpc.CommitmentType)) + }) + return _c +} + +func (_c *RPCClient_GetBlocks_Call) Return(out rpc.BlocksResult, err error) *RPCClient_GetBlocks_Call { + _c.Call.Return(out, err) + return _c +} + +func (_c *RPCClient_GetBlocks_Call) RunAndReturn(run func(context.Context, uint64, *uint64, rpc.CommitmentType) (rpc.BlocksResult, error)) *RPCClient_GetBlocks_Call { + _c.Call.Return(run) + return _c +} + +// GetLatestBlockhash provides a mock function with given fields: ctx, commitment +func (_m *RPCClient) GetLatestBlockhash(ctx context.Context, commitment rpc.CommitmentType) (*rpc.GetLatestBlockhashResult, error) { + ret := _m.Called(ctx, commitment) + + if len(ret) == 0 { + panic("no return value specified for GetLatestBlockhash") + } + + var r0 *rpc.GetLatestBlockhashResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, rpc.CommitmentType) (*rpc.GetLatestBlockhashResult, error)); ok { + return rf(ctx, commitment) + } + if rf, ok := ret.Get(0).(func(context.Context, rpc.CommitmentType) *rpc.GetLatestBlockhashResult); ok { + r0 = rf(ctx, commitment) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.GetLatestBlockhashResult) + } + } + + if rf, ok := 
ret.Get(1).(func(context.Context, rpc.CommitmentType) error); ok { + r1 = rf(ctx, commitment) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RPCClient_GetLatestBlockhash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestBlockhash' +type RPCClient_GetLatestBlockhash_Call struct { + *mock.Call +} + +// GetLatestBlockhash is a helper method to define mock.On call +// - ctx context.Context +// - commitment rpc.CommitmentType +func (_e *RPCClient_Expecter) GetLatestBlockhash(ctx interface{}, commitment interface{}) *RPCClient_GetLatestBlockhash_Call { + return &RPCClient_GetLatestBlockhash_Call{Call: _e.mock.On("GetLatestBlockhash", ctx, commitment)} +} + +func (_c *RPCClient_GetLatestBlockhash_Call) Run(run func(ctx context.Context, commitment rpc.CommitmentType)) *RPCClient_GetLatestBlockhash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(rpc.CommitmentType)) + }) + return _c +} + +func (_c *RPCClient_GetLatestBlockhash_Call) Return(out *rpc.GetLatestBlockhashResult, err error) *RPCClient_GetLatestBlockhash_Call { + _c.Call.Return(out, err) + return _c +} + +func (_c *RPCClient_GetLatestBlockhash_Call) RunAndReturn(run func(context.Context, rpc.CommitmentType) (*rpc.GetLatestBlockhashResult, error)) *RPCClient_GetLatestBlockhash_Call { + _c.Call.Return(run) + return _c +} + +// GetSignaturesForAddressWithOpts provides a mock function with given fields: _a0, _a1, _a2 +func (_m *RPCClient) GetSignaturesForAddressWithOpts(_a0 context.Context, _a1 solana.PublicKey, _a2 *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for GetSignaturesForAddressWithOpts") + } + + var r0 []*rpc.TransactionSignature + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error)); 
ok { + return rf(_a0, _a1, _a2) + } + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) []*rpc.TransactionSignature); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*rpc.TransactionSignature) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RPCClient_GetSignaturesForAddressWithOpts_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSignaturesForAddressWithOpts' +type RPCClient_GetSignaturesForAddressWithOpts_Call struct { + *mock.Call +} + +// GetSignaturesForAddressWithOpts is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 solana.PublicKey +// - _a2 *rpc.GetSignaturesForAddressOpts +func (_e *RPCClient_Expecter) GetSignaturesForAddressWithOpts(_a0 interface{}, _a1 interface{}, _a2 interface{}) *RPCClient_GetSignaturesForAddressWithOpts_Call { + return &RPCClient_GetSignaturesForAddressWithOpts_Call{Call: _e.mock.On("GetSignaturesForAddressWithOpts", _a0, _a1, _a2)} +} + +func (_c *RPCClient_GetSignaturesForAddressWithOpts_Call) Run(run func(_a0 context.Context, _a1 solana.PublicKey, _a2 *rpc.GetSignaturesForAddressOpts)) *RPCClient_GetSignaturesForAddressWithOpts_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(solana.PublicKey), args[2].(*rpc.GetSignaturesForAddressOpts)) + }) + return _c +} + +func (_c *RPCClient_GetSignaturesForAddressWithOpts_Call) Return(_a0 []*rpc.TransactionSignature, _a1 error) *RPCClient_GetSignaturesForAddressWithOpts_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *RPCClient_GetSignaturesForAddressWithOpts_Call) RunAndReturn(run func(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error)) 
*RPCClient_GetSignaturesForAddressWithOpts_Call { + _c.Call.Return(run) + return _c +} + +// NewRPCClient creates a new instance of RPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRPCClient(t interface { + mock.TestingT + Cleanup(func()) +}) *RPCClient { + mock := &RPCClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/solana/logpoller/worker.go b/pkg/solana/logpoller/worker.go new file mode 100644 index 000000000..0e7d31df0 --- /dev/null +++ b/pkg/solana/logpoller/worker.go @@ -0,0 +1,368 @@ +package logpoller + +import ( + "context" + "crypto/rand" + "fmt" + "math/big" + "sync" + "sync/atomic" + "time" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" +) + +var ( + ErrProcessStopped = fmt.Errorf("worker process has stopped") + ErrContextCancelled = fmt.Errorf("worker context cancelled") +) + +const ( + // DefaultMaxRetryCount is the number of times a job will be retried before being dropped. + DefaultMaxRetryCount = 6 + // DefaultNotifyRetryDepth is the retry queue depth at which the worker group will log a warning. + DefaultNotifyRetryDepth = 200 + // DefaultNotifyQueueDepth is the queue depth at which the worker group will log a warning. + DefaultNotifyQueueDepth = 100 + // DefaultWorkerCount is the default number of workers in a WorkerGroup. 
+ DefaultWorkerCount = 10 +) + +type worker struct { + Name string + Queue chan *worker + Retry chan Job + Lggr logger.Logger +} + +func (w *worker) Do(ctx context.Context, job Job) { + if ctx.Err() == nil { + if err := job.Run(ctx); err != nil { + w.Lggr.Errorf("job %s failed with error; retrying: %s", job, err) + w.Retry <- job + } + } + + // put itself back on the queue when done + select { + case w.Queue <- w: + default: + } +} + +type WorkerGroup struct { + // service state management + services.Service + engine *services.Engine + + // dependencies and configuration + maxWorkers int + maxRetryCount uint8 + lggr logger.Logger + + // worker group state + workers chan *worker + queue *queue[Job] + input chan Job + chInputNotify chan struct{} + + chStopInputs chan struct{} + queueClosed atomic.Bool + + // retry queue + chRetry chan Job + mu sync.RWMutex + retryMap map[string]retryableJob +} + +func NewWorkerGroup(workers int, lggr logger.Logger) *WorkerGroup { + g := &WorkerGroup{ + maxWorkers: workers, + maxRetryCount: DefaultMaxRetryCount, + workers: make(chan *worker, workers), + lggr: lggr, + queue: newQueue[Job](0), + input: make(chan Job, 1), + chInputNotify: make(chan struct{}, 1), + chStopInputs: make(chan struct{}), + chRetry: make(chan Job, 1), + retryMap: make(map[string]retryableJob), + } + + g.Service, g.engine = services.Config{ + Name: "WorkerGroup", + Start: g.start, + Close: g.close, + }.NewServiceEngine(lggr) + + for idx := range workers { + g.workers <- &worker{ + Name: fmt.Sprintf("worker-%d", idx+1), + Queue: g.workers, + Retry: g.chRetry, + Lggr: g.lggr, + } + } + + return g +} + +var _ services.Service = &WorkerGroup{} + +func (g *WorkerGroup) start(ctx context.Context) error { + g.engine.Go(g.runQueuing) + g.engine.Go(g.runProcessing) + g.engine.Go(g.runRetryQueue) + g.engine.Go(g.runRetries) + + return nil +} + +func (g *WorkerGroup) close() error { + if !g.queueClosed.Load() { + g.queueClosed.Store(true) + close(g.chStopInputs) + } + + 
return nil +} + +// Do adds a new work item onto the work queue. This function blocks until +// the work queue clears up or the context is cancelled. This allows a max wait +// time for the queue to open. Or a context can wrap a collection of jobs that +// need to be run and when the context cancels, the jobs don't get added to the +// queue. +func (g *WorkerGroup) Do(ctx context.Context, job Job) error { + if ctx.Err() != nil { + return fmt.Errorf("%w; work not added to queue", ErrContextCancelled) + } + + if g.queueClosed.Load() { + return fmt.Errorf("%w; work not added to queue", ErrProcessStopped) + } + + select { + case g.input <- job: + return nil + case <-ctx.Done(): + return fmt.Errorf("%w; work not added to queue", ErrContextCancelled) + case <-g.chStopInputs: + return fmt.Errorf("%w; work not added to queue", ErrProcessStopped) + } +} + +func (g *WorkerGroup) runQueuing(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case item := <-g.input: + g.queue.Add(item) + + // notify that new work item came in + // drop if notification channel is full + select { + case g.chInputNotify <- struct{}{}: + default: + } + } + } +} + +func (g *WorkerGroup) runProcessing(ctx context.Context) { +Loop: + for { + select { + // watch notification channel and begin processing queue + // when notification occurs + case <-g.chInputNotify: + g.processQueue(ctx) + case <-ctx.Done(): + break Loop + } + } +} + +func (g *WorkerGroup) runRetryQueue(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case job := <-g.chRetry: + var retry retryableJob + + switch typedJob := job.(type) { + case retryableJob: + retry = typedJob + retry.count++ + + if retry.count > g.maxRetryCount { + g.lggr.Errorf("job %s dropped after max retries", job) + + continue + } + + wait := calculateExponentialBackoff(retry.count) + g.lggr.Errorf("retrying job in %dms", wait/time.Millisecond) + + retry.when = time.Now().Add(wait) + default: + wait := 
calculateExponentialBackoff(0) + + g.lggr.Errorf("retrying job %s in %s", job, wait) + + retry = retryableJob{ + name: createRandomString(12), + job: job, + when: time.Now().Add(wait), + } + } + + g.mu.Lock() + g.retryMap[retry.name] = retry + + if len(g.retryMap) >= DefaultNotifyRetryDepth { + g.lggr.Errorf("retry queue depth: %d", len(g.retryMap)) + } + g.mu.Unlock() + } + } +} + +func (g *WorkerGroup) runRetries(ctx context.Context) { + for { + // run timer on minimum backoff + timer := time.NewTimer(calculateExponentialBackoff(0)) + + select { + case <-ctx.Done(): + timer.Stop() + + return + case <-timer.C: + g.mu.RLock() + keys := make([]string, 0, len(g.retryMap)) + retries := make([]retryableJob, 0, len(g.retryMap)) + + for key, retry := range g.retryMap { + if time.Now().After(retry.when) { + keys = append(keys, key) + retries = append(retries, retry) + } + } + g.mu.RUnlock() + + for idx, key := range keys { + g.mu.Lock() + delete(g.retryMap, key) + g.mu.Unlock() + + g.doJob(ctx, retries[idx]) + } + + timer.Stop() + } + } +} + +func (g *WorkerGroup) processQueue(ctx context.Context) { + for { + if g.queue.Len() == 0 { + break + } + + if g.queue.Len() >= DefaultNotifyQueueDepth { + g.lggr.Errorf("queue depth: %d", g.queue.Len()) + } + + value, err := g.queue.Pop() + + // an error from pop means there is nothing to pop + // the length check above should protect from that, but just in case + // this error also breaks the loop + if err != nil { + break + } + + g.doJob(ctx, value) + } +} + +func (g *WorkerGroup) doJob(ctx context.Context, job Job) { + wkr := <-g.workers + + go wkr.Do(ctx, job) +} + +type queue[T any] struct { + mu sync.RWMutex + values []T +} + +func newQueue[T any](len uint) *queue[T] { + values := make([]T, len) + + return &queue[T]{ + values: values, + } +} + +func (q *queue[T]) Add(values ...T) { + q.mu.Lock() + defer q.mu.Unlock() + + q.values = append(q.values, values...) 
+} + +func (q *queue[T]) Pop() (T, error) { + q.mu.Lock() + defer q.mu.Unlock() + + if len(q.values) == 0 { + return getZero[T](), fmt.Errorf("no values to return") + } + + val := q.values[0] + + if len(q.values) > 1 { + q.values = q.values[1:] + } else { + q.values = []T{} + } + + return val, nil +} + +func (q *queue[T]) Len() int { + q.mu.RLock() + defer q.mu.RUnlock() + + return len(q.values) +} + +func getZero[T any]() T { + var result T + return result +} + +func createRandomString(length int) string { + const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + b := make([]byte, length) + + for i := range b { + rVal, err := rand.Int(rand.Reader, big.NewInt(int64(len(charset)))) + if err != nil { + rVal = big.NewInt(12) + } + + b[i] = charset[rVal.Int64()] + } + + return string(b) +} + +func calculateExponentialBackoff(retries uint8) time.Duration { + // 200ms, 400ms, 800ms, 1.6s, 3.2s, 6.4s + return time.Duration(2<