diff --git a/.github/actions/build-sign-publish-chainlink/action.yml b/.github/actions/build-sign-publish-chainlink/action.yml index 3d0511efde0..6514f123b2d 100644 --- a/.github/actions/build-sign-publish-chainlink/action.yml +++ b/.github/actions/build-sign-publish-chainlink/action.yml @@ -41,8 +41,12 @@ inputs: cosign-private-key: description: The private key to be used with cosign to sign the image required: false + cosign-public-key: + description: The public key to be used with cosign for verification + required: false cosign-password: description: The password to decrypt the cosign private key needed to sign the image + required: false sign-method: description: Build image will be signed using keypair or keyless methods default: "keypair" @@ -108,6 +112,8 @@ runs: id: meta-root uses: docker/metadata-action@e5622373a38e60fb6d795a4421e56882f2d7a681 # v3.6.2 with: + flavor: | + suffix=-root # list of Docker images to use as base name for tags images: ${{ env.shared-images }} tags: ${{ env.shared-tag-list }} @@ -179,9 +185,9 @@ runs: - if: inputs.sign-images == 'true' name: Install cosign - uses: sigstore/cosign-installer@1e95c1de343b5b0c23352d6417ee3e48d5bcd422 # v1.4.0 + uses: sigstore/cosign-installer@581838fbedd492d2350a9ecd427a95d6de1e5d01 # v2.1.0 with: - cosign-release: 'v1.4.0' + cosign-release: 'v1.6.0' - if: inputs.sign-images == 'true' && inputs.sign-method == 'keypair' name: Sign the published root Docker image using keypair method diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 6cae40e2250..cdb1d4fe2f5 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -14,7 +14,7 @@ updates: directory: '/' schedule: interval: monthly - open-pull-requests-limit: 10 + open-pull-requests-limit: 0 ignore: - dependency-name: webpack versions: diff --git a/.github/workflows/build-custom.yml b/.github/workflows/build-custom.yml index 1ca03a6ff85..d12c03b77b7 100644 --- a/.github/workflows/build-custom.yml +++ 
b/.github/workflows/build-custom.yml @@ -22,29 +22,38 @@ on: inputs: cl_repo: required: true + description: The chainlik ecr repository to use default: ${{ github.repository }} type: string cl_ref: required: false + description: The git ref from cl to use default: develop type: string dep_solana_sha: required: false + description: chainlink-solana commit or branch type: string dep_terra_sha: required: false + description: chainlink-terra commit or branch type: string secrets: AWS_ACCESS_KEY_ID: required: true + description: The AWS access key id to use AWS_SECRET_ACCESS_KEY: required: true + description: The AWS secret key to use AWS_REGION: required: true + description: The AWS region to use AWS_ROLE_TO_ASSUME: required: true + description: The AWS role to assume QA_KUBECONFIG: required: true + description: The kubernetes configuation to use jobs: build-chainlink: name: Build Chainlink Image @@ -57,7 +66,7 @@ jobs: ref: ${{ github.event.inputs.cl_ref }} - uses: actions/setup-go@v2 with: - go-version: ~1.17 + go-version: ^1.18 - name: Replace Solana deps manual flow if: ${{ github.event.inputs.dep_solana_sha }} run: | @@ -76,7 +85,7 @@ jobs: go get github.com/smartcontractkit/chainlink-terra@${{ inputs.dep_terra_sha }} - name: Tidy run: | - go mod tidy -compat=1.17 + go mod tidy - name: Env vars run: env - name: Cat go.mod diff --git a/.github/workflows/build-publish.yml b/.github/workflows/build-publish.yml index d7dedb17ea6..6aa5d983323 100644 --- a/.github/workflows/build-publish.yml +++ b/.github/workflows/build-publish.yml @@ -7,7 +7,6 @@ on: - 'v*' branches: - master - - develop jobs: build-sign-publish-chainlink: diff --git a/.github/workflows/ci-core.yml b/.github/workflows/ci-core.yml index c4a16331bbb..11485ca0a09 100644 --- a/.github/workflows/ci-core.yml +++ b/.github/workflows/ci-core.yml @@ -35,20 +35,42 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: ~1.17 - - name: Cache Go vendor packages + go-version: ^1.18 + - name: Go 
Cache uses: actions/cache@v2 with: - path: /go/pkg/mod - key: go-mod-${{ env.CACHE_VERSION }}-${{ hashFiles('go.sum') }} + # * Module download cache + # * Build cache (Linux) + path: | + ~/go/pkg/mod + ~/.cache/go-build + key: go-mod-${{ matrix.cmd }}-${{ hashFiles('go.sum') }} restore-keys: | - go-mod-${{ env.CACHE_VERSION }} + go-mod-${{ matrix.cmd }}- + go-mod- - name: Touching core/web/assets/index.html run: mkdir -p core/web/assets && touch core/web/assets/index.html - name: Download Go vendor packages run: go mod download + - name: Yarn cache + uses: actions/cache@v2 + env: + cache-name: yarn-cache + with: + path: | + ~/.npm + ~/.cache + **/node_modules + key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} + restore-keys: | + ${{ runner.os }}-build-${{ env.cache-name }}- + ${{ runner.os }}-build- + ${{ runner.os }}- + - run: yarn install --frozen-lockfile - name: Install terrad run: ./tools/ci/install_terrad + - name: Install solana cli + run: ./tools/ci/install_solana - name: Setup DB run: go run ./core local db preparetest - name: Run tests @@ -66,4 +88,3 @@ jobs: uses: docker://docker:latest with: args: logs ${{ job.services.postgres.id }} - diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index da7f9f80abc..d14d4eec2b5 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -33,18 +33,18 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: ~1.17 + go-version: ^1.18 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@546b30f35ae5a3db0e0be1843008c2224f71c3b0 + uses: github/codeql-action/init@28eead240834b314f7def40f6fcba65d100d99b1 # v2.1.6 with: languages: ${{ matrix.language }} # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@546b30f35ae5a3db0e0be1843008c2224f71c3b0 + uses: github/codeql-action/autobuild@28eead240834b314f7def40f6fcba65d100d99b1 # v2.1.6 # ℹī¸ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -58,4 +58,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@546b30f35ae5a3db0e0be1843008c2224f71c3b0 + uses: github/codeql-action/analyze@28eead240834b314f7def40f6fcba65d100d99b1 # v2.1.6 diff --git a/.github/workflows/dependency-check.yml b/.github/workflows/dependency-check.yml index 087cfb451fc..d0e028eff95 100644 --- a/.github/workflows/dependency-check.yml +++ b/.github/workflows/dependency-check.yml @@ -20,7 +20,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: ~1.17 + go-version: ^1.18 id: go - name: Write Go Modules list diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 1d80584e402..2259594588e 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -21,8 +21,10 @@ jobs: golangci: name: lint runs-on: ubuntu-latest - go: '1.17' steps: + - uses: actions/setup-go@v2 + with: + go-version: ~1.18 - uses: actions/checkout@v2 with: fetch-depth: 0 @@ -31,7 +33,9 @@ jobs: uses: golangci/golangci-lint-action@v2 with: # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version - version: v1.43.0 + version: v1.45.2 + + skip-go-installation: true # Optional: working directory, useful for monorepos # working-directory: somedir diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 76c570fc693..137b1ecf741 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -13,7 +13,10 @@ jobs: filters: | src: - '**/*.go' + - '**/*go.sum' + - 
'**/*go.mod' build-chainlink: + environment: integration name: Build Chainlink Image runs-on: ubuntu-latest needs: changes @@ -44,6 +47,7 @@ jobs: tags: 795953128386.dkr.ecr.${{ secrets.AWS_REGION }}.amazonaws.com/chainlink:latest.${{ github.sha }} push: true smoke: + environment: integration name: ETH Smoke Tests runs-on: ubuntu-latest needs: [changes, build-chainlink] @@ -58,7 +62,7 @@ jobs: - name: Setup go uses: actions/setup-go@v1 with: - go-version: 1.17 + go-version: 1.18 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v1 with: @@ -89,14 +93,11 @@ jobs: if: steps.cache-packages.outputs.cache-hit != 'true' run: go mod download - name: Install Ginkgo CLI - run: | - go get github.com/onsi/ginkgo/v2/ginkgo/generators@v2.1.2 - go get github.com/onsi/ginkgo/v2/ginkgo/internal@v2.1.2 - go get github.com/onsi/ginkgo/v2/ginkgo/labels@v2.1.2 - go install github.com/onsi/ginkgo/v2/ginkgo + run: go install github.com/onsi/ginkgo/v2/ginkgo@v2.1.3 - name: Run Tests run: | - export PATH=$PATH:$(go env GOPATH)/bin + PATH=$PATH:$(go env GOPATH)/bin + export PATH make test_smoke args="-nodes=6" - name: Publish Test Results uses: mikepenz/action-junit-report@v2 @@ -106,10 +107,11 @@ jobs: check_name: 'Smoke Test Results' - name: Publish Artifacts if: failure() - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v3 with: - name: test-logs + name: eth-test-logs path: ./integration-tests/logs + solana: name: Solana Tests needs: [changes, build-chainlink] @@ -126,6 +128,7 @@ jobs: QA_AWS_REGION: ${{ secrets.AWS_REGION }} QA_AWS_ROLE_TO_ASSUME: ${{ secrets.AWS_ROLE_TO_ASSUME }} QA_KUBECONFIG: ${{ secrets.KUBECONFIG }} + terra: name: Terra Tests needs: [changes, build-chainlink] @@ -142,4 +145,3 @@ jobs: QA_AWS_REGION: ${{ secrets.AWS_REGION }} QA_AWS_ROLE_TO_ASSUME: ${{ secrets.AWS_ROLE_TO_ASSUME }} QA_KUBECONFIG: ${{ secrets.KUBECONFIG }} - NPM_TOKEN: ${{ secrets.NPM_TOKEN }} diff --git a/.github/workflows/lint-gh-workflows.yml 
b/.github/workflows/lint-gh-workflows.yml new file mode 100644 index 00000000000..f67498635df --- /dev/null +++ b/.github/workflows/lint-gh-workflows.yml @@ -0,0 +1,12 @@ +name: Lint GH Workflows +on: + push: +jobs: + lint_workflows: + name: Validate Github Action Workflows + runs-on: ubuntu-latest + steps: + - name: Check out Code + uses: actions/checkout@v3 + - name: Run actionlint + uses: reviewdog/action-actionlint@a0541743e79d2ce4ee65276807ac493a93149b7c # v1.23.0 diff --git a/.tool-versions b/.tool-versions index 572b73f6a3e..f3c2c7a3336 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,4 +1,4 @@ -golang 1.17.2 -mockery 2.8.0 +golang 1.18 +mockery 2.10.1 nodejs 16.13.2 postgres 13.3 diff --git a/CODEOWNERS b/CODEOWNERS index 9cf40a4135d..ecc4274bc0b 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -9,7 +9,7 @@ # Services /core/services/cron @spooktheducks @samsondav -/core/services/directrequest @spooktheducks @j16r @connorwstein @samsondav +/core/services/directrequest @spooktheducks @connorwstein @samsondav /core/services/feeds @jkongie /core/services/fluxmonitorv2 @jkongie @PiotrTrzpil @spooktheducks @connorwstein /core/services/health @archseer @samsondav @@ -22,7 +22,7 @@ /core/services/pipeline @spooktheducks @connorwstein @archseer @prashantkumar1982 /core/services/synchronization /core/services/telemetry -/core/services/vrf @connorwstein +/core/services/vrf @connorwstein @Nic0s @makramkd /core/services/webhook @spooktheducks @archseer # API @@ -46,6 +46,9 @@ /core/logger @jmank88 /core/internal @samsondav @jmank88 @archseer +# CI/CD +/.github/** @alexroan @chainchad @HenryNguyen5 @javuto @jkongie @jmank88 @samsondav + # Dependencies contracts/scripts/requirements.txt @smartcontractkit/prodsec-public .tool-versions @smartcontractkit/prodsec-public diff --git a/GNUmakefile b/GNUmakefile index 656f0df15c8..15fda52cd89 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -75,11 +75,11 @@ testdb-user-only: ## Prepares the test database with user only. 
presubmit: ## Format go files and imports. goimports -w ./core gofmt -w ./core - go mod tidy -compat=1.17 + go mod tidy .PHONY: mockery mockery: $(mockery) ## Install mockery. - go install github.com/vektra/mockery/v2@v2.8.0 + go install github.com/vektra/mockery/v2@v2.10.1 .PHONY: telemetry-protobuf telemetry-protobuf: $(telemetry-protobuf) ## Generate telemetry protocol buffers. diff --git a/README.md b/README.md index 3376b070122..9ceaf61c593 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ regarding Chainlink social accounts, news, and networking. ## Build Chainlink -1. [Install Go 1.17](https://golang.org/doc/install), and add your GOPATH's [bin directory to your PATH](https://golang.org/doc/code.html#GOPATH) +1. [Install Go 1.18](https://golang.org/doc/install), and add your GOPATH's [bin directory to your PATH](https://golang.org/doc/code.html#GOPATH) - Example Path for macOS `export PATH=$GOPATH/bin:$PATH` & `export GOPATH=/Users/$USER/go` 2. Install [NodeJS](https://nodejs.org/en/download/package-manager/) & [Yarn](https://yarnpkg.com/lang/en/docs/install/). See the current version in `package.json` at the root of this repo under the `engines.node` key. - It might be easier long term to use [nvm](https://nodejs.org/en/download/package-manager/#nvm) to switch between node versions for different projects. For example, assuming $NODE_VERSION was set to a valid version of NodeJS, you could run: `nvm install $NODE_VERSION && nvm use $NODE_VERSION` @@ -48,6 +48,26 @@ regarding Chainlink social accounts, news, and networking. For the latest information on setting up a development environment, see the [Development Setup Guide](https://github.com/smartcontractkit/chainlink/wiki/Development-Setup-Guide). +### Mac M1/ARM64 [EXPERIMENTAL] + +Chainlink can be experimentally compiled with ARM64 as the target arch. 
You may run into errors with cosmwasm: + +``` +# github.com/CosmWasm/wasmvm/api +ld: warning: ignoring file ../../../.asdf/installs/golang/1.18/packages/pkg/mod/github.com/!cosm!wasm/wasmvm@v0.16.3/api/libwasmvm.dylib, building for macOS-arm64 but attempting to link with file built for macOS-x86_64 +Undefined symbols for architecture arm64:# github.com/CosmWasm/wasmvm/api +ld: warning: ignoring file ../../../.asdf/installs/golang/1.18/packages/pkg/mod/github.com/!cosm!wasm/wasmvm@v0.16.3/api/libwasmvm.dylib, building for macOS-arm64 but attempting to link with file built for macOS-x86_64 +Undefined symbols for architecture arm64: +``` + +In this case, try the following steps: + +1. `git clone git@github.com:mandrean/terra-core.git` +2. `cd terra-core; git checkout feat/multiarch` +3. `make install; cd ..` +4. `go work init /path/to/chainlink` +5. `go work use /path/to/terra-core` + ### Ethereum Node Requirements In order to run the Chainlink node you must have access to a running Ethereum node with an open websocket connection. @@ -98,19 +118,21 @@ To find out more about the Chainlink CLI, you can always run `chainlink help`. Check out the [doc](https://docs.chain.link/) pages on [Jobs](https://docs.chain.link/docs/jobs/) to learn more about how to create Jobs. -## Configuration +### Configuration Node configuration is managed by a combination of environment variables and direct setting via API/UI/CLI. Check the [official documentation](https://docs.chain.link/docs/configuration-variables) for more information on how to configure your node. -## External Adapters +### External Adapters External adapters are what make Chainlink easily extensible, providing simple integration of custom computations and specialized APIs. A Chainlink node communicates with external adapters via a simple REST API. For more information on creating and using external adapters, please see our [external adapters page](https://docs.chain.link/docs/external-adapters). 
-## Running tests +## Development + +### Running tests 1. [Install Yarn](https://yarnpkg.com/lang/en/docs/install) @@ -157,13 +179,24 @@ If you do end up modifying the migrations for the database, you will need to rer go test ./... ``` -### Notes +#### Notes - The `parallel` flag can be used to limit CPU usage, for running tests in the background (`-parallel=4`) - the default is `GOMAXPROCS` - The `p` flag can be used to limit the number of _packages_ tested concurrently, if they are interferring with one another (`-p=1`) - The `-short` flag skips tests which depend on the database, for quickly spot checking simpler tests in around one minute (you may still need a phony env var to pass some validation: `DATABASE_URL=_test`) -### Solidity Development +#### Fuzz tests + +As of Go 1.18, fuzz tests `func FuzzXXX(*testing.F)` are included as part of the normal test suite, so existing cases are executed with `go test`. + +Additionally, you can run active fuzzing to search for new cases: +```bash +go test ./pkg/path -run=XXX -fuzz=FuzzTestName +``` + +https://go.dev/doc/fuzz/ + +### Solidity Inside the `contracts/` directory: 1. Install dependencies: @@ -178,7 +211,7 @@ yarn yarn test ``` -### Use of Go Generate +### Code Generation Go generate is used to generate mocks in this project. Mocks are generated with [mockery](https://github.com/vektra/mockery) and live in core/internal/mocks. @@ -208,11 +241,11 @@ createuser --superuser --no-password chainlink -h localhost Now you can run tests or compile code as usual. -### Development Tips +### Tips For more tips on how to build and test Chainlink, see our [development tips page](https://github.com/smartcontractkit/chainlink/wiki/Development-Tips). -## Contributing +### Contributing Chainlink's source code is [licensed under the MIT License](./LICENSE), and contributions are welcome. 
diff --git a/VERSION b/VERSION index f0bb29e7638..88c5fb891dc 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.3.0 +1.4.0 diff --git a/contracts/hardhat.config.ts b/contracts/hardhat.config.ts index 809b95fad39..ad6a8dd3c1f 100644 --- a/contracts/hardhat.config.ts +++ b/contracts/hardhat.config.ts @@ -58,6 +58,10 @@ export default { version: '0.8.6', settings: COMPILER_SETTINGS, }, + { + version: '0.8.13', + settings: COMPILER_SETTINGS, + }, ], }, contractSizer: { diff --git a/contracts/scripts/native_solc8_13_compile b/contracts/scripts/native_solc8_13_compile new file mode 100755 index 00000000000..5f9e23920c4 --- /dev/null +++ b/contracts/scripts/native_solc8_13_compile @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +# This script generates .abi and .bin files for a selected .sol contract. +# Example call: +# ./contracts/scripts/native_solc_compile dev/Operator.sol +# +# The resulting abi and bin files are stored in ./contracts/solc/v0.8 + +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" + +solc-select use 0.8.13 +solc @openzeppelin/=$ROOT/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs 1000000 --metadata-hash none \ + -o $ROOT/contracts/solc/v0.8.13 \ + --abi --bin --allow-paths $ROOT/contracts/src/v0.8,$ROOT/contracts/src/v0.8/dev,$ROOT/contracts/src/v0.8/interfaces,$ROOT/contracts/src/v0.8/mocks,$ROOT/contracts/src/v0.8/tests,$ROOT/contracts/src/v0.8/vendor,$ROOT/node_modules/@openzeppelin/ \ + $ROOT/contracts/src/v0.8/$1 \ No newline at end of file diff --git a/contracts/scripts/native_solc8_6_compile b/contracts/scripts/native_solc8_6_compile new file mode 100755 index 00000000000..a80b2ea14f7 --- /dev/null +++ b/contracts/scripts/native_solc8_6_compile @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +# This script generates .abi and .bin files for a selected .sol contract. 
+# Example call: +# ./contracts/scripts/native_solc_compile dev/Operator.sol +# +# The resulting abi and bin files are stored in ./contracts/solc/v0.8 + +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" + +solc-select use 0.8.6 +solc @openzeppelin/=$ROOT/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs 1000000 --metadata-hash none \ + -o $ROOT/contracts/solc/v0.8.6 \ + --abi --bin --allow-paths $ROOT/contracts/src/v0.8,$ROOT/contracts/src/v0.8/dev,$ROOT/contracts/src/v0.8/interfaces,$ROOT/contracts/src/v0.8/mocks,$ROOT/contracts/src/v0.8/tests,$ROOT/contracts/src/v0.8/vendor,$ROOT/node_modules/@openzeppelin/ \ + $ROOT/contracts/src/v0.8/$1 \ No newline at end of file diff --git a/contracts/scripts/native_solc8_compile b/contracts/scripts/native_solc8_compile deleted file mode 100755 index 89c7e310ef1..00000000000 --- a/contracts/scripts/native_solc8_compile +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -# This script generates .abi and .bin files for a selected .sol contract. -# Example call: -# ./contracts/scripts/native_solc_compile dev/Operator.sol -# -# The resulting abi and bin files are stored in ./contracts/solc/v0.8 - -SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd .. 
&& pwd -P )" - -solc-select use 0.8.6 -solc --overwrite --optimize --optimize-runs 1000000 --metadata-hash none \ - -o $SCRIPTPATH/solc/v0.8 \ - --abi --bin --allow-paths $SCRIPTPATH/src/v0.8,$SCRIPTPATH/src/v0.8/dev,$SCRIPTPATH/src/v0.8/interfaces,$SCRIPTPATH/src/v0.8/mocks,$SCRIPTPATH/src/v0.8/tests,$SCRIPTPATH/src/v0.8/vendor \ - $SCRIPTPATH/src/v0.8/$1 \ No newline at end of file diff --git a/contracts/scripts/native_solc_compile_all b/contracts/scripts/native_solc_compile_all index fdb30cd30db..797ca54d518 100755 --- a/contracts/scripts/native_solc_compile_all +++ b/contracts/scripts/native_solc_compile_all @@ -8,6 +8,7 @@ python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt solc-select install 0.6.6 solc-select install 0.7.6 solc-select install 0.8.6 +solc-select install 0.8.13 $SCRIPTPATH/native_solc6_compile Flags.sol $SCRIPTPATH/native_solc6_compile Oracle.sol @@ -24,45 +25,52 @@ $SCRIPTPATH/native_solc6_compile dev/BlockhashStore.sol $SCRIPTPATH/native_solc7_compile tests/MultiWordConsumer.sol $SCRIPTPATH/native_solc7_compile Operator.sol +$SCRIPTPATH/native_solc7_compile AuthorizedForwarder.sol +$SCRIPTPATH/native_solc7_compile AuthorizedReceiver.sol $SCRIPTPATH/native_solc7_compile tests/Consumer.sol $SCRIPTPATH/native_solc7_compile tests/VRFCoordinatorMock.sol # Keeper $SCRIPTPATH/native_solc7_compile KeeperRegistry.sol -$SCRIPTPATH/native_solc7_compile KeeperRegistryVB.sol $SCRIPTPATH/native_solc7_compile UpkeepRegistrationRequests.sol $SCRIPTPATH/native_solc7_compile tests/UpkeepPerformCounterRestrictive.sol $SCRIPTPATH/native_solc7_compile tests/UpkeepCounter.sol +$SCRIPTPATH/native_solc8_6_compile factories/CronUpkeepFactory.sol +$SCRIPTPATH/native_solc8_6_compile upkeeps/CronUpkeep.sol # Aggregators -$SCRIPTPATH/native_solc8_compile interfaces/AggregatorV2V3Interface.sol +$SCRIPTPATH/native_solc8_6_compile interfaces/AggregatorV2V3Interface.sol -$SCRIPTPATH/native_solc8_compile Chainlink.sol -$SCRIPTPATH/native_solc8_compile 
ChainlinkClient.sol -$SCRIPTPATH/native_solc8_compile VRFRequestIDBase.sol -$SCRIPTPATH/native_solc8_compile VRFConsumerBase.sol -$SCRIPTPATH/native_solc8_compile tests/VRFConsumer.sol -$SCRIPTPATH/native_solc8_compile tests/VRFRequestIDBaseTestHelper.sol -$SCRIPTPATH/native_solc8_compile mocks/VRFCoordinatorMock.sol +$SCRIPTPATH/native_solc8_6_compile Chainlink.sol +$SCRIPTPATH/native_solc8_6_compile ChainlinkClient.sol +$SCRIPTPATH/native_solc8_6_compile VRFRequestIDBase.sol +$SCRIPTPATH/native_solc8_6_compile VRFConsumerBase.sol +$SCRIPTPATH/native_solc8_6_compile tests/VRFConsumer.sol +$SCRIPTPATH/native_solc8_6_compile tests/VRFRequestIDBaseTestHelper.sol +$SCRIPTPATH/native_solc8_6_compile mocks/VRFCoordinatorMock.sol # VRF V2 -$SCRIPTPATH/native_solc8_compile VRFConsumerBaseV2.sol -$SCRIPTPATH/native_solc8_compile tests/VRFConsumerV2.sol -$SCRIPTPATH/native_solc8_compile tests/VRFMaliciousConsumerV2.sol -$SCRIPTPATH/native_solc8_compile tests/VRFTestHelper.sol -$SCRIPTPATH/native_solc8_compile tests/VRFV2RevertingExample.sol +$SCRIPTPATH/native_solc8_6_compile VRFConsumerBaseV2.sol +$SCRIPTPATH/native_solc8_6_compile tests/VRFConsumerV2.sol +$SCRIPTPATH/native_solc8_6_compile tests/VRFMaliciousConsumerV2.sol +$SCRIPTPATH/native_solc8_6_compile tests/VRFTestHelper.sol +$SCRIPTPATH/native_solc8_6_compile tests/VRFV2RevertingExample.sol -$SCRIPTPATH/native_solc8_compile dev/BatchBlockhashStore.sol +$SCRIPTPATH/native_solc8_6_compile dev/BatchBlockhashStore.sol +$SCRIPTPATH/native_solc8_13_compile dev/BatchVRFCoordinatorV2.sol # Make sure the example consumers compile -$SCRIPTPATH/native_solc8_compile tests/VRFExternalSubOwnerExample.sol -$SCRIPTPATH/native_solc8_compile tests/VRFSingleConsumerExample.sol -$SCRIPTPATH/native_solc8_compile tests/VRFOwnerlessConsumerExample.sol -$SCRIPTPATH/native_solc8_compile tests/VRFLoadTestOwnerlessConsumer.sol -$SCRIPTPATH/native_solc8_compile tests/VRFLoadTestExternalSubOwner.sol +$SCRIPTPATH/native_solc8_6_compile 
tests/VRFExternalSubOwnerExample.sol +$SCRIPTPATH/native_solc8_6_compile tests/VRFSingleConsumerExample.sol +$SCRIPTPATH/native_solc8_6_compile tests/VRFOwnerlessConsumerExample.sol +$SCRIPTPATH/native_solc8_6_compile tests/VRFLoadTestOwnerlessConsumer.sol +$SCRIPTPATH/native_solc8_6_compile tests/VRFLoadTestExternalSubOwner.sol -$SCRIPTPATH/native_solc8_compile tests/VRFCoordinatorV2TestHelper.sol -$SCRIPTPATH/native_solc8_compile dev/VRFCoordinatorV2.sol +$SCRIPTPATH/native_solc8_6_compile tests/VRFCoordinatorV2TestHelper.sol +$SCRIPTPATH/native_solc8_6_compile dev/VRFCoordinatorV2.sol # Feeds -$SCRIPTPATH/native_solc8_compile dev/DerivedPriceFeed.sol +$SCRIPTPATH/native_solc8_6_compile dev/DerivedPriceFeed.sol + +# Log tester +$SCRIPTPATH/native_solc8_6_compile tests/LogEmitter.sol \ No newline at end of file diff --git a/contracts/src/v0.7/KeeperRegistry.sol b/contracts/src/v0.7/KeeperRegistry.sol index f0ab21ef900..6c1d9bfced2 100644 --- a/contracts/src/v0.7/KeeperRegistry.sol +++ b/contracts/src/v0.7/KeeperRegistry.sol @@ -65,10 +65,11 @@ contract KeeperRegistry is /** * @notice versions: + * - KeeperRegistry 1.2.0: allow funding within performUpkeep * - KeeperRegistry 1.1.0: added flatFeeMicroLink * - KeeperRegistry 1.0.0: initial release */ - string public constant override typeAndVersion = "KeeperRegistry 1.1.0"; + string public constant override typeAndVersion = "KeeperRegistry 1.2.0"; struct Upkeep { address target; @@ -726,11 +727,13 @@ contract KeeperRegistry is gasUsed = gasUsed - gasleft(); uint96 payment = calculatePaymentAmount(gasUsed, params.adjustedGasWei, params.linkEth); - upkeep.balance = upkeep.balance.sub(payment); - upkeep.lastKeeper = params.from; - s_upkeep[params.id] = upkeep; - uint96 newBalance = s_keeperInfo[params.from].balance.add(payment); - s_keeperInfo[params.from].balance = newBalance; + + uint96 newUpkeepBalance = s_upkeep[params.id].balance.sub(payment); + s_upkeep[params.id].balance = newUpkeepBalance; + 
s_upkeep[params.id].lastKeeper = params.from; + + uint96 newKeeperBalance = s_keeperInfo[params.from].balance.add(payment); + s_keeperInfo[params.from].balance = newKeeperBalance; emit UpkeepPerformed(params.id, success, params.from, payment, params.performData); return success; diff --git a/contracts/src/v0.7/dev/KeeperRegistrarDev.sol b/contracts/src/v0.7/dev/KeeperRegistrarDev.sol new file mode 100644 index 00000000000..6dce0ac4a0f --- /dev/null +++ b/contracts/src/v0.7/dev/KeeperRegistrarDev.sol @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: MIT +/* + * This is a development version of UpkeepRegistrationRequests (soon to be renamed to KeeperRegistrar). + * Once this is audited and finalised it will be copied to KeeperRegistrar + */ +pragma solidity ^0.7.0; + +import "../interfaces/LinkTokenInterface.sol"; +import "../interfaces/KeeperRegistryInterface.sol"; +import "../interfaces/TypeAndVersionInterface.sol"; +import "../vendor/SafeMath96.sol"; +import "../ConfirmedOwner.sol"; + +/** + * @notice Contract to accept requests for upkeep registrations + * @dev There are 2 registration workflows in this contract + * Flow 1. auto approve OFF / manual registration - UI calls `register` function on this contract, this contract owner at a later time then manually + * calls `approve` to register upkeep and emit events to inform UI and others interested. + * Flow 2. auto approve ON / real time registration - UI calls `register` function as before, which calls the `registerUpkeep` function directly on + * keeper registry and then emits approved event to finish the flow automatically without manual intervention. + * The idea is to have same interface(functions,events) for UI or anyone using this contract irrespective of auto approve being enabled or not. + * they can just listen to `RegistrationRequested` & `RegistrationApproved` events and know the status on registrations. 
+ */ +contract KeeperRegistrarDev is TypeAndVersionInterface, ConfirmedOwner { + using SafeMath96 for uint96; + + bytes4 private constant REGISTER_REQUEST_SELECTOR = this.register.selector; + + uint256 private s_minLINKJuels; + mapping(bytes32 => PendingRequest) private s_pendingRequests; + + LinkTokenInterface public immutable LINK; + + /** + * @notice versions: + * - UpkeepRegistration 1.0.0: initial release + */ + string public constant override typeAndVersion = "KeeperRegistrar 1.0.0"; + + struct AutoApprovedConfig { + bool enabled; + uint16 allowedPerWindow; + uint32 windowSizeInBlocks; + uint64 windowStart; + uint16 approvedInCurrentWindow; + } + + struct PendingRequest { + address admin; + uint96 balance; + } + + AutoApprovedConfig private s_config; + KeeperRegistryBaseInterface private s_keeperRegistry; + + event RegistrationRequested( + bytes32 indexed hash, + string name, + bytes encryptedEmail, + address indexed upkeepContract, + uint32 gasLimit, + address adminAddress, + bytes checkData, + uint96 amount, + uint8 indexed source + ); + + event RegistrationApproved(bytes32 indexed hash, string displayName, uint256 indexed upkeepId); + + event RegistrationRejected(bytes32 indexed hash); + + event ConfigChanged( + bool enabled, + uint32 windowSizeInBlocks, + uint16 allowedPerWindow, + address keeperRegistry, + uint256 minLINKJuels + ); + + constructor(address LINKAddress, uint256 minimumLINKJuels) ConfirmedOwner(msg.sender) { + LINK = LinkTokenInterface(LINKAddress); + s_minLINKJuels = minimumLINKJuels; + } + + //EXTERNAL + + /** + * @notice register can only be called through transferAndCall on LINK contract + * @param name string of the upkeep to be registered + * @param encryptedEmail email address of upkeep contact + * @param upkeepContract address to perform upkeep on + * @param gasLimit amount of gas to provide the target contract when performing upkeep + * @param adminAddress address to cancel upkeep and withdraw remaining funds + * @param checkData 
data passed to the contract when checking for upkeep + * @param amount quantity of LINK upkeep is funded with (specified in Juels) + * @param source application sending this request + */ + function register( + string memory name, + bytes calldata encryptedEmail, + address upkeepContract, + uint32 gasLimit, + address adminAddress, + bytes calldata checkData, + uint96 amount, + uint8 source + ) external onlyLINK { + require(adminAddress != address(0), "invalid admin address"); + bytes32 hash = keccak256(abi.encode(upkeepContract, gasLimit, adminAddress, checkData)); + + emit RegistrationRequested( + hash, + name, + encryptedEmail, + upkeepContract, + gasLimit, + adminAddress, + checkData, + amount, + source + ); + + AutoApprovedConfig memory config = s_config; + if (config.enabled && _underApprovalLimit(config)) { + _incrementApprovedCount(config); + + _approve(name, upkeepContract, gasLimit, adminAddress, checkData, amount, hash); + } else { + uint96 newBalance = s_pendingRequests[hash].balance.add(amount); + s_pendingRequests[hash] = PendingRequest({admin: adminAddress, balance: newBalance}); + } + } + + /** + * @dev register upkeep on KeeperRegistry contract and emit RegistrationApproved event + */ + function approve( + string memory name, + address upkeepContract, + uint32 gasLimit, + address adminAddress, + bytes calldata checkData, + bytes32 hash + ) external onlyOwner { + PendingRequest memory request = s_pendingRequests[hash]; + require(request.admin != address(0), "request not found"); + bytes32 expectedHash = keccak256(abi.encode(upkeepContract, gasLimit, adminAddress, checkData)); + require(hash == expectedHash, "hash and payload do not match"); + delete s_pendingRequests[hash]; + _approve(name, upkeepContract, gasLimit, adminAddress, checkData, request.balance, hash); + } + + /** + * @notice cancel will remove a registration request and return the refunds to the msg.sender + * @param hash the request hash + */ + function cancel(bytes32 hash) external { + 
PendingRequest memory request = s_pendingRequests[hash]; + require(msg.sender == request.admin || msg.sender == owner(), "only admin / owner can cancel"); + require(request.admin != address(0), "request not found"); + delete s_pendingRequests[hash]; + require(LINK.transfer(msg.sender, request.balance), "LINK token transfer failed"); + emit RegistrationRejected(hash); + } + + /** + * @notice owner calls this function to set if registration requests should be sent directly to the Keeper Registry + * @param enabled setting for auto-approve registrations + * @param windowSizeInBlocks window size defined in number of blocks + * @param allowedPerWindow number of registrations that can be auto approved in above window + * @param keeperRegistry new keeper registry address + */ + function setRegistrationConfig( + bool enabled, + uint32 windowSizeInBlocks, + uint16 allowedPerWindow, + address keeperRegistry, + uint256 minLINKJuels + ) external onlyOwner { + s_config = AutoApprovedConfig({ + enabled: enabled, + allowedPerWindow: allowedPerWindow, + windowSizeInBlocks: windowSizeInBlocks, + windowStart: 0, + approvedInCurrentWindow: 0 + }); + s_minLINKJuels = minLINKJuels; + s_keeperRegistry = KeeperRegistryBaseInterface(keeperRegistry); + + emit ConfigChanged(enabled, windowSizeInBlocks, allowedPerWindow, keeperRegistry, minLINKJuels); + } + + /** + * @notice read the current registration configuration + */ + function getRegistrationConfig() + external + view + returns ( + bool enabled, + uint32 windowSizeInBlocks, + uint16 allowedPerWindow, + address keeperRegistry, + uint256 minLINKJuels, + uint64 windowStart, + uint16 approvedInCurrentWindow + ) + { + AutoApprovedConfig memory config = s_config; + return ( + config.enabled, + config.windowSizeInBlocks, + config.allowedPerWindow, + address(s_keeperRegistry), + s_minLINKJuels, + config.windowStart, + config.approvedInCurrentWindow + ); + } + + /** + * @notice gets the admin address and the current balance of a registration 
request + */ + function getPendingRequest(bytes32 hash) external view returns (address, uint96) { + PendingRequest memory request = s_pendingRequests[hash]; + return (request.admin, request.balance); + } + + /** + * @notice Called when LINK is sent to the contract via `transferAndCall` + * @param amount Amount of LINK sent (specified in Juels) + * @param data Payload of the transaction + */ + function onTokenTransfer( + address, /* sender */ + uint256 amount, + bytes calldata data + ) external onlyLINK permittedFunctionsForLINK(data) isActualAmount(amount, data) { + require(amount >= s_minLINKJuels, "Insufficient payment"); + (bool success, ) = address(this).delegatecall(data); + // calls register + require(success, "Unable to create request"); + } + + //PRIVATE + + /** + * @dev reset auto approve window if passed end of current window + */ + function _resetWindowIfRequired(AutoApprovedConfig memory config) private { + uint64 blocksPassed = uint64(block.number - config.windowStart); + if (blocksPassed >= config.windowSizeInBlocks) { + config.windowStart = uint64(block.number); + config.approvedInCurrentWindow = 0; + s_config = config; + } + } + + /** + * @dev register upkeep on KeeperRegistry contract and emit RegistrationApproved event + */ + function _approve( + string memory name, + address upkeepContract, + uint32 gasLimit, + address adminAddress, + bytes calldata checkData, + uint96 amount, + bytes32 hash + ) private { + KeeperRegistryBaseInterface keeperRegistry = s_keeperRegistry; + + // register upkeep + uint256 upkeepId = keeperRegistry.registerUpkeep(upkeepContract, gasLimit, adminAddress, checkData); + // fund upkeep + bool success = LINK.transferAndCall(address(keeperRegistry), amount, abi.encode(upkeepId)); + require(success, "failed to fund upkeep"); + + emit RegistrationApproved(hash, name, upkeepId); + } + + /** + * @dev determine approval limits and check if in range + */ + function _underApprovalLimit(AutoApprovedConfig memory config) private 
returns (bool) { + _resetWindowIfRequired(config); + if (config.approvedInCurrentWindow < config.allowedPerWindow) { + return true; + } + return false; + } + + /** + * @dev record new latest approved count + */ + function _incrementApprovedCount(AutoApprovedConfig memory config) private { + config.approvedInCurrentWindow++; + s_config = config; + } + + //MODIFIERS + + /** + * @dev Reverts if not sent from the LINK token + */ + modifier onlyLINK() { + require(msg.sender == address(LINK), "Must use LINK token"); + _; + } + + /** + * @dev Reverts if the given data does not begin with the `register` function selector + * @param _data The data payload of the request + */ + modifier permittedFunctionsForLINK(bytes memory _data) { + bytes4 funcSelector; + assembly { + // solhint-disable-next-line avoid-low-level-calls + funcSelector := mload(add(_data, 32)) + } + require(funcSelector == REGISTER_REQUEST_SELECTOR, "Must use whitelisted functions"); + _; + } + + /** + * @dev Reverts if the actual amount passed does not match the expected amount + * @param expected amount that should match the actual amount + * @param data bytes + */ + modifier isActualAmount(uint256 expected, bytes memory data) { + uint256 actual; + assembly { + actual := mload(add(data, 228)) + } + require(expected == actual, "Amount mismatch"); + _; + } +} diff --git a/contracts/src/v0.7/KeeperRegistryVB.sol b/contracts/src/v0.7/dev/KeeperRegistryDev.sol similarity index 94% rename from contracts/src/v0.7/KeeperRegistryVB.sol rename to contracts/src/v0.7/dev/KeeperRegistryDev.sol index 0df01713ab6..d0b2411ff0b 100644 --- a/contracts/src/v0.7/KeeperRegistryVB.sol +++ b/contracts/src/v0.7/dev/KeeperRegistryDev.sol @@ -1,25 +1,30 @@ // SPDX-License-Identifier: MIT +/* + * This is a development version of KeeperRegistry. 
Once it's audited and finalised + * it will be copied to KeeperRegistry + */ + pragma solidity ^0.7.0; -import "./interfaces/AggregatorV3Interface.sol"; -import "./interfaces/LinkTokenInterface.sol"; -import "./interfaces/KeeperCompatibleInterface.sol"; -import "./interfaces/KeeperRegistryInterface.sol"; -import "./interfaces/TypeAndVersionInterface.sol"; -import "./vendor/SafeMathChainlink.sol"; -import "./vendor/Address.sol"; -import "./vendor/Pausable.sol"; -import "./vendor/ReentrancyGuard.sol"; -import "./vendor/SignedSafeMath.sol"; -import "./vendor/SafeMath96.sol"; -import "./KeeperBase.sol"; -import "./ConfirmedOwner.sol"; +import "../interfaces/AggregatorV3Interface.sol"; +import "../interfaces/LinkTokenInterface.sol"; +import "../interfaces/KeeperCompatibleInterface.sol"; +import "../interfaces/KeeperRegistryInterface.sol"; +import "../interfaces/TypeAndVersionInterface.sol"; +import "../vendor/SafeMathChainlink.sol"; +import "../vendor/Address.sol"; +import "../vendor/Pausable.sol"; +import "../vendor/ReentrancyGuard.sol"; +import "../vendor/SignedSafeMath.sol"; +import "../vendor/SafeMath96.sol"; +import "../KeeperBase.sol"; +import "../ConfirmedOwner.sol"; /** * @notice Registry for adding work for Chainlink Keepers to perform on client * contracts. Clients must support the Upkeep interface. 
*/ -contract KeeperRegistryVB is +contract KeeperRegistryDev is TypeAndVersionInterface, ConfirmedOwner, KeeperBase, @@ -65,10 +70,11 @@ contract KeeperRegistryVB is /** * @notice versions: + * - KeeperRegistry 1.2.0: allow funding within performUpkeep * - KeeperRegistry 1.1.0: added flatFeeMicroLink * - KeeperRegistry 1.0.0: initial release */ - string public constant override typeAndVersion = "KeeperRegistry 1.1.0"; + string public constant override typeAndVersion = "KeeperRegistry 1.2.0"; struct Upkeep { address target; @@ -92,7 +98,6 @@ contract KeeperRegistryVB is uint32 checkGasLimit; uint24 stalenessSeconds; uint16 gasCeilingMultiplier; - bool mustTakeTurns; } struct PerformParams { @@ -123,8 +128,7 @@ contract KeeperRegistryVB is uint24 stalenessSeconds, uint16 gasCeilingMultiplier, uint256 fallbackGasPrice, - uint256 fallbackLinkPrice, - bool mustTakeTurns + uint256 fallbackLinkPrice ); event FlatFeeSet(uint32 flatFeeMicroLink); event KeepersUpdated(address[] keepers, address[] payees); @@ -163,8 +167,7 @@ contract KeeperRegistryVB is uint24 stalenessSeconds, uint16 gasCeilingMultiplier, uint256 fallbackGasPrice, - uint256 fallbackLinkPrice, - bool mustTakeTurns + uint256 fallbackLinkPrice ) ConfirmedOwner(msg.sender) { LINK = LinkTokenInterface(link); LINK_ETH_FEED = AggregatorV3Interface(linkEthFeed); @@ -178,8 +181,7 @@ contract KeeperRegistryVB is stalenessSeconds, gasCeilingMultiplier, fallbackGasPrice, - fallbackLinkPrice, - mustTakeTurns + fallbackLinkPrice ); } @@ -432,7 +434,6 @@ contract KeeperRegistryVB is * be stale before switching to the fallback pricing * @param fallbackGasPrice gas price used if the gas price feed is stale * @param fallbackLinkPrice LINK price used if the LINK price feed is stale - * @param mustTakeTurns flag if true requires node performing Upkeep be different then previous */ function setConfig( uint32 paymentPremiumPPB, @@ -442,8 +443,7 @@ contract KeeperRegistryVB is uint24 stalenessSeconds, uint16 gasCeilingMultiplier, 
uint256 fallbackGasPrice, - uint256 fallbackLinkPrice, - bool mustTakeTurns + uint256 fallbackLinkPrice ) public onlyOwner { s_config = Config({ paymentPremiumPPB: paymentPremiumPPB, @@ -451,8 +451,7 @@ contract KeeperRegistryVB is blockCountPerTurn: blockCountPerTurn, checkGasLimit: checkGasLimit, stalenessSeconds: stalenessSeconds, - gasCeilingMultiplier: gasCeilingMultiplier, - mustTakeTurns: mustTakeTurns + gasCeilingMultiplier: gasCeilingMultiplier }); s_fallbackGasPrice = fallbackGasPrice; s_fallbackLinkPrice = fallbackLinkPrice; @@ -464,8 +463,7 @@ contract KeeperRegistryVB is stalenessSeconds, gasCeilingMultiplier, fallbackGasPrice, - fallbackLinkPrice, - mustTakeTurns + fallbackLinkPrice ); emit FlatFeeSet(flatFeeMicroLink); } @@ -570,13 +568,6 @@ contract KeeperRegistryVB is return s_registrar; } - /** - * @notice read the current config value for mustTakeTurns - */ - function getMustTakeTurns() external view returns (bool) { - return s_config.mustTakeTurns; - } - /** * @notice read the current info about any keeper address */ @@ -688,8 +679,7 @@ contract KeeperRegistryVB is uint256 premium = PPB_BASE.add(config.paymentPremiumPPB); uint256 total = weiForGas.mul(1e9).mul(premium).div(linkEth).add(uint256(config.flatFeeMicroLink).mul(1e12)); require(total <= LINK_TOTAL_SUPPLY, "payment greater than all LINK"); - return uint96(total); - // LINK_TOTAL_SUPPLY < UINT96_MAX + return uint96(total); // LINK_TOTAL_SUPPLY < UINT96_MAX } /** @@ -742,11 +732,13 @@ contract KeeperRegistryVB is gasUsed = gasUsed - gasleft(); uint96 payment = calculatePaymentAmount(gasUsed, params.adjustedGasWei, params.linkEth); - upkeep.balance = upkeep.balance.sub(payment); - upkeep.lastKeeper = params.from; - s_upkeep[params.id] = upkeep; - uint96 newBalance = s_keeperInfo[params.from].balance.add(payment); - s_keeperInfo[params.from].balance = newBalance; + + uint96 newUpkeepBalance = s_upkeep[params.id].balance.sub(payment); + s_upkeep[params.id].balance = newUpkeepBalance; + 
s_upkeep[params.id].lastKeeper = params.from; + + uint96 newKeeperBalance = s_keeperInfo[params.from].balance.add(payment); + s_keeperInfo[params.from].balance = newKeeperBalance; emit UpkeepPerformed(params.id, success, params.from, payment, params.performData); return success; @@ -769,9 +761,7 @@ contract KeeperRegistryVB is ) private view { require(s_keeperInfo[from].active, "only active keepers"); require(upkeep.balance >= maxLinkPayment, "insufficient funds"); - if (s_config.mustTakeTurns) { - require(upkeep.lastKeeper != from, "keepers must take turns"); - } + require(upkeep.lastKeeper != from, "keepers must take turns"); } /** diff --git a/contracts/src/v0.7/tests/UpkeepAutoFunder.sol b/contracts/src/v0.7/tests/UpkeepAutoFunder.sol new file mode 100644 index 00000000000..9de92d03ff8 --- /dev/null +++ b/contracts/src/v0.7/tests/UpkeepAutoFunder.sol @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "../KeeperCompatible.sol"; +import "../interfaces/LinkTokenInterface.sol"; +import "../interfaces/KeeperRegistryInterface.sol"; +import "../ConfirmedOwner.sol"; + +contract UpkeepAutoFunder is KeeperCompatible, ConfirmedOwner { + bool public s_isEligible; + bool public s_shouldCancel; + uint256 public s_upkeepId; + uint96 public s_autoFundLink; + LinkTokenInterface public immutable LINK; + KeeperRegistryBaseInterface public immutable s_keeperRegistry; + + constructor(address linkAddress, address registryAddress) ConfirmedOwner(msg.sender) { + LINK = LinkTokenInterface(linkAddress); + s_keeperRegistry = KeeperRegistryBaseInterface(registryAddress); + + s_isEligible = false; + s_shouldCancel = false; + s_upkeepId = 0; + s_autoFundLink = 0; + } + + function setShouldCancel(bool value) external onlyOwner { + s_shouldCancel = value; + } + + function setIsEligible(bool value) external onlyOwner { + s_isEligible = value; + } + + function setAutoFundLink(uint96 value) external onlyOwner { + s_autoFundLink = value; + } + + function 
setUpkeepId(uint256 value) external onlyOwner { + s_upkeepId = value; + } + + function checkUpkeep(bytes calldata data) + external + override + cannotExecute + returns (bool callable, bytes calldata executedata) + { + return (s_isEligible, data); + } + + function performUpkeep(bytes calldata data) external override { + require(s_isEligible, "Upkeep should be eligible"); + s_isEligible = false; // Allow upkeep only once until it is set again + + // Topup upkeep so it can be called again + LINK.transferAndCall(address(s_keeperRegistry), s_autoFundLink, abi.encode(s_upkeepId)); + + if (s_shouldCancel) { + s_keeperRegistry.cancelUpkeep(s_upkeepId); + } + } +} diff --git a/contracts/src/v0.7/tests/UpkeepPerformCounterRestrictive.sol b/contracts/src/v0.7/tests/UpkeepPerformCounterRestrictive.sol index 0b14e424be5..35e28584a09 100644 --- a/contracts/src/v0.7/tests/UpkeepPerformCounterRestrictive.sol +++ b/contracts/src/v0.7/tests/UpkeepPerformCounterRestrictive.sol @@ -7,7 +7,11 @@ contract UpkeepPerformCounterRestrictive { uint256 public nextEligible = 0; uint256 public testRange; uint256 public averageEligibilityCadence; - uint256 count = 0; + uint256 public checkGasToBurn; + uint256 public performGasToBurn; + mapping(bytes32 => bool) public dummyMap; // used to force storage lookup + + uint256 private count = 0; constructor(uint256 _testRange, uint256 _averageEligibilityCadence) { testRange = _testRange; @@ -15,10 +19,19 @@ contract UpkeepPerformCounterRestrictive { } function checkUpkeep(bytes calldata data) external view returns (bool, bytes memory) { - return (eligible(), bytes("")); + uint256 startGas = gasleft(); + uint256 blockNum = block.number - 1; + bool dummy; + // burn gas + while (startGas - gasleft() < checkGasToBurn) { + dummy = dummy && dummyMap[blockhash(blockNum)]; // arbitrary storage reads + blockNum--; + } + return (eligible(), abi.encode(dummy)); } - function performUpkeep(bytes calldata data) external { + function performUpkeep(bytes calldata) 
external { + uint256 startGas = gasleft(); bool eligible = eligible(); uint256 blockNum = block.number; emit PerformingUpkeep(eligible, tx.origin, initialCall, nextEligible, blockNum); @@ -28,6 +41,20 @@ contract UpkeepPerformCounterRestrictive { } nextEligible = (blockNum + (rand() % (averageEligibilityCadence * 2))) + 1; count++; + // burn gas + blockNum--; + while (startGas - gasleft() < performGasToBurn) { + dummyMap[blockhash(blockNum)] = false; // arbitrary storage writes + blockNum--; + } + } + + function setCheckGasToBurn(uint256 value) public { + checkGasToBurn = value; + } + + function setPerformGasToBurn(uint256 value) public { + performGasToBurn = value; } function getCountPerforms() public view returns (uint256) { diff --git a/contracts/src/v0.8/PermissionedForwardProxy.sol b/contracts/src/v0.8/PermissionedForwardProxy.sol new file mode 100644 index 00000000000..e4f1535cb60 --- /dev/null +++ b/contracts/src/v0.8/PermissionedForwardProxy.sol @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.13; + +import "@openzeppelin/contracts/utils/Address.sol"; +import "./ConfirmedOwner.sol"; + +/** + * @title PermissionedForwardProxy + * @notice This proxy is used to forward calls from sender to target. 
It maintains + * a permission list to check which sender is allowed to call which target + */ +contract PermissionedForwardProxy is ConfirmedOwner { + using Address for address; + + error PermissionNotSet(); + + event PermissionSet(address indexed sender, address target); + event PermissionRemoved(address indexed sender); + + mapping(address => address) private s_forwardPermissionList; + + constructor() ConfirmedOwner(msg.sender) {} + + /** + * @notice Verifies if msg.sender has permission to forward to target address and then forwards the handler + * @param target address of the contract to forward the handler to + * @param handler bytes to be passed to target in call data + */ + function forward(address target, bytes calldata handler) external { + if (s_forwardPermissionList[msg.sender] != target) { + revert PermissionNotSet(); + } + target.functionCall(handler); + } + + /** + * @notice Adds permission for sender to forward calls to target via this proxy. + * Note that it allows to overwrite an existing permission + * @param sender The address who will use this proxy to forward calls + * @param target The address where sender will be allowed to forward calls + */ + function setPermission(address sender, address target) external onlyOwner { + s_forwardPermissionList[sender] = target; + + emit PermissionSet(sender, target); + } + + /** + * @notice Removes permission for sender to forward calls via this proxy + * @param sender The address who will use this proxy to forward calls + */ + function removePermission(address sender) external onlyOwner { + delete s_forwardPermissionList[sender]; + + emit PermissionRemoved(sender); + } + + /** + * @notice Returns the target address that the sender can use this proxy for + * @param sender The address to fetch the permissioned target for + */ + function getPermission(address sender) external view returns (address) { + return s_forwardPermissionList[sender]; + } +} diff --git a/contracts/src/v0.8/dev/BatchVRFCoordinatorV2.sol 
b/contracts/src/v0.8/dev/BatchVRFCoordinatorV2.sol new file mode 100644 index 00000000000..15b41c090b0 --- /dev/null +++ b/contracts/src/v0.8/dev/BatchVRFCoordinatorV2.sol @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.13; + +import "./VRFTypes.sol"; + +/** + * @title BatchVRFCoordinatorV2 + * @notice The BatchVRFCoordinatorV2 contract acts as a proxy to write many random responses to the + * provided VRFCoordinatorV2 contract efficiently in a single transaction. + */ +contract BatchVRFCoordinatorV2 { + VRFCoordinatorV2 public immutable COORDINATOR; + + event ErrorReturned(uint256 indexed requestId, string reason); + event RawErrorReturned(uint256 indexed requestId, bytes lowLevelData); + + constructor(address coordinatorAddr) { + COORDINATOR = VRFCoordinatorV2(coordinatorAddr); + } + + /** + * @notice fulfills multiple randomness requests with the provided proofs and commitments. + * @param proofs the randomness proofs generated by the VRF provider. + * @param rcs the request commitments corresponding to the randomness proofs. + */ + function fulfillRandomWords(VRFTypes.Proof[] memory proofs, VRFTypes.RequestCommitment[] memory rcs) external { + require(proofs.length == rcs.length, "input array arg lengths mismatch"); + for (uint256 i = 0; i < proofs.length; i++) { + try COORDINATOR.fulfillRandomWords(proofs[i], rcs[i]) returns ( + uint96 /* payment */ + ) { + continue; + } catch Error(string memory reason) { + uint256 requestId = getRequestIdFromProof(proofs[i]); + emit ErrorReturned(requestId, reason); + } catch (bytes memory lowLevelData) { + uint256 requestId = getRequestIdFromProof(proofs[i]); + emit RawErrorReturned(requestId, lowLevelData); + } + } + } + + /** + * @notice Returns the proving key hash associated with this public key. + * @param publicKey the key to return the hash of. 
+ */ + function hashOfKey(uint256[2] memory publicKey) internal pure returns (bytes32) { + return keccak256(abi.encode(publicKey)); + } + + /** + * @notice Returns the request ID of the request associated with the given proof. + * @param proof the VRF proof provided by the VRF oracle. + */ + function getRequestIdFromProof(VRFTypes.Proof memory proof) internal pure returns (uint256) { + bytes32 keyHash = hashOfKey(proof.pk); + return uint256(keccak256(abi.encode(keyHash, proof.seed))); + } +} + +interface VRFCoordinatorV2 { + function fulfillRandomWords(VRFTypes.Proof memory proof, VRFTypes.RequestCommitment memory rc) + external + returns (uint96); +} diff --git a/contracts/src/v0.8/dev/VRFCoordinatorV2.sol b/contracts/src/v0.8/dev/VRFCoordinatorV2.sol index 86a252323e5..37e184d5174 100644 --- a/contracts/src/v0.8/dev/VRFCoordinatorV2.sol +++ b/contracts/src/v0.8/dev/VRFCoordinatorV2.sol @@ -826,15 +826,12 @@ contract VRFCoordinatorV2 is emit SubscriptionCanceled(subId, to, balance); } - /* - * @noticeCheck to see if there exists a request commitment consumers - * for all consumers and keyhashes for a given sub. - * @param subId where to send the funds - * @return exits true if outstanding requests + /** + * @inheritdoc VRFCoordinatorV2Interface * @dev Looping is bounded to MAX_CONSUMERS*(number of keyhashes). * @dev Used to disable subscription canceling while outstanding request are present. 
*/ - function pendingRequestExists(uint64 subId) public view returns (bool) { + function pendingRequestExists(uint64 subId) public view override returns (bool) { SubscriptionConfig memory subConfig = s_subscriptionConfigs[subId]; for (uint256 i = 0; i < subConfig.consumers.length; i++) { for (uint256 j = 0; j < s_provingKeyHashes.length; j++) { diff --git a/contracts/src/v0.8/dev/VRFTypes.sol b/contracts/src/v0.8/dev/VRFTypes.sol new file mode 100644 index 00000000000..c09227529f1 --- /dev/null +++ b/contracts/src/v0.8/dev/VRFTypes.sol @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.13; + +/** + * @title VRFTypes + * @notice The VRFTypes library is a collection of types that is required to fulfill VRF requests + * on-chain. They must be ABI-compatible with the types used by the coordinator contracts. + */ +library VRFTypes { + // ABI-compatible with VRF.Proof. + // This proof is used for VRF V2. + struct Proof { + uint256[2] pk; + uint256[2] gamma; + uint256 c; + uint256 s; + uint256 seed; + address uWitness; + uint256[2] cGammaWitness; + uint256[2] sHashWitness; + uint256 zInv; + } + + // ABI-compatible with VRFCoordinatorV2.RequestCommitment. + // This is only used for VRF V2. 
+ struct RequestCommitment { + uint64 blockNum; + uint64 subId; + uint32 callbackGasLimit; + uint32 numWords; + address sender; + } +} diff --git a/contracts/src/v0.8/factories/CronUpkeepFactory.sol b/contracts/src/v0.8/factories/CronUpkeepFactory.sol index 3372bf5d1fd..5a2c8ffbae2 100644 --- a/contracts/src/v0.8/factories/CronUpkeepFactory.sol +++ b/contracts/src/v0.8/factories/CronUpkeepFactory.sol @@ -4,6 +4,8 @@ pragma solidity 0.8.6; import "../upkeeps/CronUpkeep.sol"; import "../upkeeps/CronUpkeepDelegate.sol"; +import "../ConfirmedOwner.sol"; +import {Spec, Cron as CronExternal} from "../libraries/external/Cron.sol"; /** * @title The CronUpkeepFactory contract @@ -11,27 +13,68 @@ import "../upkeeps/CronUpkeepDelegate.sol"; * delegate their checkUpkeep calls onto this contract. Utilizing this pattern reduces the size * of the CronUpkeep contracts. */ -contract CronUpkeepFactory { +contract CronUpkeepFactory is ConfirmedOwner { event NewCronUpkeepCreated(address upkeep, address owner); address private immutable s_cronDelegate; + uint256 public s_maxJobs = 5; - constructor() { + constructor() ConfirmedOwner(msg.sender) { s_cronDelegate = address(new CronUpkeepDelegate()); } /** * @notice Creates a new CronUpkeep contract, with msg.sender as the owner */ - function newCronUpkeep() public { - emit NewCronUpkeepCreated(address(new CronUpkeep(msg.sender, s_cronDelegate)), msg.sender); + function newCronUpkeep() external { + newCronUpkeepWithJob(bytes("")); + } + + /** + * @notice Creates a new CronUpkeep contract, with msg.sender as the owner, and registers a cron job + */ + function newCronUpkeepWithJob(bytes memory encodedJob) public { + emit NewCronUpkeepCreated(address(new CronUpkeep(msg.sender, s_cronDelegate, s_maxJobs, encodedJob)), msg.sender); + } + + /** + * @notice Sets the max job limit on new cron upkeeps + */ + function setMaxJobs(uint256 maxJobs) external onlyOwner { + s_maxJobs = maxJobs; } /** * @notice Gets the address of the delegate contract * 
@return the address of the delegate contract */ - function cronDelegateAddress() public view returns (address) { + function cronDelegateAddress() external view returns (address) { return s_cronDelegate; } + + /** + * @notice Converts a cron string to a Spec, validates the spec, and encodes the spec. + * This should only be called off-chain, as it is gas expensive! + * @param cronString the cron string to convert and encode + * @return the abi encoding of the Spec struct representing the cron string + */ + function encodeCronString(string memory cronString) external pure returns (bytes memory) { + return CronExternal.toEncodedSpec(cronString); + } + + /** + * @notice Converts, validates, and encodes a full cron spec. This payload is then passed to newCronUpkeepWithJob. + * @param target the destination contract of a cron job + * @param handler the function signature on the target contract to call + * @param cronString the cron string to convert and encode + * @return the abi encoding of the entire cron job + */ + function encodeCronJob( + address target, + bytes memory handler, + string memory cronString + ) external pure returns (bytes memory) { + Spec memory spec = CronExternal.toSpec(cronString); + return abi.encode(target, handler, spec); + } } diff --git a/contracts/src/v0.8/interfaces/VRFCoordinatorV2Interface.sol b/contracts/src/v0.8/interfaces/VRFCoordinatorV2Interface.sol index 1ddf2776a36..d962dc194fd 100644 --- a/contracts/src/v0.8/interfaces/VRFCoordinatorV2Interface.sol +++ b/contracts/src/v0.8/interfaces/VRFCoordinatorV2Interface.sol @@ -113,4 +113,13 @@ interface VRFCoordinatorV2Interface { * @param to - Where to send the remaining LINK to */ function cancelSubscription(uint64 subId, address to) external; + + /* + * @notice Check to see if there exists a request commitment consumers + * for all consumers and keyhashes for a given sub. 
+ * @param subId - ID of the subscription + * @return true if there exists at least one unfulfilled request for the subscription, false + * otherwise. + */ + function pendingRequestExists(uint64 subId) external view returns (bool); } diff --git a/contracts/src/v0.8/mocks/VRFCoordinatorV2Mock.sol b/contracts/src/v0.8/mocks/VRFCoordinatorV2Mock.sol index 584d61308a6..80e91c5440f 100644 --- a/contracts/src/v0.8/mocks/VRFCoordinatorV2Mock.sol +++ b/contracts/src/v0.8/mocks/VRFCoordinatorV2Mock.sol @@ -198,4 +198,8 @@ contract VRFCoordinatorV2Mock is VRFCoordinatorV2Interface { function acceptSubscriptionOwnerTransfer(uint64 _subId) external pure override { revert("not implemented"); } + + function pendingRequestExists(uint64 subId) public view override returns (bool) { + revert("not implemented"); + } } diff --git a/contracts/src/v0.8/tests/Counter.sol b/contracts/src/v0.8/tests/Counter.sol new file mode 100644 index 00000000000..1ceb7891490 --- /dev/null +++ b/contracts/src/v0.8/tests/Counter.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.0; + +contract Counter { + error AlwaysRevert(); + + uint256 public count = 0; + + function increment() public returns (uint256) { + count += 1; + return count; + } + + function reset() public { + count = 0; + } + + function alwaysRevert() public pure { + revert AlwaysRevert(); + } + + function alwaysRevertWithString() public pure { + revert("always revert"); + } +} diff --git a/contracts/src/v0.8/tests/CronTestHelper.sol b/contracts/src/v0.8/tests/CronTestHelper.sol index 14639dd94e9..f50054715d2 100644 --- a/contracts/src/v0.8/tests/CronTestHelper.sol +++ b/contracts/src/v0.8/tests/CronTestHelper.sol @@ -31,14 +31,6 @@ contract CronInternalTestHelper { return CronInternal.toCronString(spec); } - /** - * @notice encodedSpecToString is a helper function for turning a string - * into a spec struct. 
- */ - function cronStringtoEncodedSpec(string memory cronString) public pure returns (Spec memory) { - return CronInternal.toSpec(cronString); - } - /** * @notice calculateNextTick calculates the next time a cron job should "tick". * This should only be called off-chain, as it is gas expensive! @@ -86,14 +78,6 @@ contract CronExternalTestHelper { return CronExternal.toCronString(spec); } - /** - * @notice encodedSpecToString is a helper function for turning a string - * into a spec struct. - */ - function cronStringtoEncodedSpec(string memory cronString) public pure returns (Spec memory) { - return CronExternal.toSpec(cronString); - } - /** * @notice calculateNextTick calculates the next time a cron job should "tick". * This should only be called off-chain, as it is gas expensive! diff --git a/contracts/src/v0.8/tests/CronUpkeepTestHelper.sol b/contracts/src/v0.8/tests/CronUpkeepTestHelper.sol index b9bbc23d35e..9406d10b4d4 100644 --- a/contracts/src/v0.8/tests/CronUpkeepTestHelper.sol +++ b/contracts/src/v0.8/tests/CronUpkeepTestHelper.sol @@ -14,7 +14,12 @@ contract CronUpkeepTestHelper is CronUpkeep { using Cron for Spec; using Cron for string; - constructor(address owner, address delegate) CronUpkeep(owner, delegate) {} + constructor( + address owner, + address delegate, + uint256 maxJobs, + bytes memory firstJob + ) CronUpkeep(owner, delegate, maxJobs, firstJob) {} /** * @notice createCronJobFromString is a helper function for creating cron jobs diff --git a/contracts/src/v0.8/tests/LogEmitter.sol b/contracts/src/v0.8/tests/LogEmitter.sol new file mode 100644 index 00000000000..4d7b9799eb1 --- /dev/null +++ b/contracts/src/v0.8/tests/LogEmitter.sol @@ -0,0 +1,25 @@ +pragma solidity ^0.8.0; + +contract LogEmitter { + event Log1(uint256); + event Log2(uint256 indexed); + event Log3(string); + + function EmitLog1(uint256[] memory v) public { + for (uint256 i = 0; i < v.length; i++) { + emit Log1(v[i]); + } + } + + function EmitLog2(uint256[] memory v) public { + 
for (uint256 i = 0; i < v.length; i++) { + emit Log2(v[i]); + } + } + + function EmitLog3(string[] memory v) public { + for (uint256 i = 0; i < v.length; i++) { + emit Log3(v[i]); + } + } +} diff --git a/contracts/src/v0.8/upkeeps/CronUpkeep.sol b/contracts/src/v0.8/upkeeps/CronUpkeep.sol index eea1ad5853b..5628cba688e 100644 --- a/contracts/src/v0.8/upkeeps/CronUpkeep.sol +++ b/contracts/src/v0.8/upkeeps/CronUpkeep.sol @@ -18,7 +18,6 @@ pragma solidity 0.8.6; -import "@openzeppelin/contracts/security/Pausable.sol"; import "@openzeppelin/contracts/security/Pausable.sol"; import "@openzeppelin/contracts/proxy/Proxy.sol"; import "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; @@ -40,16 +39,19 @@ contract CronUpkeep is KeeperCompatibleInterface, KeeperBase, ConfirmedOwner, Pa event CronJobExecuted(uint256 indexed id, uint256 timestamp); event CronJobCreated(uint256 indexed id, address target, bytes handler); + event CronJobUpdated(uint256 indexed id, address target, bytes handler); event CronJobDeleted(uint256 indexed id); error CallFailed(uint256 id, string reason); error CronJobIDNotFound(uint256 id); + error ExceedsMaxJobs(); error InvalidHandler(); error TickInFuture(); error TickTooOld(); error TickDoesntMatchSpec(); address immutable s_delegate; + uint256 public immutable s_maxJobs; uint256 private s_nextCronJobID = 1; EnumerableSet.UintSet private s_activeCronJobIDs; @@ -62,9 +64,21 @@ contract CronUpkeep is KeeperCompatibleInterface, KeeperBase, ConfirmedOwner, Pa /** * @param owner the initial owner of the contract * @param delegate the contract to delegate checkUpkeep calls to + * @param maxJobs the max number of cron jobs this contract will support + * @param firstJob an optional encoding of the first cron job */ - constructor(address owner, address delegate) ConfirmedOwner(owner) { + constructor( + address owner, + address delegate, + uint256 maxJobs, + bytes memory firstJob + ) ConfirmedOwner(owner) { s_delegate = delegate; + s_maxJobs = 
maxJobs; + if (firstJob.length > 0) { + (address target, bytes memory handler, Spec memory spec) = abi.decode(firstJob, (address, bytes, Spec)); + createCronJobFromSpec(target, handler, spec); + } } /** @@ -95,11 +109,35 @@ contract CronUpkeep is KeeperCompatibleInterface, KeeperBase, ConfirmedOwner, Pa address target, bytes memory handler, bytes memory encodedCronSpec - ) external { + ) external onlyOwner { + if (s_activeCronJobIDs.length() >= s_maxJobs) { + revert ExceedsMaxJobs(); + } Spec memory spec = abi.decode(encodedCronSpec, (Spec)); createCronJobFromSpec(target, handler, spec); } + /** + * @notice Updates a cron job from the given encoded spec + * @param id the id of the cron job to update + * @param newTarget the destination contract of a cron job + * @param newHandler the function signature on the target contract to call + * @param newEncodedCronSpec abi encoding of a cron spec + */ + function updateCronJob( + uint256 id, + address newTarget, + bytes memory newHandler, + bytes memory newEncodedCronSpec + ) external onlyOwner onlyValidCronID(id) { + Spec memory newSpec = abi.decode(newEncodedCronSpec, (Spec)); + s_targets[id] = newTarget; + s_handlers[id] = newHandler; + s_specs[id] = newSpec; + s_handlerSignatures[id] = handlerSig(newTarget, newHandler); + emit CronJobUpdated(id, newTarget, newHandler); + } + /** * @notice Deletes the cron job matching the provided id. Reverts if * the id is not found. @@ -174,16 +212,6 @@ contract CronUpkeep is KeeperCompatibleInterface, KeeperBase, ConfirmedOwner, Pa return (s_targets[id], s_handlers[id], CronExternal.toCronString(spec), CronExternal.nextTick(spec)); } - /** - * @notice Converts a cron string to a Spec, validates the spec, and encodes the spec. - * This should only be called off-chain, as it is gas expensive! 
- * @param cronString the cron string to convert and encode - * @return the abi encoding of the Spec struct representing the cron string - */ - function cronStringToEncodedSpec(string memory cronString) external pure returns (bytes memory) { - return CronExternal.toEncodedSpec(cronString); - } - /** * @notice Adds a cron spec to storage and the ID to the list of jobs * @param target the destination contract of a cron job @@ -194,7 +222,7 @@ contract CronUpkeep is KeeperCompatibleInterface, KeeperBase, ConfirmedOwner, Pa address target, bytes memory handler, Spec memory spec - ) internal onlyOwner { + ) internal { uint256 newID = s_nextCronJobID; s_activeCronJobIDs.add(newID); s_targets[newID] = target; diff --git a/contracts/src/v0.8/upkeeps/CronUpkeepDelegate.sol b/contracts/src/v0.8/upkeeps/CronUpkeepDelegate.sol index 8dd203049dd..ec2c2a0fd91 100644 --- a/contracts/src/v0.8/upkeeps/CronUpkeepDelegate.sol +++ b/contracts/src/v0.8/upkeeps/CronUpkeepDelegate.sol @@ -33,6 +33,9 @@ contract CronUpkeepDelegate { // DEV: start at a random spot in the list so that checks are // spread evenly among cron jobs uint256 numCrons = s_activeCronJobIDs.length(); + if (numCrons == 0) { + return (false, bytes("")); + } uint256 startIdx = block.number % numCrons; bool result; bytes memory payload; @@ -52,7 +55,7 @@ contract CronUpkeepDelegate { * @param start the starting id to check (inclusive) * @param end the ending id to check (exclusive) * @return upkeepNeeded signals if upkeep is needed, performData is an abi encoding - * of the id and "next tick" of the elligible cron job + * of the id and "next tick" of the eligible cron job */ function checkInRange(uint256 start, uint256 end) private view returns (bool, bytes memory) { uint256 id; diff --git a/contracts/test/v0.7/KeeperRegistrarDev.test.ts b/contracts/test/v0.7/KeeperRegistrarDev.test.ts new file mode 100644 index 00000000000..018251d9e18 --- /dev/null +++ b/contracts/test/v0.7/KeeperRegistrarDev.test.ts @@ -0,0 +1,609 @@ 
+/* + * This test is for KeeperRegistrarDev contract which is the development version of + * UpkeepRegistrationRequests (to be renamed to KeeperRegistrar). Until it's audited + * and finalised this test will be used for development. There are 2 places marked in + * the test which will need to be changed when these tests are ported back to the prod version + */ + +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { evmRevert } from '../test-helpers/matchers' +import { getUsers, Personas } from '../test-helpers/setup' +import { BigNumber, Signer } from 'ethers' +import { LinkToken__factory as LinkTokenFactory } from '../../typechain/factories/LinkToken__factory' + +import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../typechain/factories/MockV3Aggregator__factory' +import { UpkeepMock__factory as UpkeepMockFactory } from '../../typechain/factories/UpkeepMock__factory' +// These 4 dependencies are mocked from Dev +import { KeeperRegistryDev as KeeperRegistry } from '../../typechain/KeeperRegistryDev' +import { KeeperRegistrarDev as KeeperRegistrar } from '../../typechain/KeeperRegistrarDev' +import { KeeperRegistryDev__factory as KeeperRegistryFactory } from '../../typechain/factories/KeeperRegistryDev__factory' +import { KeeperRegistrarDev__factory as KeeperRegistrarFactory } from '../../typechain/factories/KeeperRegistrarDev__factory' + +import { MockV3Aggregator } from '../../typechain/MockV3Aggregator' +import { LinkToken } from '../../typechain/LinkToken' +import { UpkeepMock } from '../../typechain/UpkeepMock' + +let linkTokenFactory: LinkTokenFactory +let mockV3AggregatorFactory: MockV3AggregatorFactory +let keeperRegistryFactory: KeeperRegistryFactory +let keeperRegistrar: KeeperRegistrarFactory +let upkeepMockFactory: UpkeepMockFactory + +let personas: Personas + +before(async () => { + personas = (await getUsers()).personas + + linkTokenFactory = await ethers.getContractFactory('LinkToken') + 
mockV3AggregatorFactory = (await ethers.getContractFactory( + 'src/v0.7/tests/MockV3Aggregator.sol:MockV3Aggregator', + )) as unknown as MockV3AggregatorFactory + keeperRegistryFactory = await ethers.getContractFactory('KeeperRegistry') + // Lifts the Dev contract + keeperRegistrar = await ethers.getContractFactory('KeeperRegistrarDev') + upkeepMockFactory = await ethers.getContractFactory('UpkeepMock') +}) + +const errorMsgs = { + onlyOwner: 'revert Only callable by owner', + onlyAdmin: 'only admin / owner can cancel', + hashPayload: 'hash and payload do not match', + requestNotFound: 'request not found', +} + +describe('KeeperRegistrar', () => { + const upkeepName = 'SampleUpkeep' + + const linkEth = BigNumber.from(300000000) + const gasWei = BigNumber.from(100) + const executeGas = BigNumber.from(100000) + const source = BigNumber.from(100) + const paymentPremiumPPB = BigNumber.from(250000000) + const flatFeeMicroLink = BigNumber.from(0) + + const window_big = BigNumber.from(1000) + const window_small = BigNumber.from(2) + const threshold_big = BigNumber.from(1000) + const threshold_small = BigNumber.from(5) + + const blockCountPerTurn = BigNumber.from(3) + const emptyBytes = '0x00' + const stalenessSeconds = BigNumber.from(43820) + const gasCeilingMultiplier = BigNumber.from(1) + const maxCheckGas = BigNumber.from(20000000) + const fallbackGasPrice = BigNumber.from(200) + const fallbackLinkPrice = BigNumber.from(200000000) + const minLINKJuels = BigNumber.from('1000000000000000000') + const amount = BigNumber.from('5000000000000000000') + const amount1 = BigNumber.from('6000000000000000000') + + let owner: Signer + let admin: Signer + let someAddress: Signer + let registrarOwner: Signer + let stranger: Signer + + let linkToken: LinkToken + let linkEthFeed: MockV3Aggregator + let gasPriceFeed: MockV3Aggregator + let registry: KeeperRegistry + let mock: UpkeepMock + let registrar: KeeperRegistrar + + beforeEach(async () => { + owner = personas.Default + admin = 
personas.Neil + someAddress = personas.Ned + registrarOwner = personas.Nelly + stranger = personas.Nancy + + linkToken = await linkTokenFactory.connect(owner).deploy() + gasPriceFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(0, gasWei) + linkEthFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(9, linkEth) + registry = await keeperRegistryFactory + .connect(owner) + .deploy( + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + maxCheckGas, + stalenessSeconds, + gasCeilingMultiplier, + fallbackGasPrice, + fallbackLinkPrice, + ) + + mock = await upkeepMockFactory.deploy() + + registrar = await keeperRegistrar + .connect(registrarOwner) + .deploy(linkToken.address, minLINKJuels) + + await registry.setRegistrar(registrar.address) + }) + + describe('#typeAndVersion', () => { + it('uses the correct type and version', async () => { + const typeAndVersion = await registrar.typeAndVersion() + assert.equal(typeAndVersion, 'KeeperRegistrar 1.0.0') + }) + }) + + describe('#register', () => { + it('reverts if not called by the LINK token', async () => { + await evmRevert( + registrar + .connect(someAddress) + .register( + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount, + source, + ), + 'Must use LINK token', + ) + }) + + it('reverts if the amount passed in data mismatches actual amount sent', async () => { + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + true, + window_small, + threshold_big, + registry.address, + minLINKJuels, + ) + + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount1, + source, + ], + ) + + await evmRevert( + linkToken.transferAndCall(registrar.address, amount, abiEncodedBytes), + 'Amount mismatch', + ) + }) + + it('reverts if 
the admin address is 0x0000...', async () => { + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + '0x0000000000000000000000000000000000000000', + emptyBytes, + amount, + source, + ], + ) + + await evmRevert( + linkToken.transferAndCall(registrar.address, amount, abiEncodedBytes), + 'Unable to create request', + ) + }) + + it('Auto Approve ON - registers an upkeep on KeeperRegistry instantly and emits both RegistrationRequested and RegistrationApproved events', async () => { + //get current upkeep count + const upkeepCount = await registry.getUpkeepCount() + + //set auto approve ON with high threshold limits + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + true, + window_small, + threshold_big, + registry.address, + minLINKJuels, + ) + + //register with auto approve ON + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount, + source, + ], + ) + const tx = await linkToken.transferAndCall( + registrar.address, + amount, + abiEncodedBytes, + ) + + //confirm if a new upkeep has been registered and the details are the same as the one just registered + const newupkeep = await registry.getUpkeep(upkeepCount) + assert.equal(newupkeep.target, mock.address) + assert.equal(newupkeep.admin, await admin.getAddress()) + assert.equal(newupkeep.checkData, emptyBytes) + assert.equal(newupkeep.balance.toString(), amount.toString()) + assert.equal(newupkeep.executeGas, executeGas.toNumber()) + + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + + it('Auto Approve OFF - does not registers an upkeep on KeeperRegistry, emits only RegistrationRequested event', async () => { + //get upkeep count before attempting registration + const beforeCount = await 
registry.getUpkeepCount() + + //set auto approve OFF, threshold limits dont matter in this case + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + false, + window_small, + threshold_big, + registry.address, + minLINKJuels, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount, + source, + ], + ) + const tx = await linkToken.transferAndCall( + registrar.address, + amount, + abiEncodedBytes, + ) + const receipt = await tx.wait() + + //get upkeep count after attempting registration + const afterCount = await registry.getUpkeepCount() + //confirm that a new upkeep has NOT been registered and upkeep count is still the same + assert.deepEqual(beforeCount, afterCount) + + //confirm that only RegistrationRequested event is emitted and RegistrationApproved event is not + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).not.to.emit(registrar, 'RegistrationApproved') + + const hash = receipt.logs[2].topics[1] + const pendingRequest = await registrar.getPendingRequest(hash) + assert.equal(await admin.getAddress(), pendingRequest[0]) + assert.ok(amount.eq(pendingRequest[1])) + }) + + it('Auto Approve ON - Throttle max approvals - does not registers an upkeep on KeeperRegistry beyond the throttle limit, emits only RegistrationRequested event after throttle starts', async () => { + //get upkeep count before attempting registration + const beforeCount = await registry.getUpkeepCount() + + //set auto approve on, with low threshold limits + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + true, + window_big, + threshold_small, + registry.address, + minLINKJuels, + ) + + let abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + 
emptyBytes, + amount, + source, + ]) + + //register within threshold, new upkeep should be registered + await linkToken.transferAndCall( + registrar.address, + amount, + abiEncodedBytes, + ) + const intermediateCount = await registry.getUpkeepCount() + //make sure 1 upkeep was registered + assert.equal(beforeCount.toNumber() + 1, intermediateCount.toNumber()) + + //try registering more than threshold(say 2x), new upkeeps should not be registered after the threshold amount is reached + for (let step = 0; step < threshold_small.toNumber() * 2; step++) { + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + executeGas.toNumber() + step, // make unique hash + await admin.getAddress(), + emptyBytes, + amount, + source, + ]) + + await linkToken.transferAndCall( + registrar.address, + amount, + abiEncodedBytes, + ) + } + const afterCount = await registry.getUpkeepCount() + //count of newly registered upkeeps should be equal to the threshold set for auto approval + const newRegistrationsCount = + afterCount.toNumber() - beforeCount.toNumber() + assert( + newRegistrationsCount == threshold_small.toNumber(), + 'Registrations beyond threshold', + ) + }) + }) + + describe('#approve', () => { + let hash: string + + beforeEach(async () => { + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + false, + window_small, + threshold_big, + registry.address, + minLINKJuels, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount, + source, + ], + ) + + const tx = await linkToken.transferAndCall( + registrar.address, + amount, + abiEncodedBytes, + ) + const receipt = await tx.wait() + hash = receipt.logs[2].topics[1] + }) + + it('reverts if not called by the owner', async () => { + const tx = registrar + .connect(stranger) + 
.approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + hash, + ) + await evmRevert(tx, 'Only callable by owner') + }) + + it('reverts if the hash does not exist', async () => { + const tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + '0x000000000000000000000000322813fd9a801c5507c9de605d63cea4f2ce6c44', + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + + it('reverts if any member of the payload changes', async () => { + let tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + ethers.Wallet.createRandom().address, + executeGas, + await admin.getAddress(), + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + 10000, + await admin.getAddress(), + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + ethers.Wallet.createRandom().address, + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + '0x1234', + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + }) + + it('approves an existing registration request', async () => { + const tx = await registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + hash, + ) + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + + it('deletes the request afterwards / reverts if the request DNE', async () => { + await registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + hash, + ) + const tx = registrar + .connect(registrarOwner) + 
.approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + }) + + describe('#cancel', () => { + let hash: string + + beforeEach(async () => { + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + false, + window_small, + threshold_big, + registry.address, + minLINKJuels, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount, + source, + ], + ) + const tx = await linkToken.transferAndCall( + registrar.address, + amount, + abiEncodedBytes, + ) + const receipt = await tx.wait() + hash = receipt.logs[2].topics[1] + // submit duplicate request (increase balance) + await linkToken.transferAndCall( + registrar.address, + amount, + abiEncodedBytes, + ) + }) + + it('reverts if not called by the admin / owner', async () => { + const tx = registrar.connect(stranger).cancel(hash) + await evmRevert(tx, errorMsgs.onlyAdmin) + }) + + it('reverts if the hash does not exist', async () => { + const tx = registrar + .connect(registrarOwner) + .cancel( + '0x000000000000000000000000322813fd9a801c5507c9de605d63cea4f2ce6c44', + ) + await evmRevert(tx, 'request not found') + }) + + it('refunds the total request balance to the admin address', async () => { + const before = await linkToken.balanceOf(await admin.getAddress()) + const tx = await registrar.connect(admin).cancel(hash) + const after = await linkToken.balanceOf(await admin.getAddress()) + assert.isTrue(after.sub(before).eq(amount.mul(BigNumber.from(2)))) + await expect(tx).to.emit(registrar, 'RegistrationRejected') + }) + + it('deletes the request hash', async () => { + await registrar.connect(registrarOwner).cancel(hash) + let tx = registrar.connect(registrarOwner).cancel(hash) + await evmRevert(tx, errorMsgs.requestNotFound) + tx 
= registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + }) +}) diff --git a/contracts/test/v0.7/KeeperRegistry.test.ts b/contracts/test/v0.7/KeeperRegistry.test.ts index 8c39956f2f0..9feb588ceea 100644 --- a/contracts/test/v0.7/KeeperRegistry.test.ts +++ b/contracts/test/v0.7/KeeperRegistry.test.ts @@ -8,6 +8,7 @@ import { KeeperRegistry__factory as KeeperRegistryFactory } from '../../typechai import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../typechain/factories/MockV3Aggregator__factory' import { UpkeepMock__factory as UpkeepMockFactory } from '../../typechain/factories/UpkeepMock__factory' import { UpkeepReverter__factory as UpkeepReverterFactory } from '../../typechain/factories/UpkeepReverter__factory' +import { UpkeepAutoFunder__factory as UpkeepAutoFunderFactory } from '../../typechain/factories/UpkeepAutoFunder__factory' import { KeeperRegistry } from '../../typechain/KeeperRegistry' import { MockV3Aggregator } from '../../typechain/MockV3Aggregator' import { LinkToken } from '../../typechain/LinkToken' @@ -31,6 +32,7 @@ let mockV3AggregatorFactory: MockV3AggregatorFactory let keeperRegistryFactory: KeeperRegistryFactory let upkeepMockFactory: UpkeepMockFactory let upkeepReverterFactory: UpkeepReverterFactory +let upkeepAutoFunderFactory: UpkeepAutoFunderFactory let personas: Personas @@ -45,6 +47,7 @@ before(async () => { keeperRegistryFactory = await ethers.getContractFactory('KeeperRegistry') upkeepMockFactory = await ethers.getContractFactory('UpkeepMock') upkeepReverterFactory = await ethers.getContractFactory('UpkeepReverter') + upkeepAutoFunderFactory = await ethers.getContractFactory('UpkeepAutoFunder') }) describe('KeeperRegistry', () => { @@ -171,7 +174,7 @@ describe('KeeperRegistry', () => { describe('#typeAndVersion', () => { it('uses the correct type and version', async () 
=> { const typeAndVersion = await registry.typeAndVersion() - assert.equal(typeAndVersion, 'KeeperRegistry 1.1.0') + assert.equal(typeAndVersion, 'KeeperRegistry 1.2.0') }) }) @@ -915,6 +918,86 @@ describe('KeeperRegistry', () => { assert.isNotEmpty(eventLog?.[1].args?.[3]) assert.equal(eventLog?.[1].args?.[4], performData) }) + + it('can self fund', async () => { + const autoFunderUpkeep = await upkeepAutoFunderFactory + .connect(owner) + .deploy(linkToken.address, registry.address) + const tx = await registry + .connect(owner) + .registerUpkeep( + autoFunderUpkeep.address, + executeGas, + autoFunderUpkeep.address, + emptyBytes, + ) + const upkeepID = await getUpkeepID(tx) + await autoFunderUpkeep.setUpkeepId(upkeepID) + // Give enough funds for upkeep as well as to the upkeep contract + await linkToken.connect(owner).approve(registry.address, toWei('1000')) + await linkToken + .connect(owner) + .transfer(autoFunderUpkeep.address, toWei('1000')) + let maxPayment = await registry.getMaxPaymentForGas(executeGas) + + // First set auto funding amount to 0 and verify that balance is deducted upon performUpkeep + let initialBalance = toWei('100') + await registry.connect(owner).addFunds(upkeepID, initialBalance) + await autoFunderUpkeep.setAutoFundLink(0) + await autoFunderUpkeep.setIsEligible(true) + await registry.connect(keeper1).performUpkeep(upkeepID, '0x') + + let postUpkeepBalance = (await registry.getUpkeep(upkeepID)).balance + assert.isTrue(postUpkeepBalance.lt(initialBalance)) // Balance should be deducted + assert.isTrue(postUpkeepBalance.gte(initialBalance.sub(maxPayment))) // Balance should not be deducted more than maxPayment + + // Now set auto funding amount to 100 wei and verify that the balance increases + initialBalance = postUpkeepBalance + let autoTopupAmount = toWei('100') + await autoFunderUpkeep.setAutoFundLink(autoTopupAmount) + await autoFunderUpkeep.setIsEligible(true) + await registry.connect(keeper2).performUpkeep(upkeepID, '0x') + + 
postUpkeepBalance = (await registry.getUpkeep(upkeepID)).balance + // Balance should increase by autoTopupAmount and decrease by max maxPayment + assert.isTrue( + postUpkeepBalance.gte( + initialBalance.add(autoTopupAmount).sub(maxPayment), + ), + ) + }) + + it('can self cancel', async () => { + const autoFunderUpkeep = await upkeepAutoFunderFactory + .connect(owner) + .deploy(linkToken.address, registry.address) + const tx = await registry + .connect(owner) + .registerUpkeep( + autoFunderUpkeep.address, + executeGas, + autoFunderUpkeep.address, + emptyBytes, + ) + const upkeepID = await getUpkeepID(tx) + await autoFunderUpkeep.setUpkeepId(upkeepID) + + await linkToken.connect(owner).approve(registry.address, toWei('1000')) + await registry.connect(owner).addFunds(upkeepID, toWei('100')) + await autoFunderUpkeep.setIsEligible(true) + await autoFunderUpkeep.setShouldCancel(true) + + let registration = await registry.getUpkeep(upkeepID) + const oldExpiration = registration.maxValidBlocknumber + + // Do the thing + await registry.connect(keeper1).performUpkeep(upkeepID, '0x') + + // Verify upkeep gets cancelled + registration = await registry.getUpkeep(upkeepID) + const newExpiration = registration.maxValidBlocknumber + assert.isTrue(newExpiration.lt(oldExpiration)) + }) }) }) diff --git a/contracts/test/v0.7/KeeperRegistryDev.test.ts b/contracts/test/v0.7/KeeperRegistryDev.test.ts new file mode 100644 index 00000000000..087a1335d15 --- /dev/null +++ b/contracts/test/v0.7/KeeperRegistryDev.test.ts @@ -0,0 +1,1822 @@ +/* + * This test is for KeeperRegistryDev contract which is the + * development version of KeeperRegistry. Until it's audited and finalised + * this test will be used for development. 
There are 2 places marked in the test + * which will need to be changed when these tests are ported back to the prod version + */ + +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { evmRevert } from '../test-helpers/matchers' +import { getUsers, Personas } from '../test-helpers/setup' +import { BigNumber, Signer, BigNumberish } from 'ethers' +import { LinkToken__factory as LinkTokenFactory } from '../../typechain/factories/LinkToken__factory' +import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../typechain/factories/MockV3Aggregator__factory' +import { UpkeepMock__factory as UpkeepMockFactory } from '../../typechain/factories/UpkeepMock__factory' +import { UpkeepReverter__factory as UpkeepReverterFactory } from '../../typechain/factories/UpkeepReverter__factory' +import { UpkeepAutoFunder__factory as UpkeepAutoFunderFactory } from '../../typechain/factories/UpkeepAutoFunder__factory' +// These 2 dependencies are mocked from Dev +import { KeeperRegistryDev__factory as KeeperRegistryFactory } from '../../typechain/factories/KeeperRegistryDev__factory' +import { KeeperRegistryDev as KeeperRegistry } from '../../typechain/KeeperRegistryDev' + +import { MockV3Aggregator } from '../../typechain/MockV3Aggregator' +import { LinkToken } from '../../typechain/LinkToken' +import { UpkeepMock } from '../../typechain/UpkeepMock' +import { toWei } from '../test-helpers/helpers' + +async function getUpkeepID(tx: any) { + const receipt = await tx.wait() + return receipt.events[0].args.id +} + +// ----------------------------------------------------------------------------------------------- +// DEV: these *should* match the perform/check gas overhead values in the contract and on the node +const PERFORM_GAS_OVERHEAD = BigNumber.from(90000) +const CHECK_GAS_OVERHEAD = BigNumber.from(170000) +// ----------------------------------------------------------------------------------------------- + +// Smart contract factories +let 
linkTokenFactory: LinkTokenFactory +let mockV3AggregatorFactory: MockV3AggregatorFactory +let keeperRegistryFactory: KeeperRegistryFactory +let upkeepMockFactory: UpkeepMockFactory +let upkeepReverterFactory: UpkeepReverterFactory +let upkeepAutoFunderFactory: UpkeepAutoFunderFactory + +let personas: Personas + +before(async () => { + personas = (await getUsers()).personas + + linkTokenFactory = await ethers.getContractFactory('LinkToken') + // need full path because there are two contracts with name MockV3Aggregator + mockV3AggregatorFactory = (await ethers.getContractFactory( + 'src/v0.7/tests/MockV3Aggregator.sol:MockV3Aggregator', + )) as unknown as MockV3AggregatorFactory + // Lifts the Dev contract + keeperRegistryFactory = await ethers.getContractFactory('KeeperRegistryDev') + upkeepMockFactory = await ethers.getContractFactory('UpkeepMock') + upkeepReverterFactory = await ethers.getContractFactory('UpkeepReverter') + upkeepAutoFunderFactory = await ethers.getContractFactory('UpkeepAutoFunder') +}) + +describe('KeeperRegistry', () => { + const linkEth = BigNumber.from(300000000) + const gasWei = BigNumber.from(100) + const linkDivisibility = BigNumber.from('1000000000000000000') + const executeGas = BigNumber.from('100000') + const paymentPremiumBase = BigNumber.from('1000000000') + const paymentPremiumPPB = BigNumber.from('250000000') + const flatFeeMicroLink = BigNumber.from(0) + const blockCountPerTurn = BigNumber.from(3) + const emptyBytes = '0x00' + const zeroAddress = ethers.constants.AddressZero + const extraGas = BigNumber.from('250000') + const registryGasOverhead = BigNumber.from('80000') + const stalenessSeconds = BigNumber.from(43820) + const gasCeilingMultiplier = BigNumber.from(1) + const maxCheckGas = BigNumber.from(20000000) + const fallbackGasPrice = BigNumber.from(200) + const fallbackLinkPrice = BigNumber.from(200000000) + + let owner: Signer + let keeper1: Signer + let keeper2: Signer + let keeper3: Signer + let nonkeeper: Signer + let 
admin: Signer + let payee1: Signer + let payee2: Signer + let payee3: Signer + + let linkToken: LinkToken + let linkEthFeed: MockV3Aggregator + let gasPriceFeed: MockV3Aggregator + let registry: KeeperRegistry + let mock: UpkeepMock + + let id: BigNumber + let keepers: string[] + let payees: string[] + + beforeEach(async () => { + owner = personas.Default + keeper1 = personas.Carol + keeper2 = personas.Eddy + keeper3 = personas.Nancy + nonkeeper = personas.Ned + admin = personas.Neil + payee1 = personas.Nelly + payee2 = personas.Norbert + payee3 = personas.Nick + + keepers = [ + await keeper1.getAddress(), + await keeper2.getAddress(), + await keeper3.getAddress(), + ] + payees = [ + await payee1.getAddress(), + await payee2.getAddress(), + await payee3.getAddress(), + ] + + linkToken = await linkTokenFactory.connect(owner).deploy() + gasPriceFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(0, gasWei) + linkEthFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(9, linkEth) + registry = await keeperRegistryFactory + .connect(owner) + .deploy( + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + maxCheckGas, + stalenessSeconds, + gasCeilingMultiplier, + fallbackGasPrice, + fallbackLinkPrice, + ) + + mock = await upkeepMockFactory.deploy() + await linkToken + .connect(owner) + .transfer(await keeper1.getAddress(), toWei('1000')) + await linkToken + .connect(owner) + .transfer(await keeper2.getAddress(), toWei('1000')) + await linkToken + .connect(owner) + .transfer(await keeper3.getAddress(), toWei('1000')) + + await registry.connect(owner).setKeepers(keepers, payees) + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + id = await getUpkeepID(tx) + }) + + const linkForGas = ( + upkeepGasSpent: BigNumberish, + premiumPPB?: BigNumberish, + flatFee?: BigNumberish, + ) => { + 
premiumPPB = premiumPPB === undefined ? paymentPremiumPPB : premiumPPB + flatFee = flatFee === undefined ? flatFeeMicroLink : flatFee + const gasSpent = registryGasOverhead.add(BigNumber.from(upkeepGasSpent)) + const base = gasWei.mul(gasSpent).mul(linkDivisibility).div(linkEth) + const premium = base.mul(premiumPPB).div(paymentPremiumBase) + const flatFeeJules = BigNumber.from(flatFee).mul('1000000000000') + return base.add(premium).add(flatFeeJules) + } + + describe('#typeAndVersion', () => { + it('uses the correct type and version', async () => { + const typeAndVersion = await registry.typeAndVersion() + assert.equal(typeAndVersion, 'KeeperRegistry 1.2.0') + }) + }) + + describe('#setKeepers', () => { + const IGNORE_ADDRESS = '0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF' + it('reverts when not called by the owner', async () => { + await evmRevert( + registry.connect(keeper1).setKeepers([], []), + 'Only callable by owner', + ) + }) + + it('reverts when adding the same keeper twice', async () => { + await evmRevert( + registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await keeper1.getAddress()], + [await payee1.getAddress(), await payee1.getAddress()], + ), + 'cannot add keeper twice', + ) + }) + + it('reverts with different numbers of keepers/payees', async () => { + await evmRevert( + registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await keeper2.getAddress()], + [await payee1.getAddress()], + ), + 'address lists not the same length', + ) + await evmRevert( + registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress()], + [await payee1.getAddress(), await payee2.getAddress()], + ), + 'address lists not the same length', + ) + }) + + it('reverts if the payee is the zero address', async () => { + await evmRevert( + registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await keeper2.getAddress()], + [ + await payee1.getAddress(), + '0x0000000000000000000000000000000000000000', + ], + ), + 
'cannot set payee to the zero address', + ) + }) + + it('emits events for every keeper added and removed', async () => { + const oldKeepers = [ + await keeper1.getAddress(), + await keeper2.getAddress(), + ] + const oldPayees = [await payee1.getAddress(), await payee2.getAddress()] + await registry.connect(owner).setKeepers(oldKeepers, oldPayees) + assert.deepEqual(oldKeepers, await registry.getKeeperList()) + + // remove keepers + const newKeepers = [ + await keeper2.getAddress(), + await keeper3.getAddress(), + ] + const newPayees = [await payee2.getAddress(), await payee3.getAddress()] + const tx = await registry.connect(owner).setKeepers(newKeepers, newPayees) + assert.deepEqual(newKeepers, await registry.getKeeperList()) + + await expect(tx) + .to.emit(registry, 'KeepersUpdated') + .withArgs(newKeepers, newPayees) + }) + + it('updates the keeper to inactive when removed', async () => { + await registry.connect(owner).setKeepers(keepers, payees) + await registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await keeper3.getAddress()], + [await payee1.getAddress(), await payee3.getAddress()], + ) + const added = await registry.getKeeperInfo(await keeper1.getAddress()) + assert.isTrue(added.active) + const removed = await registry.getKeeperInfo(await keeper2.getAddress()) + assert.isFalse(removed.active) + }) + + it('does not change the payee if IGNORE_ADDRESS is used as payee', async () => { + const oldKeepers = [ + await keeper1.getAddress(), + await keeper2.getAddress(), + ] + const oldPayees = [await payee1.getAddress(), await payee2.getAddress()] + await registry.connect(owner).setKeepers(oldKeepers, oldPayees) + assert.deepEqual(oldKeepers, await registry.getKeeperList()) + + const newKeepers = [ + await keeper2.getAddress(), + await keeper3.getAddress(), + ] + const newPayees = [IGNORE_ADDRESS, await payee3.getAddress()] + const tx = await registry.connect(owner).setKeepers(newKeepers, newPayees) + assert.deepEqual(newKeepers, await 
registry.getKeeperList()) + + const ignored = await registry.getKeeperInfo(await keeper2.getAddress()) + assert.equal(await payee2.getAddress(), ignored.payee) + assert.equal(true, ignored.active) + + await expect(tx) + .to.emit(registry, 'KeepersUpdated') + .withArgs(newKeepers, newPayees) + }) + + it('reverts if the owner changes the payee', async () => { + await registry.connect(owner).setKeepers(keepers, payees) + await evmRevert( + registry + .connect(owner) + .setKeepers(keepers, [ + await payee1.getAddress(), + await payee2.getAddress(), + await owner.getAddress(), + ]), + 'cannot change payee', + ) + }) + }) + + describe('#registerUpkeep', () => { + it('reverts if the target is not a contract', async () => { + await evmRevert( + registry + .connect(owner) + .registerUpkeep( + zeroAddress, + executeGas, + await admin.getAddress(), + emptyBytes, + ), + 'target is not a contract', + ) + }) + + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry + .connect(keeper1) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ), + 'Only callable by owner or registrar', + ) + }) + + it('reverts if execute gas is too low', async () => { + await evmRevert( + registry + .connect(owner) + .registerUpkeep( + mock.address, + 2299, + await admin.getAddress(), + emptyBytes, + ), + 'min gas is 2300', + ) + }) + + it('reverts if execute gas is too high', async () => { + await evmRevert( + registry + .connect(owner) + .registerUpkeep( + mock.address, + 5000001, + await admin.getAddress(), + emptyBytes, + ), + 'max gas is 5000000', + ) + }) + + it('creates a record of the registration', async () => { + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + id = await getUpkeepID(tx) + await expect(tx) + .to.emit(registry, 'UpkeepRegistered') + .withArgs(id, executeGas, await admin.getAddress()) + const registration = await 
registry.getUpkeep(id) + assert.equal(mock.address, registration.target) + assert.equal(0, registration.balance.toNumber()) + assert.equal(emptyBytes, registration.checkData) + assert(registration.maxValidBlocknumber.eq('0xffffffffffffffff')) + }) + }) + + describe('#addFunds', () => { + const amount = toWei('1') + + beforeEach(async () => { + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + }) + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).addFunds(id.add(1), amount), + 'upkeep must be active', + ) + }) + + it('adds to the balance of the registration', async () => { + await registry.connect(keeper1).addFunds(id, amount) + const registration = await registry.getUpkeep(id) + assert.isTrue(amount.eq(registration.balance)) + }) + + it('emits a log', async () => { + const tx = await registry.connect(keeper1).addFunds(id, amount) + await expect(tx) + .to.emit(registry, 'FundsAdded') + .withArgs(id, await keeper1.getAddress(), amount) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(id) + await evmRevert( + registry.connect(keeper1).addFunds(id, amount), + 'upkeep must be active', + ) + }) + }) + + describe('#checkUpkeep', () => { + it('reverts if the upkeep is not funded', async () => { + await mock.setCanPerform(true) + await mock.setCanCheck(true) + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()), + 'insufficient funds', + ) + }) + + context('when the registration is funded', () => { + beforeEach(async () => { + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + await registry.connect(keeper1).addFunds(id, toWei('100')) + }) + + it('reverts if executed', async () => { + await mock.setCanPerform(true) + await mock.setCanCheck(true) + await evmRevert( + registry.checkUpkeep(id, await keeper1.getAddress()), + 'only for simulated 
backend', + ) + }) + + it('reverts if the specified keeper is not valid', async () => { + await mock.setCanPerform(true) + await mock.setCanCheck(true) + await evmRevert( + registry.checkUpkeep(id, await owner.getAddress()), + 'only for simulated backend', + ) + }) + + context('and upkeep is not needed', () => { + beforeEach(async () => { + await mock.setCanCheck(false) + }) + + it('reverts', async () => { + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()), + 'upkeep not needed', + ) + }) + }) + + context('and the upkeep check fails', () => { + beforeEach(async () => { + const reverter = await upkeepReverterFactory.deploy() + const tx = await registry + .connect(owner) + .registerUpkeep( + reverter.address, + 2500000, + await admin.getAddress(), + emptyBytes, + ) + id = await getUpkeepID(tx) + await linkToken + .connect(keeper1) + .approve(registry.address, toWei('100')) + await registry.connect(keeper1).addFunds(id, toWei('100')) + }) + + it('reverts', async () => { + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()), + 'call to check target failed', + ) + }) + }) + + context('and upkeep check simulations succeeds', () => { + beforeEach(async () => { + await mock.setCanCheck(true) + await mock.setCanPerform(true) + }) + + context('and the registry is paused', () => { + beforeEach(async () => { + await registry.connect(owner).pause() + }) + + it('reverts', async () => { + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()), + 'Pausable: paused', + ) + + await registry.connect(owner).unpause() + + await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()) + }) + }) + + it('returns true with pricing info if the target can execute', async () => { + const newGasMultiplier = BigNumber.from(10) + await registry + .connect(owner) + .setConfig( + 
paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + maxCheckGas, + stalenessSeconds, + newGasMultiplier, + fallbackGasPrice, + fallbackLinkPrice, + ) + const response = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()) + assert.isTrue(response.gasLimit.eq(executeGas)) + assert.isTrue(response.linkEth.eq(linkEth)) + assert.isTrue( + response.adjustedGasWei.eq(gasWei.mul(newGasMultiplier)), + ) + assert.isTrue( + response.maxLinkPayment.eq( + linkForGas(executeGas.toNumber()).mul(newGasMultiplier), + ), + ) + }) + + it('has a large enough gas overhead to cover upkeeps that use all their gas [ @skip-coverage ]', async () => { + await mock.setCheckGasToBurn(maxCheckGas) + await mock.setPerformGasToBurn(executeGas) + const gas = maxCheckGas + .add(executeGas) + .add(PERFORM_GAS_OVERHEAD) + .add(CHECK_GAS_OVERHEAD) + await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress(), { + gasLimit: gas, + }) + }) + }) + }) + }) + + describe('#performUpkeep', () => { + let _lastKeeper = keeper1 + async function getPerformPaymentAmount() { + _lastKeeper = _lastKeeper === keeper1 ? 
keeper2 : keeper1 + const before = ( + await registry.getKeeperInfo(await _lastKeeper.getAddress()) + ).balance + await registry.connect(_lastKeeper).performUpkeep(id, '0x') + const after = ( + await registry.getKeeperInfo(await _lastKeeper.getAddress()) + ).balance + const difference = after.sub(before) + return difference + } + + it('reverts if the registration is not funded', async () => { + await evmRevert( + registry.connect(keeper2).performUpkeep(id, '0x'), + 'insufficient funds', + ) + }) + + context('when the registration is funded', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + }) + + it('does not revert if the target cannot execute', async () => { + const mockResponse = await mock + .connect(zeroAddress) + .callStatic.checkUpkeep('0x') + assert.isFalse(mockResponse.callable) + + await registry.connect(keeper3).performUpkeep(id, '0x') + }) + + it('returns false if the target cannot execute', async () => { + const mockResponse = await mock + .connect(zeroAddress) + .callStatic.checkUpkeep('0x') + assert.isFalse(mockResponse.callable) + + assert.isFalse( + await registry.connect(keeper1).callStatic.performUpkeep(id, '0x'), + ) + }) + + it('returns true if called', async () => { + await mock.setCanPerform(true) + + const response = await registry + .connect(keeper1) + .callStatic.performUpkeep(id, '0x') + assert.isTrue(response) + }) + + it('reverts if not enough gas supplied', async () => { + await mock.setCanPerform(true) + + await evmRevert( + registry + .connect(keeper1) + .performUpkeep(id, '0x', { gasLimit: BigNumber.from('120000') }), + ) + }) + + it('executes the data passed to the registry', async () => { + await mock.setCanPerform(true) + + const performData = '0xc0ffeec0ffee' + const tx = await registry + .connect(keeper1) + .performUpkeep(id, performData, { gasLimit: extraGas }) + const receipt = await tx.wait() + const 
eventLog = receipt?.events + + assert.equal(eventLog?.length, 2) + assert.equal(eventLog?.[1].event, 'UpkeepPerformed') + assert.equal(eventLog?.[1].args?.[0].toNumber(), id.toNumber()) + assert.equal(eventLog?.[1].args?.[1], true) + assert.equal(eventLog?.[1].args?.[2], await keeper1.getAddress()) + assert.isNotEmpty(eventLog?.[1].args?.[3]) + assert.equal(eventLog?.[1].args?.[4], performData) + }) + + it('updates payment balances', async () => { + const keeperBefore = await registry.getKeeperInfo( + await keeper1.getAddress(), + ) + const registrationBefore = await registry.getUpkeep(id) + const keeperLinkBefore = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkBefore = await linkToken.balanceOf(registry.address) + + // Do the thing + await registry.connect(keeper1).performUpkeep(id, '0x') + + const keeperAfter = await registry.getKeeperInfo( + await keeper1.getAddress(), + ) + const registrationAfter = await registry.getUpkeep(id) + const keeperLinkAfter = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkAfter = await linkToken.balanceOf(registry.address) + + assert.isTrue(keeperAfter.balance.gt(keeperBefore.balance)) + assert.isTrue(registrationBefore.balance.gt(registrationAfter.balance)) + assert.isTrue(keeperLinkAfter.eq(keeperLinkBefore)) + assert.isTrue(registryLinkBefore.eq(registryLinkAfter)) + }) + + it('only pays for gas used [ @skip-coverage ]', async () => { + const before = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const tx = await registry.connect(keeper1).performUpkeep(id, '0x') + const receipt = await tx.wait() + const after = (await registry.getKeeperInfo(await keeper1.getAddress())) + .balance + + const max = linkForGas(executeGas.toNumber()) + const totalTx = linkForGas(receipt.gasUsed.toNumber()) + const difference = after.sub(before) + assert.isTrue(max.gt(totalTx)) + assert.isTrue(totalTx.gt(difference)) + 
assert.isTrue(linkForGas(5700).lt(difference)) // exact number is flaky + assert.isTrue(linkForGas(6000).gt(difference)) // instead test a range + }) + + it('only pays at a rate up to the gas ceiling [ @skip-coverage ]', async () => { + const multiplier = BigNumber.from(10) + const gasPrice = BigNumber.from('1000000000') // 10M x the gas feed's rate + await registry + .connect(owner) + .setConfig( + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + maxCheckGas, + stalenessSeconds, + multiplier, + fallbackGasPrice, + fallbackLinkPrice, + ) + + const before = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const tx = await registry + .connect(keeper1) + .performUpkeep(id, '0x', { gasPrice }) + const receipt = await tx.wait() + const after = (await registry.getKeeperInfo(await keeper1.getAddress())) + .balance + + const max = linkForGas(executeGas).mul(multiplier) + const totalTx = linkForGas(receipt.gasUsed).mul(multiplier) + const difference = after.sub(before) + assert.isTrue(max.gt(totalTx)) + assert.isTrue(totalTx.gt(difference)) + assert.isTrue(linkForGas(5700).mul(multiplier).lt(difference)) + assert.isTrue(linkForGas(6000).mul(multiplier).gt(difference)) + }) + + it('only pays as much as the node spent [ @skip-coverage ]', async () => { + const multiplier = BigNumber.from(10) + const gasPrice = BigNumber.from(200) // 2X the gas feed's rate + const effectiveMultiplier = BigNumber.from(2) + await registry + .connect(owner) + .setConfig( + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + maxCheckGas, + stalenessSeconds, + multiplier, + fallbackGasPrice, + fallbackLinkPrice, + ) + + const before = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const tx = await registry + .connect(keeper1) + .performUpkeep(id, '0x', { gasPrice }) + const receipt = await tx.wait() + const after = (await registry.getKeeperInfo(await keeper1.getAddress())) + .balance + + const max = 
linkForGas(executeGas.toNumber()).mul(effectiveMultiplier) + const totalTx = linkForGas(receipt.gasUsed).mul(effectiveMultiplier) + const difference = after.sub(before) + assert.isTrue(max.gt(totalTx)) + assert.isTrue(totalTx.gt(difference)) + assert.isTrue(linkForGas(5700).mul(effectiveMultiplier).lt(difference)) + assert.isTrue(linkForGas(6000).mul(effectiveMultiplier).gt(difference)) + }) + + it('pays the caller even if the target function fails', async () => { + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + const id = await getUpkeepID(tx) + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + const keeperBalanceBefore = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + + // Do the thing + await registry.connect(keeper1).performUpkeep(id, '0x') + + const keeperBalanceAfter = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + assert.isTrue(keeperBalanceAfter.gt(keeperBalanceBefore)) + }) + + it('reverts if called by a non-keeper', async () => { + await evmRevert( + registry.connect(nonkeeper).performUpkeep(id, '0x'), + 'only active keepers', + ) + }) + + it('reverts if the upkeep has been canceled', async () => { + await mock.setCanPerform(true) + + await registry.connect(owner).cancelUpkeep(id) + + await evmRevert( + registry.connect(keeper1).performUpkeep(id, '0x'), + 'invalid upkeep id', + ) + }) + + it('uses the fallback gas price if the feed price is stale [ @skip-coverage ]', async () => { + const normalAmount = await getPerformPaymentAmount() + const roundId = 99 + const answer = 100 + const updatedAt = 946684800 // New Years 2000 đŸĨŗ + const startedAt = 946684799 + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, answer, updatedAt, startedAt) + const amountWithStaleFeed = await getPerformPaymentAmount() + 
assert.isTrue(normalAmount.lt(amountWithStaleFeed)) + }) + + it('uses the fallback gas price if the feed price is non-sensical [ @skip-coverage ]', async () => { + const normalAmount = await getPerformPaymentAmount() + const roundId = 99 + const updatedAt = Math.floor(Date.now() / 1000) + const startedAt = 946684799 + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, -100, updatedAt, startedAt) + const amountWithNegativeFeed = await getPerformPaymentAmount() + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, 0, updatedAt, startedAt) + const amountWithZeroFeed = await getPerformPaymentAmount() + assert.isTrue(normalAmount.lt(amountWithNegativeFeed)) + assert.isTrue(normalAmount.lt(amountWithZeroFeed)) + }) + + it('uses the fallback if the link price feed is stale', async () => { + const normalAmount = await getPerformPaymentAmount() + const roundId = 99 + const answer = 100 + const updatedAt = 946684800 // New Years 2000 đŸĨŗ + const startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, answer, updatedAt, startedAt) + const amountWithStaleFeed = await getPerformPaymentAmount() + assert.isTrue(normalAmount.lt(amountWithStaleFeed)) + }) + + it('uses the fallback link price if the feed price is non-sensical', async () => { + const normalAmount = await getPerformPaymentAmount() + const roundId = 99 + const updatedAt = Math.floor(Date.now() / 1000) + const startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, -100, updatedAt, startedAt) + const amountWithNegativeFeed = await getPerformPaymentAmount() + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, 0, updatedAt, startedAt) + const amountWithZeroFeed = await getPerformPaymentAmount() + assert.isTrue(normalAmount.lt(amountWithNegativeFeed)) + assert.isTrue(normalAmount.lt(amountWithZeroFeed)) + }) + + it('reverts if the same caller calls twice in a row', async () => { + await 
registry.connect(keeper1).performUpkeep(id, '0x') + await evmRevert( + registry.connect(keeper1).performUpkeep(id, '0x'), + 'keepers must take turns', + ) + await registry.connect(keeper2).performUpkeep(id, '0x') + await evmRevert( + registry.connect(keeper2).performUpkeep(id, '0x'), + 'keepers must take turns', + ) + await registry.connect(keeper1).performUpkeep(id, '0x') + }) + + it('has a large enough gas overhead to cover upkeeps that use all their gas [ @skip-coverage ]', async () => { + await mock.setPerformGasToBurn(executeGas) + await mock.setCanPerform(true) + const gas = executeGas.add(PERFORM_GAS_OVERHEAD) + const performData = '0xc0ffeec0ffee' + const tx = await registry + .connect(keeper1) + .performUpkeep(id, performData, { gasLimit: gas }) + const receipt = await tx.wait() + const eventLog = receipt?.events + + assert.equal(eventLog?.length, 2) + assert.equal(eventLog?.[1].event, 'UpkeepPerformed') + assert.equal(eventLog?.[1].args?.[0].toNumber(), id.toNumber()) + assert.equal(eventLog?.[1].args?.[1], true) + assert.equal(eventLog?.[1].args?.[2], await keeper1.getAddress()) + assert.isNotEmpty(eventLog?.[1].args?.[3]) + assert.equal(eventLog?.[1].args?.[4], performData) + }) + + it('can self fund', async () => { + const autoFunderUpkeep = await upkeepAutoFunderFactory + .connect(owner) + .deploy(linkToken.address, registry.address) + const tx = await registry + .connect(owner) + .registerUpkeep( + autoFunderUpkeep.address, + executeGas, + autoFunderUpkeep.address, + emptyBytes, + ) + const upkeepID = await getUpkeepID(tx) + await autoFunderUpkeep.setUpkeepId(upkeepID) + // Give enough funds for upkeep as well as to the upkeep contract + await linkToken.connect(owner).approve(registry.address, toWei('1000')) + await linkToken + .connect(owner) + .transfer(autoFunderUpkeep.address, toWei('1000')) + let maxPayment = await registry.getMaxPaymentForGas(executeGas) + + // First set auto funding amount to 0 and verify that balance is deducted upon 
performUpkeep + let initialBalance = toWei('100') + await registry.connect(owner).addFunds(upkeepID, initialBalance) + await autoFunderUpkeep.setAutoFundLink(0) + await autoFunderUpkeep.setIsEligible(true) + await registry.connect(keeper1).performUpkeep(upkeepID, '0x') + + let postUpkeepBalance = (await registry.getUpkeep(upkeepID)).balance + assert.isTrue(postUpkeepBalance.lt(initialBalance)) // Balance should be deducted + assert.isTrue(postUpkeepBalance.gte(initialBalance.sub(maxPayment))) // Balance should not be deducted more than maxPayment + + // Now set auto funding amount to 100 wei and verify that the balance increases + initialBalance = postUpkeepBalance + let autoTopupAmount = toWei('100') + await autoFunderUpkeep.setAutoFundLink(autoTopupAmount) + await autoFunderUpkeep.setIsEligible(true) + await registry.connect(keeper2).performUpkeep(upkeepID, '0x') + + postUpkeepBalance = (await registry.getUpkeep(upkeepID)).balance + // Balance should increase by autoTopupAmount and decrease by max maxPayment + assert.isTrue( + postUpkeepBalance.gte( + initialBalance.add(autoTopupAmount).sub(maxPayment), + ), + ) + }) + + it('can self cancel', async () => { + const autoFunderUpkeep = await upkeepAutoFunderFactory + .connect(owner) + .deploy(linkToken.address, registry.address) + const tx = await registry + .connect(owner) + .registerUpkeep( + autoFunderUpkeep.address, + executeGas, + autoFunderUpkeep.address, + emptyBytes, + ) + const upkeepID = await getUpkeepID(tx) + await autoFunderUpkeep.setUpkeepId(upkeepID) + + await linkToken.connect(owner).approve(registry.address, toWei('1000')) + await registry.connect(owner).addFunds(upkeepID, toWei('100')) + await autoFunderUpkeep.setIsEligible(true) + await autoFunderUpkeep.setShouldCancel(true) + + let registration = await registry.getUpkeep(upkeepID) + const oldExpiration = registration.maxValidBlocknumber + + // Do the thing + await registry.connect(keeper1).performUpkeep(upkeepID, '0x') + + // Verify upkeep gets 
cancelled + registration = await registry.getUpkeep(upkeepID) + const newExpiration = registration.maxValidBlocknumber + assert.isTrue(newExpiration.lt(oldExpiration)) + }) + }) + }) + + describe('#withdrawFunds', () => { + beforeEach(async () => { + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + await registry.connect(keeper1).addFunds(id, toWei('1')) + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry + .connect(owner) + .withdrawFunds(id.add(1).toNumber(), await payee1.getAddress()), + 'only callable by admin', + ) + }) + + it('reverts if called on an uncanceled upkeep', async () => { + await evmRevert( + registry.connect(admin).withdrawFunds(id, await payee1.getAddress()), + 'upkeep must be canceled', + ) + }) + + it('reverts if called with the 0 address', async () => { + await evmRevert( + registry.connect(admin).withdrawFunds(id, zeroAddress), + 'cannot send to zero address', + ) + }) + + describe('after the registration is cancelled', () => { + beforeEach(async () => { + await registry.connect(owner).cancelUpkeep(id) + }) + + it('moves the funds out and updates the balance', async () => { + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const registryBefore = await linkToken.balanceOf(registry.address) + + let registration = await registry.getUpkeep(id) + assert.isTrue(toWei('1').eq(registration.balance)) + + await registry + .connect(admin) + .withdrawFunds(id, await payee1.getAddress()) + + const payee1After = await linkToken.balanceOf(await payee1.getAddress()) + const registryAfter = await linkToken.balanceOf(registry.address) + + assert.isTrue(payee1Before.add(toWei('1')).eq(payee1After)) + assert.isTrue(registryBefore.sub(toWei('1')).eq(registryAfter)) + + registration = await registry.getUpkeep(id) + assert.equal(0, registration.balance.toNumber()) + }) + }) + }) + + describe('#cancelUpkeep', () => { + it('reverts if the ID is not valid', 
async () => { + await evmRevert( + registry.connect(owner).cancelUpkeep(id.add(1).toNumber()), + 'too late to cancel upkeep', + ) + }) + + it('reverts if called by a non-owner/non-admin', async () => { + await evmRevert( + registry.connect(keeper1).cancelUpkeep(id), + 'only owner or admin', + ) + }) + + describe('when called by the owner', async () => { + it('sets the registration to invalid immediately', async () => { + const tx = await registry.connect(owner).cancelUpkeep(id) + const receipt = await tx.wait() + const registration = await registry.getUpkeep(id) + assert.equal( + registration.maxValidBlocknumber.toNumber(), + receipt.blockNumber, + ) + }) + + it('emits an event', async () => { + const tx = await registry.connect(owner).cancelUpkeep(id) + const receipt = await tx.wait() + await expect(tx) + .to.emit(registry, 'UpkeepCanceled') + .withArgs(id, BigNumber.from(receipt.blockNumber)) + }) + + it('updates the canceled registrations list', async () => { + let canceled = await registry.callStatic.getCanceledUpkeepList() + assert.deepEqual([], canceled) + + await registry.connect(owner).cancelUpkeep(id) + + canceled = await registry.callStatic.getCanceledUpkeepList() + assert.deepEqual([id], canceled) + }) + + it('immediately prevents upkeep', async () => { + await registry.connect(owner).cancelUpkeep(id) + + await evmRevert( + registry.connect(keeper2).performUpkeep(id, '0x'), + 'invalid upkeep id', + ) + }) + + it('does not revert if reverts if called multiple times', async () => { + await registry.connect(owner).cancelUpkeep(id) + await evmRevert( + registry.connect(owner).cancelUpkeep(id), + 'too late to cancel upkeep', + ) + }) + + describe('when called by the owner when the admin has just canceled', () => { + let oldExpiration: BigNumber + + beforeEach(async () => { + await registry.connect(admin).cancelUpkeep(id) + const registration = await registry.getUpkeep(id) + oldExpiration = registration.maxValidBlocknumber + }) + + it('allows the owner to 
cancel it more quickly', async () => { + await registry.connect(owner).cancelUpkeep(id) + + const registration = await registry.getUpkeep(id) + const newExpiration = registration.maxValidBlocknumber + assert.isTrue(newExpiration.lt(oldExpiration)) + }) + }) + }) + + describe('when called by the admin', async () => { + const delay = 50 + + it('sets the registration to invalid in 50 blocks', async () => { + const tx = await registry.connect(admin).cancelUpkeep(id) + const receipt = await tx.wait() + const registration = await registry.getUpkeep(id) + assert.equal( + registration.maxValidBlocknumber.toNumber(), + receipt.blockNumber + 50, + ) + }) + + it('emits an event', async () => { + const tx = await registry.connect(admin).cancelUpkeep(id) + const receipt = await tx.wait() + await expect(tx) + .to.emit(registry, 'UpkeepCanceled') + .withArgs(id, BigNumber.from(receipt.blockNumber + delay)) + }) + + it('updates the canceled registrations list', async () => { + let canceled = await registry.callStatic.getCanceledUpkeepList() + assert.deepEqual([], canceled) + + await registry.connect(admin).cancelUpkeep(id) + + canceled = await registry.callStatic.getCanceledUpkeepList() + assert.deepEqual([id], canceled) + }) + + it('immediately prevents upkeep', async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + await registry.connect(admin).cancelUpkeep(id) + await registry.connect(keeper2).performUpkeep(id, '0x') // still works + + for (let i = 0; i < delay; i++) { + await ethers.provider.send('evm_mine', []) + } + + await evmRevert( + registry.connect(keeper2).performUpkeep(id, '0x'), + 'invalid upkeep id', + ) + }) + + it('reverts if called again by the admin', async () => { + await registry.connect(admin).cancelUpkeep(id) + + await evmRevert( + registry.connect(admin).cancelUpkeep(id), + 'too late to cancel upkeep', + ) + }) + + it('does not revert or double add the cancellation 
record if called by the owner immediately after', async () => { + await registry.connect(admin).cancelUpkeep(id) + + await registry.connect(owner).cancelUpkeep(id) + + const canceled = await registry.callStatic.getCanceledUpkeepList() + assert.deepEqual([id], canceled) + }) + + it('reverts if called by the owner after the timeout', async () => { + await registry.connect(admin).cancelUpkeep(id) + + for (let i = 0; i < delay; i++) { + await ethers.provider.send('evm_mine', []) + } + + await evmRevert( + registry.connect(owner).cancelUpkeep(id), + 'too late to cancel upkeep', + ) + }) + }) + }) + + describe('#withdrawPayment', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + await registry.connect(keeper1).performUpkeep(id, '0x') + }) + + it('reverts if called by anyone but the payee', async () => { + await evmRevert( + registry + .connect(payee2) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ), + 'only callable by payee', + ) + }) + + it('reverts if called with the 0 address', async () => { + await evmRevert( + registry + .connect(payee2) + .withdrawPayment(await keeper1.getAddress(), zeroAddress), + 'cannot send to zero address', + ) + }) + + it('updates the balances', async () => { + const to = await nonkeeper.getAddress() + const keeperBefore = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const registrationBefore = (await registry.getUpkeep(id)).balance + const toLinkBefore = await linkToken.balanceOf(to) + const registryLinkBefore = await linkToken.balanceOf(registry.address) + + //// Do the thing + await registry + .connect(payee1) + .withdrawPayment(await keeper1.getAddress(), to) + + const keeperAfter = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const registrationAfter = (await registry.getUpkeep(id)).balance + const toLinkAfter = await 
linkToken.balanceOf(to) + const registryLinkAfter = await linkToken.balanceOf(registry.address) + + assert.isTrue(keeperAfter.eq(BigNumber.from(0))) + assert.isTrue(registrationBefore.eq(registrationAfter)) + assert.isTrue(toLinkBefore.add(keeperBefore).eq(toLinkAfter)) + assert.isTrue(registryLinkBefore.sub(keeperBefore).eq(registryLinkAfter)) + }) + + it('emits a log announcing the withdrawal', async () => { + const balance = (await registry.getKeeperInfo(await keeper1.getAddress())) + .balance + const tx = await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + await expect(tx) + .to.emit(registry, 'PaymentWithdrawn') + .withArgs( + await keeper1.getAddress(), + balance, + await nonkeeper.getAddress(), + await payee1.getAddress(), + ) + }) + }) + + describe('#transferPayeeship', () => { + it('reverts when called by anyone but the current payee', async () => { + await evmRevert( + registry + .connect(payee2) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ), + 'only callable by payee', + ) + }) + + it('reverts when transferring to self', async () => { + await evmRevert( + registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee1.getAddress(), + ), + 'cannot transfer to self', + ) + }) + + it('does not change the payee', async () => { + await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + + const info = await registry.getKeeperInfo(await keeper1.getAddress()) + assert.equal(await payee1.getAddress(), info.payee) + }) + + it('emits an event announcing the new payee', async () => { + const tx = await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + await expect(tx) + .to.emit(registry, 'PayeeshipTransferRequested') + .withArgs( + await keeper1.getAddress(), + await payee1.getAddress(), + await 
payee2.getAddress(), + ) + }) + + it('does not emit an event when called with the same proposal', async () => { + await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + + const tx = await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + const receipt = await tx.wait() + assert.equal(0, receipt.logs.length) + }) + }) + + describe('#acceptPayeeship', () => { + beforeEach(async () => { + await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + }) + + it('reverts when called by anyone but the proposed payee', async () => { + await evmRevert( + registry.connect(payee1).acceptPayeeship(await keeper1.getAddress()), + 'only callable by proposed payee', + ) + }) + + it('emits an event announcing the new payee', async () => { + const tx = await registry + .connect(payee2) + .acceptPayeeship(await keeper1.getAddress()) + await expect(tx) + .to.emit(registry, 'PayeeshipTransferred') + .withArgs( + await keeper1.getAddress(), + await payee1.getAddress(), + await payee2.getAddress(), + ) + }) + + it('does change the payee', async () => { + await registry.connect(payee2).acceptPayeeship(await keeper1.getAddress()) + + const info = await registry.getKeeperInfo(await keeper1.getAddress()) + assert.equal(await payee2.getAddress(), info.payee) + }) + }) + + describe('#setConfig', () => { + const payment = BigNumber.from(1) + const flatFee = BigNumber.from(2) + const checks = BigNumber.from(3) + const staleness = BigNumber.from(4) + const ceiling = BigNumber.from(5) + const maxGas = BigNumber.from(6) + const fbGasEth = BigNumber.from(7) + const fbLinkEth = BigNumber.from(8) + + it('reverts when called by anyone but the proposed owner', async () => { + await evmRevert( + registry + .connect(payee1) + .setConfig( + payment, + flatFee, + checks, + maxGas, + staleness, + gasCeilingMultiplier, + 
fbGasEth, + fbLinkEth, + ), + 'Only callable by owner', + ) + }) + + it('updates the config', async () => { + const old = await registry.getConfig() + const oldFlatFee = await registry.getFlatFee() + assert.isTrue(paymentPremiumPPB.eq(old.paymentPremiumPPB)) + assert.isTrue(flatFeeMicroLink.eq(oldFlatFee)) + assert.isTrue(blockCountPerTurn.eq(old.blockCountPerTurn)) + assert.isTrue(stalenessSeconds.eq(old.stalenessSeconds)) + assert.isTrue(gasCeilingMultiplier.eq(old.gasCeilingMultiplier)) + + await registry + .connect(owner) + .setConfig( + payment, + flatFee, + checks, + maxGas, + staleness, + ceiling, + fbGasEth, + fbLinkEth, + ) + + const updated = await registry.getConfig() + const newFlatFee = await registry.getFlatFee() + assert.equal(updated.paymentPremiumPPB, payment.toNumber()) + assert.equal(newFlatFee, flatFee.toNumber()) + assert.equal(updated.blockCountPerTurn, checks.toNumber()) + assert.equal(updated.stalenessSeconds, staleness.toNumber()) + assert.equal(updated.gasCeilingMultiplier, ceiling.toNumber()) + assert.equal(updated.checkGasLimit, maxGas.toNumber()) + assert.equal(updated.fallbackGasPrice.toNumber(), fbGasEth.toNumber()) + assert.equal(updated.fallbackLinkPrice.toNumber(), fbLinkEth.toNumber()) + }) + + it('emits an event', async () => { + const tx = await registry + .connect(owner) + .setConfig( + payment, + flatFee, + checks, + maxGas, + staleness, + ceiling, + fbGasEth, + fbLinkEth, + ) + await expect(tx) + .to.emit(registry, 'ConfigSet') + .withArgs( + payment, + checks, + maxGas, + staleness, + ceiling, + fbGasEth, + fbLinkEth, + ) + }) + }) + + describe('#onTokenTransfer', () => { + const amount = toWei('1') + + it('reverts if not called by the LINK token', async () => { + const data = ethers.utils.defaultAbiCoder.encode( + ['uint256'], + [id.toNumber().toString()], + ) + + await evmRevert( + registry + .connect(keeper1) + .onTokenTransfer(await keeper1.getAddress(), amount, data), + 'only callable through LINK', + ) + }) + + 
it('reverts if not called with more or less than 32 bytes', async () => { + const longData = ethers.utils.defaultAbiCoder.encode( + ['uint256', 'uint256'], + ['33', '34'], + ) + const shortData = '0x12345678' + + await evmRevert( + linkToken + .connect(owner) + .transferAndCall(registry.address, amount, longData), + ) + await evmRevert( + linkToken + .connect(owner) + .transferAndCall(registry.address, amount, shortData), + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(id) + await evmRevert( + registry.connect(keeper1).addFunds(id, amount), + 'upkeep must be active', + ) + }) + + it('updates the funds of the job id passed', async () => { + const data = ethers.utils.defaultAbiCoder.encode( + ['uint256'], + [id.toNumber().toString()], + ) + + const before = (await registry.getUpkeep(id)).balance + await linkToken + .connect(owner) + .transferAndCall(registry.address, amount, data) + const after = (await registry.getUpkeep(id)).balance + + assert.isTrue(before.add(amount).eq(after)) + }) + }) + + describe('#recoverFunds', () => { + const sent = toWei('7') + + beforeEach(async () => { + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + + // add funds to upkeep 1 and perform and withdraw some payment + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + const id1 = await getUpkeepID(tx) + await registry.connect(keeper1).addFunds(id1, toWei('5')) + await registry.connect(keeper1).performUpkeep(id1, '0x') + await registry.connect(keeper2).performUpkeep(id1, '0x') + await registry.connect(keeper3).performUpkeep(id1, '0x') + await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + + // transfer funds directly to the registry + await linkToken.connect(keeper1).transfer(registry.address, sent) + + // add funds to upkeep 2 and perform and 
withdraw some payment + const tx2 = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + const id2 = await getUpkeepID(tx2) + await registry.connect(keeper1).addFunds(id2, toWei('5')) + await registry.connect(keeper1).performUpkeep(id2, '0x') + await registry.connect(keeper2).performUpkeep(id2, '0x') + await registry.connect(keeper3).performUpkeep(id2, '0x') + await registry + .connect(payee2) + .withdrawPayment( + await keeper2.getAddress(), + await nonkeeper.getAddress(), + ) + + // transfer funds using onTokenTransfer + const data = ethers.utils.defaultAbiCoder.encode( + ['uint256'], + [id2.toNumber().toString()], + ) + await linkToken + .connect(owner) + .transferAndCall(registry.address, toWei('1'), data) + + // remove a keeper + await registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await keeper2.getAddress()], + [await payee1.getAddress(), await payee2.getAddress()], + ) + + // withdraw some funds + await registry.connect(owner).cancelUpkeep(id1) + await registry.connect(admin).withdrawFunds(id1, await admin.getAddress()) + }) + + it('reverts if not called by owner', async () => { + await evmRevert( + registry.connect(keeper1).recoverFunds(), + 'Only callable by owner', + ) + }) + + it('allows any funds that have been accidentally transfered to be moved', async () => { + const balanceBefore = await linkToken.balanceOf(registry.address) + + await linkToken.balanceOf(registry.address) + + await registry.connect(owner).recoverFunds() + const balanceAfter = await linkToken.balanceOf(registry.address) + assert.isTrue(balanceBefore.eq(balanceAfter.add(sent))) + }) + }) + + describe('#pause', () => { + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry.connect(keeper1).pause(), + 'Only callable by owner', + ) + }) + + it('marks the contract as paused', async () => { + assert.isFalse(await registry.paused()) + + await 
registry.connect(owner).pause() + + assert.isTrue(await registry.paused()) + }) + }) + + describe('#unpause', () => { + beforeEach(async () => { + await registry.connect(owner).pause() + }) + + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry.connect(keeper1).unpause(), + 'Only callable by owner', + ) + }) + + it('marks the contract as not paused', async () => { + assert.isTrue(await registry.paused()) + + await registry.connect(owner).unpause() + + assert.isFalse(await registry.paused()) + }) + }) + + describe('#getMaxPaymentForGas', () => { + const gasAmounts = [100000, 10000000] + const premiums = [0, 250000000] + const flatFees = [0, 1000000] + it('calculates the max fee approptiately', async () => { + for (let idx = 0; idx < gasAmounts.length; idx++) { + const gas = gasAmounts[idx] + for (let jdx = 0; jdx < premiums.length; jdx++) { + const premium = premiums[jdx] + for (let kdx = 0; kdx < flatFees.length; kdx++) { + const flatFee = flatFees[kdx] + await registry + .connect(owner) + .setConfig( + premium, + flatFee, + blockCountPerTurn, + maxCheckGas, + stalenessSeconds, + gasCeilingMultiplier, + fallbackGasPrice, + fallbackLinkPrice, + ) + const price = await registry.getMaxPaymentForGas(gas) + expect(price).to.equal(linkForGas(gas, premium, flatFee)) + } + } + } + }) + }) + + describe('#checkUpkeep / #performUpkeep', () => { + const performData = '0xc0ffeec0ffee' + const multiplier = BigNumber.from(10) + const flatFee = BigNumber.from('100000') //0.1 LINK + const callGasPrice = 1 + + it('uses the same minimum balance calculation [ @skip-coverage ]', async () => { + await registry + .connect(owner) + .setConfig( + paymentPremiumPPB, + flatFee, + blockCountPerTurn, + maxCheckGas, + stalenessSeconds, + multiplier, + fallbackGasPrice, + fallbackLinkPrice, + ) + await linkToken.connect(owner).approve(registry.address, toWei('100')) + + const tx1 = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + 
await admin.getAddress(), + emptyBytes, + ) + const upkeepID1 = await getUpkeepID(tx1) + const tx2 = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + const upkeepID2 = await getUpkeepID(tx2) + await mock.setCanCheck(true) + await mock.setCanPerform(true) + // upkeep 1 is underfunded, 2 is funded + const minBalance1 = (await registry.getMaxPaymentForGas(executeGas)).sub( + 1, + ) + const minBalance2 = await registry.getMaxPaymentForGas(executeGas) + await registry.connect(owner).addFunds(upkeepID1, minBalance1) + await registry.connect(owner).addFunds(upkeepID2, minBalance2) + // upkeep 1 check should revert, 2 should succeed + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepID1, await keeper1.getAddress(), { + gasPrice: callGasPrice, + }), + ) + await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepID2, await keeper1.getAddress(), { + gasPrice: callGasPrice, + }) + // upkeep 1 perform should revert, 2 should succeed + await evmRevert( + registry + .connect(keeper1) + .performUpkeep(upkeepID1, performData, { gasLimit: extraGas }), + 'insufficient funds', + ) + await registry + .connect(keeper1) + .performUpkeep(upkeepID2, performData, { gasLimit: extraGas }) + }) + }) + + describe('#getMinBalanceForUpkeep / #checkUpkeep', () => { + it('calculates the minimum balance appropriately', async () => { + const oneWei = BigNumber.from('1') + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + await mock.setCanCheck(true) + await mock.setCanPerform(true) + const minBalance = await registry.getMinBalanceForUpkeep(id) + const tooLow = minBalance.sub(oneWei) + await registry.connect(keeper1).addFunds(id, tooLow) + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()), + 'insufficient funds', + ) + await registry.connect(keeper1).addFunds(id, oneWei) + await registry + 
.connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()) + }) + }) +}) diff --git a/contracts/test/v0.8/Cron.test.ts b/contracts/test/v0.8/Cron.test.ts index d78d7fdf136..0b7bbd2f5db 100644 --- a/contracts/test/v0.8/Cron.test.ts +++ b/contracts/test/v0.8/Cron.test.ts @@ -47,7 +47,7 @@ describe('Cron', () => { }) describe('encodeCronString() / encodedSpecToString()', () => { - it('converts all valid cron strings to structs and back', async () => { + it('converts all valid cron strings to encoded structs and back', async () => { const tests = validCrons.map(async (input) => { const spec = await cron.encodeCronString(input) const output = await cron.encodedSpecToString(spec) diff --git a/contracts/test/v0.8/CronUpkeep.test.ts b/contracts/test/v0.8/CronUpkeep.test.ts index f25eb396a0b..e942dacb328 100644 --- a/contracts/test/v0.8/CronUpkeep.test.ts +++ b/contracts/test/v0.8/CronUpkeep.test.ts @@ -3,6 +3,9 @@ import { ethers } from 'hardhat' import { Contract } from 'ethers' import { assert, expect } from 'chai' import { CronUpkeepTestHelper } from '../../typechain/CronUpkeepTestHelper' +import { CronUpkeepDelegate } from '../../typechain/CronUpkeepDelegate' +import { CronUpkeepFactory } from '../../typechain/CronUpkeepFactory' +import { CronUpkeepTestHelper__factory as CronUpkeepTestHelperFactory } from '../../typechain/factories/CronUpkeepTestHelper__factory' import { CronInternalTestHelper } from '../../typechain/CronInternalTestHelper' import { CronReceiver } from '../../typechain/CronReceiver' import { BigNumber, BigNumberish } from '@ethersproject/bignumber' @@ -18,6 +21,9 @@ const CALL_FAILED_ERR = 'CallFailed' const CRON_NOT_FOUND_ERR = 'CronJobIDNotFound' let cron: CronUpkeepTestHelper +let cronFactory: CronUpkeepTestHelperFactory // the typechain factory that deploys cron upkeep contracts +let cronFactoryContract: CronUpkeepFactory // the cron factory contract +let cronDelegate: CronUpkeepDelegate let cronTestHelper: 
CronInternalTestHelper let cronReceiver1: CronReceiver let cronReceiver2: CronReceiver @@ -27,6 +33,7 @@ let owner: SignerWithAddress let stranger: SignerWithAddress const timeStamp = 32503680000 // Jan 1, 3000 12:00AM +const basicCronString = '0 * * * *' let handler1Sig: string let handler2Sig: string @@ -73,22 +80,24 @@ describe('CronUpkeep', () => { 'CronUpkeepDelegate', admin, ) - const cronDelegate = await cronDelegateFactory.deploy() + cronDelegate = await cronDelegateFactory.deploy() const cronExternalFactory = await ethers.getContractFactory( 'src/v0.8/libraries/external/Cron.sol:Cron', admin, ) const cronExternalLib = await cronExternalFactory.deploy() - const cronFactory = await ethers.getContractFactory( - 'CronUpkeepTestHelper', - { - signer: admin, - libraries: { Cron: cronExternalLib.address }, - }, - ) + cronFactory = await ethers.getContractFactory('CronUpkeepTestHelper', { + signer: admin, + libraries: { Cron: cronExternalLib.address }, + }) cron = ( - await cronFactory.deploy(owner.address, cronDelegate.address) + await cronFactory.deploy(owner.address, cronDelegate.address, 5, []) ).connect(owner) + const cronFactoryContractFactory = await ethers.getContractFactory( + 'CronUpkeepFactory', + { signer: admin, libraries: { Cron: cronExternalLib.address } }, + ) // the typechain factory that creates the cron factory contract + cronFactoryContract = await cronFactoryContractFactory.deploy() const fs = cronReceiver1.interface.functions handler1Sig = utils.id(fs['handler1()'].format('sighash')).slice(0, 10) // TODO this seems like an ethers bug handler2Sig = utils.id(fs['handler2()'].format('sighash')).slice(0, 10) @@ -99,7 +108,7 @@ describe('CronUpkeep', () => { 'CronInternalTestHelper', ) cronTestHelper = await cronTHFactory.deploy() - basicSpec = await cron.cronStringToEncodedSpec('0 * * * *') + basicSpec = await cronFactoryContract.encodeCronString(basicCronString) }) afterEach(async () => { @@ -111,13 +120,14 @@ describe('CronUpkeep', () => { // 
and typechain. Remove once the version issue is resolved. // https://app.shortcut.com/chainlinklabs/story/21905/remove-contract-cast-in-cronupkeep-test-ts h.publicAbi(cron as unknown as Contract, [ + 's_maxJobs', 'performUpkeep', 'createCronJobFromEncodedSpec', + 'updateCronJob', 'deleteCronJob', 'checkUpkeep', 'getActiveCronJobIDs', 'getCronJob', - 'cronStringToEncodedSpec', // Ownable methods: 'acceptOwnership', 'owner', @@ -133,8 +143,29 @@ describe('CronUpkeep', () => { }) describe('constructor()', () => { - it('sets the owner to the address provided', async () => { + it('sets the initial values', async () => { expect(await cron.owner()).to.equal(owner.address) + expect(await cron.s_maxJobs()).to.equal(5) + }) + + it('optionally creates a first job', async () => { + const payload = await cronFactoryContract.encodeCronJob( + cronReceiver1.address, + handler1Sig, + basicCronString, + ) + cron = ( + await cronFactory.deploy( + owner.address, + cronDelegate.address, + 5, + payload, + ) + ).connect(owner) + const job = await cron.getCronJob(1) + assert.equal(job.target, cronReceiver1.address) + assert.equal(job.handler, handler1Sig) + assert.equal(job.cronString, basicCronString) }) }) @@ -292,8 +323,12 @@ describe('CronUpkeep', () => { it('creates jobs with sequential IDs', async () => { const cronString1 = '0 * * * *' const cronString2 = '0 1,2,3 */4 5-6 1-2' - const encodedSpec1 = await cron.cronStringToEncodedSpec(cronString1) - const encodedSpec2 = await cron.cronStringToEncodedSpec(cronString2) + const encodedSpec1 = await cronFactoryContract.encodeCronString( + cronString1, + ) + const encodedSpec2 = await cronFactoryContract.encodeCronString( + cronString2, + ) const nextTick1 = ( await cronTestHelper.calculateNextTick(cronString1) ).toNumber() @@ -351,17 +386,79 @@ describe('CronUpkeep', () => { }) it('is only callable by the owner', async () => { - const encodedSpec = await cron.cronStringToEncodedSpec('0 * * * *') await expect( cron .connect(stranger) 
.createCronJobFromEncodedSpec( cronReceiver1.address, handler1Sig, - encodedSpec, + basicSpec, ), ).to.be.revertedWith(OWNABLE_ERR) }) + + it('errors if trying to create more jobs than allowed', async () => { + for (let idx = 0; idx < 5; idx++) { + await createBasicCron() + } + await expect(createBasicCron()).to.be.revertedWith('ExceedsMaxJobs') + }) + }) + + describe('updateCronJob()', () => { + const newCronString = '0 0 1 1 1' + let newEncodedSpec: string + beforeEach(async () => { + await createBasicCron() + newEncodedSpec = await cronFactoryContract.encodeCronString(newCronString) + }) + + it('updates a cron job', async () => { + let cron1 = await cron.getCronJob(1) + assert.equal(cron1.target, cronReceiver1.address) + assert.equal(cron1.handler, handler1Sig) + assert.equal(cron1.cronString, basicCronString) + await cron.updateCronJob( + 1, + cronReceiver2.address, + handler2Sig, + newEncodedSpec, + ) + cron1 = await cron.getCronJob(1) + assert.equal(cron1.target, cronReceiver2.address) + assert.equal(cron1.handler, handler2Sig) + assert.equal(cron1.cronString, newCronString) + }) + + it('emits an event', async () => { + await expect( + await cron.updateCronJob( + 1, + cronReceiver2.address, + handler2Sig, + newEncodedSpec, + ), + ).to.emit(cron, 'CronJobUpdated') + }) + + it('is only callable by the owner', async () => { + await expect( + cron + .connect(stranger) + .updateCronJob(1, cronReceiver2.address, handler2Sig, newEncodedSpec), + ).to.be.revertedWith(OWNABLE_ERR) + }) + + it('reverts if trying to update a non-existent ID', async () => { + await expect( + cron.updateCronJob( + 2, + cronReceiver2.address, + handler2Sig, + newEncodedSpec, + ), + ).to.be.revertedWith(CRON_NOT_FOUND_ERR) + }) }) describe('deleteCronJob()', () => { @@ -388,13 +485,6 @@ describe('CronUpkeep', () => { await expect(cron.deleteCronJob(1)).to.emit(cron, 'CronJobDeleted') }) - it('is only callable by the owner', async () => { - await createBasicCron() - await 
expect(cron.connect(stranger).deleteCronJob(1)).to.be.revertedWith( - OWNABLE_ERR, - ) - }) - it('reverts if trying to delete a non-existent ID', async () => { await createBasicCron() await createBasicCron() @@ -420,7 +510,7 @@ describe('CronUpkeep', () => { }) // only run during yarn test:gas -describe('Cron Gas Usage', () => { +describe.skip('Cron Gas Usage', () => { before(async () => { const accounts = await ethers.getSigners() admin = accounts[0] @@ -444,7 +534,7 @@ describe('Cron Gas Usage', () => { libraries: { Cron: cronExternalLib.address }, }, ) - cron = await cronFactory.deploy(owner.address, cronDelegate.address) + cron = await cronFactory.deploy(owner.address, cronDelegate.address, 5, []) const fs = cronReceiver1.interface.functions handler1Sig = utils .id(fs['handler1()'].format('sighash')) // TODO this seems like an ethers bug diff --git a/contracts/test/v0.8/CronUpkeepFactory.test.ts b/contracts/test/v0.8/CronUpkeepFactory.test.ts index 835d425f5f8..55a1a7ddd38 100644 --- a/contracts/test/v0.8/CronUpkeepFactory.test.ts +++ b/contracts/test/v0.8/CronUpkeepFactory.test.ts @@ -4,18 +4,23 @@ import { assert, expect } from 'chai' import { CronUpkeepFactory } from '../../typechain/CronUpkeepFactory' import type { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' import { reset } from '../test-helpers/helpers' +import * as h from '../test-helpers/helpers' + +const OWNABLE_ERR = 'Only callable by owner' let cronExternalLib: Contract let factory: CronUpkeepFactory let admin: SignerWithAddress let owner: SignerWithAddress +let stranger: SignerWithAddress describe('CronUpkeepFactory', () => { beforeEach(async () => { const accounts = await ethers.getSigners() admin = accounts[0] owner = accounts[1] + stranger = accounts[2] const cronExternalFactory = await ethers.getContractFactory( 'src/v0.8/libraries/external/Cron.sol:Cron', admin, @@ -37,6 +42,22 @@ describe('CronUpkeepFactory', () => { await reset() }) + it('has a limited public ABI [ 
@skip-coverage ]', () => { + h.publicAbi(factory as unknown as Contract, [ + 's_maxJobs', + 'newCronUpkeep', + 'newCronUpkeepWithJob', + 'setMaxJobs', + 'cronDelegateAddress', + 'encodeCronString', + 'encodeCronJob', + // Ownable methods: + 'acceptOwnership', + 'owner', + 'transferOwnership', + ]) + }) + describe('constructor()', () => { it('deploys a delegate contract', async () => { assert.notEqual( @@ -69,4 +90,18 @@ describe('CronUpkeepFactory', () => { ) }) }) + + describe('setMaxJobs()', () => { + it('sets the max jobs value', async () => { + expect(await factory.s_maxJobs()).to.equal(5) + await factory.setMaxJobs(6) + expect(await factory.s_maxJobs()).to.equal(6) + }) + + it('is only callable by the owner', async () => { + await expect(factory.connect(stranger).setMaxJobs(6)).to.be.revertedWith( + OWNABLE_ERR, + ) + }) + }) }) diff --git a/contracts/test/v0.8/PermissionedForwardProxy.ts b/contracts/test/v0.8/PermissionedForwardProxy.ts new file mode 100644 index 00000000000..ef9129d7bd1 --- /dev/null +++ b/contracts/test/v0.8/PermissionedForwardProxy.ts @@ -0,0 +1,176 @@ +import { ethers } from 'hardhat' +import { publicAbi } from '../test-helpers/helpers' +import { expect, assert } from 'chai' +import { Contract, ContractFactory } from 'ethers' +import { Personas, getUsers } from '../test-helpers/setup' + +const PERMISSION_NOT_SET = 'PermissionNotSet' + +let personas: Personas + +let controllerFactory: ContractFactory +let counterFactory: ContractFactory +let controller: Contract +let counter: Contract + +before(async () => { + personas = (await getUsers()).personas + controllerFactory = await ethers.getContractFactory( + 'src/v0.8/PermissionedForwardProxy.sol:PermissionedForwardProxy', + personas.Carol, + ) + counterFactory = await ethers.getContractFactory( + 'src/v0.8/tests/Counter.sol:Counter', + personas.Carol, + ) +}) + +describe('PermissionedForwardProxy', () => { + beforeEach(async () => { + controller = await 
controllerFactory.connect(personas.Carol).deploy() + counter = await counterFactory.connect(personas.Carol).deploy() + }) + + it('has a limited public interface [ @skip-coverage ]', async () => { + publicAbi(controller, [ + 'forward', + 'setPermission', + 'removePermission', + 'getPermission', + // Owned + 'acceptOwnership', + 'owner', + 'transferOwnership', + ]) + }) + + describe('#setPermission', () => { + describe('when called by a non-owner', () => { + it('reverts', async () => { + await expect( + controller + .connect(personas.Eddy) + .setPermission( + await personas.Carol.getAddress(), + await personas.Eddy.getAddress(), + ), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('when called by the owner', () => { + it('adds the permission to the proxy', async () => { + const tx = await controller + .connect(personas.Carol) + .setPermission( + await personas.Carol.getAddress(), + await personas.Eddy.getAddress(), + ) + const receipt = await tx.wait() + const eventLog = receipt?.events + + assert.equal(eventLog?.length, 1) + assert.equal(eventLog?.[0].event, 'PermissionSet') + assert.equal(eventLog?.[0].args?.[0], await personas.Carol.getAddress()) + assert.equal(eventLog?.[0].args?.[1], await personas.Eddy.getAddress()) + + expect( + await controller.getPermission(await personas.Carol.getAddress()), + ).to.be.equal(await personas.Eddy.getAddress()) + }) + }) + }) + + describe('#removePermission', () => { + beforeEach(async () => { + // Add permission before testing + await controller + .connect(personas.Carol) + .setPermission( + await personas.Carol.getAddress(), + await personas.Eddy.getAddress(), + ) + }) + + describe('when called by a non-owner', () => { + it('reverts', async () => { + await expect( + controller + .connect(personas.Eddy) + .removePermission(await personas.Carol.getAddress()), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('when called by the owner', () => { + it('removes the permission to the 
proxy', async () => { + const tx = await controller + .connect(personas.Carol) + .removePermission(await personas.Carol.getAddress()) + + const receipt = await tx.wait() + const eventLog = receipt?.events + + assert.equal(eventLog?.length, 1) + assert.equal(eventLog?.[0].event, 'PermissionRemoved') + assert.equal(eventLog?.[0].args?.[0], await personas.Carol.getAddress()) + + expect( + await controller.getPermission(await personas.Carol.getAddress()), + ).to.be.equal(ethers.constants.AddressZero) + }) + }) + }) + + describe('#forward', () => { + describe('when permission does not exist', () => { + it('reverts', async () => { + await expect( + controller + .connect(personas.Carol) + .forward(await personas.Eddy.getAddress(), '0x'), + ).to.be.revertedWith(PERMISSION_NOT_SET) + }) + }) + + describe('when permission exists', () => { + beforeEach(async () => { + // Add permission before testing + await controller + .connect(personas.Carol) + .setPermission(await personas.Carol.getAddress(), counter.address) + }) + + it('calls target successfully', async () => { + await controller + .connect(personas.Carol) + .forward( + counter.address, + counter.interface.encodeFunctionData('increment'), + ) + + expect(await counter.count()).to.be.equal(1) + }) + + it('reverts when target reverts and bubbles up error', async () => { + await expect( + controller + .connect(personas.Carol) + .forward( + counter.address, + counter.interface.encodeFunctionData('alwaysRevertWithString'), + ), + ).to.be.revertedWith('always revert') // Revert strings should be bubbled up + + await expect( + controller + .connect(personas.Carol) + .forward( + counter.address, + counter.interface.encodeFunctionData('alwaysRevert'), + ), + ).to.be.reverted // Javascript VM not able to parse custom errors defined on another contract + }) + }) + }) +}) diff --git a/contracts/test/v0.8/dev/ArbitrumSequencerUptimeFeed.test.ts b/contracts/test/v0.8/dev/ArbitrumSequencerUptimeFeed.test.ts index 
e46da8171bf..c5bf9eacb56 100644 --- a/contracts/test/v0.8/dev/ArbitrumSequencerUptimeFeed.test.ts +++ b/contracts/test/v0.8/dev/ArbitrumSequencerUptimeFeed.test.ts @@ -306,7 +306,7 @@ describe('ArbitrumSequencerUptimeFeed', () => { // Assert update expect(await arbitrumSequencerUptimeFeed.latestAnswer()).to.equal(1) expect(updateTx.cumulativeGasUsed.toNumber()).to.be.closeTo( - 93137, + 93015, gasUsedDeviation, ) }) diff --git a/core/.tool-versions b/core/.tool-versions index 3ad3b18aa75..4933a41e2e8 100644 --- a/core/.tool-versions +++ b/core/.tool-versions @@ -1 +1 @@ -golang 1.17.2 +golang 1.18 diff --git a/core/bridges/bridge_type.go b/core/bridges/bridge_type.go index ce9c2c76037..92bf7a323ed 100644 --- a/core/bridges/bridge_type.go +++ b/core/bridges/bridge_type.go @@ -143,10 +143,11 @@ func MarshalBridgeMetaData(latestAnswer *big.Int, updatedAt *big.Int) (map[strin // BridgeName defines what Adapter a TaskSpec will use. type BridgeName string +var bridgeNameRegex = regexp.MustCompile("^[a-zA-Z0-9-_]*$") + // ParseBridgeName returns a formatted Task type. 
func ParseBridgeName(val string) (BridgeName, error) { - re := regexp.MustCompile("^[a-zA-Z0-9-_]*$") - if !re.MatchString(val) { + if !bridgeNameRegex.MatchString(val) { return "", fmt.Errorf("task type validation: name %v contains invalid characters", val) } diff --git a/core/bridges/bridge_type_test.go b/core/bridges/bridge_type_test.go index b58f2e74ac4..d97bbec7360 100644 --- a/core/bridges/bridge_type_test.go +++ b/core/bridges/bridge_type_test.go @@ -1,6 +1,9 @@ package bridges_test import ( + "math/rand" + "strconv" + "strings" "testing" "github.com/smartcontractkit/chainlink/core/bridges" @@ -36,3 +39,23 @@ func TestBridgeType_Authenticate(t *testing.T) { }) } } + +func BenchmarkParseBridgeName(b *testing.B) { + const valid = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_` + for _, l := range []int{1, 10, 20, 50, 100, 1000, 10000} { + b.Run(strconv.Itoa(l), func(b *testing.B) { + var sb strings.Builder + for i := 0; i < l; i++ { + sb.WriteByte(valid[rand.Intn(len(valid))]) + } + name := sb.String() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := bridges.ParseBridgeName(name) + if err != nil { + b.Fatalf("failed to parse %q: %v\n", name, err) + } + } + }) + } +} diff --git a/core/bridges/mocks/orm.go b/core/bridges/mocks/orm.go index 53d0d0ed5ca..dd105570aba 100644 --- a/core/bridges/mocks/orm.go +++ b/core/bridges/mocks/orm.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/cbor/cbor.go b/core/cbor/cbor.go index 3b8c4b34a8f..590f45b4977 100644 --- a/core/cbor/cbor.go +++ b/core/cbor/cbor.go @@ -2,40 +2,36 @@ package cbor import ( "bytes" - "encoding/json" "fmt" "math/big" "github.com/fxamacker/cbor/v2" - "github.com/smartcontractkit/chainlink/core/store/models" + "github.com/pkg/errors" ) -// ParseDietCBOR attempts to coerce the input byte array into valid CBOR -// and then coerces it into a JSON object. 
+// ParseDietCBOR attempts to coerce the input byte array into valid CBOR. // Assumes the input is "diet" CBOR which is like CBOR, except: // 1. It is guaranteed to always be a map // 2. It may or may not include the opening and closing markers "{}" -func ParseDietCBOR(b []byte) (models.JSON, error) { +func ParseDietCBOR(b []byte) (map[string]interface{}, error) { b = autoAddMapDelimiters(b) var m map[interface{}]interface{} - if err := cbor.Unmarshal(b, &m); err != nil { - return models.JSON{}, err + return nil, err } coerced, err := CoerceInterfaceMapToStringMap(m) if err != nil { - return models.JSON{}, err + return nil, err } - jsb, err := json.Marshal(coerced) - if err != nil { - return models.JSON{}, err + output, ok := coerced.(map[string]interface{}) + if !ok { + return nil, errors.New("cbor data cannot be coerced to map") } - var js models.JSON - return js, json.Unmarshal(jsb, &js) + return output, nil } // ParseStandardCBOR parses CBOR in "standards compliant" mode. diff --git a/core/cbor/cbor_test.go b/core/cbor/cbor_test.go index fae61d3bc9f..dd58fecd37d 100644 --- a/core/cbor/cbor_test.go +++ b/core/cbor/cbor_test.go @@ -7,9 +7,10 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/fxamacker/cbor/v2" - "github.com/smartcontractkit/chainlink/core/store/models" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/core/internal/testutils" ) func Test_ParseCBOR(t *testing.T) { @@ -18,7 +19,7 @@ func Test_ParseCBOR(t *testing.T) { tests := []struct { name string in string - want models.JSON + want interface{} wantErrored bool }{ { @@ -70,7 +71,14 @@ func Test_ParseCBOR(t *testing.T) { // int(28948022309329048855892746252171976963317496166410141009864396001978282409983) "ff" + // primitive(*) "ff", // primitive(*) - jsonMustUnmarshal(t, 
`{"bignums":[18446744073709551616,28948022309329048855892746252171976963317496166410141009864396001978282409984,-18446744073709551617,-28948022309329048855892746252171976963317496166410141009864396001978282409984]}`), + map[string]interface{}{ + "bignums": []interface{}{ + testutils.MustParseBigInt(t, "18446744073709551616"), + testutils.MustParseBigInt(t, "28948022309329048855892746252171976963317496166410141009864396001978282409984"), + testutils.MustParseBigInt(t, "-18446744073709551617"), + testutils.MustParseBigInt(t, "-28948022309329048855892746252171976963317496166410141009864396001978282409984"), + }, + }, false, }, { @@ -96,12 +104,19 @@ func Test_ParseCBOR(t *testing.T) { "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // int(28948022309329048855892746252171976963317496166410141009864396001978282409983) "ff", // primitive(*) - jsonMustUnmarshal(t, `{"bignums":[18446744073709551616,28948022309329048855892746252171976963317496166410141009864396001978282409984,-18446744073709551617,-28948022309329048855892746252171976963317496166410141009864396001978282409984]}`), + map[string]interface{}{ + "bignums": []interface{}{ + testutils.MustParseBigInt(t, "18446744073709551616"), + testutils.MustParseBigInt(t, "28948022309329048855892746252171976963317496166410141009864396001978282409984"), + testutils.MustParseBigInt(t, "-18446744073709551617"), + testutils.MustParseBigInt(t, "-28948022309329048855892746252171976963317496166410141009864396001978282409984"), + }, + }, false, }, {"empty object", `0xa0`, jsonMustUnmarshal(t, `{}`), false}, {"empty string", `0x`, jsonMustUnmarshal(t, `{}`), false}, - {"invalid CBOR", `0xff`, models.JSON{}, true}, + {"invalid CBOR", `0xff`, jsonMustUnmarshal(t, `{}`), true}, } for _, test := range tests { @@ -177,8 +192,8 @@ func Test_autoAddMapDelimiters(t *testing.T) { } } -func jsonMustUnmarshal(t *testing.T, in string) models.JSON { - var j models.JSON +func jsonMustUnmarshal(t *testing.T, in string) interface{} 
{ + var j interface{} err := json.Unmarshal([]byte(in), &j) require.NoError(t, err) return j @@ -252,9 +267,9 @@ func TestJSON_CBOR(t *testing.T) { tests := []struct { name string - in models.JSON + in interface{} }{ - {"empty object", models.JSON{}}, + {"empty object", jsonMustUnmarshal(t, `{}`)}, {"array", jsonMustUnmarshal(t, `[1,2,3,4]`)}, { "basic object", @@ -276,14 +291,14 @@ func TestJSON_CBOR(t *testing.T) { decoded, err = CoerceInterfaceMapToStringMap(decoded) require.NoError(t, err) - assert.True(t, reflect.DeepEqual(test.in.Result.Value(), decoded)) + assert.True(t, reflect.DeepEqual(test.in, decoded)) }) } } // mustMarshal returns a bytes array of the JSON map or array encoded to CBOR. -func mustMarshal(t *testing.T, j models.JSON) []byte { - switch v := j.Result.Value().(type) { +func mustMarshal(t *testing.T, j interface{}) []byte { + switch v := j.(type) { case map[string]interface{}, []interface{}, nil: b, err := cbor.Marshal(v) if err != nil { diff --git a/core/chainlink.Dockerfile b/core/chainlink.Dockerfile index ab94f573241..7518ac9127e 100644 --- a/core/chainlink.Dockerfile +++ b/core/chainlink.Dockerfile @@ -26,7 +26,7 @@ RUN make contracts-operator-ui-build # Build the golang binary -FROM golang:1.17-buster +FROM golang:1.18-buster WORKDIR /chainlink COPY GNUmakefile VERSION ./ diff --git a/core/chains/evm/chain.go b/core/chains/evm/chain.go index 91fb59ff0fe..db6c6e71cb9 100644 --- a/core/chains/evm/chain.go +++ b/core/chains/evm/chain.go @@ -16,6 +16,7 @@ import ( "github.com/smartcontractkit/chainlink/core/chains/evm/headtracker" httypes "github.com/smartcontractkit/chainlink/core/chains/evm/headtracker/types" "github.com/smartcontractkit/chainlink/core/chains/evm/log" + "github.com/smartcontractkit/chainlink/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/core/chains/evm/monitor" "github.com/smartcontractkit/chainlink/core/chains/evm/txmgr" "github.com/smartcontractkit/chainlink/core/chains/evm/types" @@ -38,6 +39,7 @@ 
type Chain interface { HeadTracker() httypes.HeadTracker Logger() logger.Logger BalanceMonitor() monitor.BalanceMonitor + LogPoller() *logpoller.LogPoller } var _ Chain = &chain{} @@ -52,11 +54,12 @@ type chain struct { headBroadcaster httypes.HeadBroadcaster headTracker httypes.HeadTracker logBroadcaster log.Broadcaster + logPoller *logpoller.LogPoller balanceMonitor monitor.BalanceMonitor keyStore keystore.Eth } -func newChain(dbchain types.Chain, opts ChainSetOpts) (*chain, error) { +func newChain(dbchain types.Chain, nodes []types.Node, opts ChainSetOpts) (*chain, error) { chainID := dbchain.ID.ToInt() l := opts.Logger.With("evmChainID", chainID.String()) if !dbchain.Enabled { @@ -78,7 +81,7 @@ func newChain(dbchain types.Chain, opts ChainSetOpts) (*chain, error) { client = evmclient.NewNullClient(chainID, l) } else if opts.GenEthClient == nil { var err2 error - client, err2 = newEthClientFromChain(cfg, l, dbchain) + client, err2 = newEthClientFromChain(cfg, l, dbchain, nodes) if err2 != nil { return nil, errors.Wrapf(err2, "failed to instantiate eth client for chain with ID %s", dbchain.ID.String()) } @@ -140,6 +143,7 @@ func newChain(dbchain types.Chain, opts ChainSetOpts) (*chain, error) { } else { logBroadcaster = opts.GenLogBroadcaster(dbchain) } + logPoller := logpoller.NewLogPoller(logpoller.NewORM(chainID, db, l, cfg), client, l, cfg.EvmLogPollInterval(), int64(cfg.EvmFinalityDepth()), int64(cfg.EvmLogBackfillBatchSize())) // AddDependent for this chain // log broadcaster will not start until dependent ready is called by a @@ -148,20 +152,19 @@ func newChain(dbchain types.Chain, opts ChainSetOpts) (*chain, error) { headBroadcaster.Subscribe(logBroadcaster) - c := chain{ - utils.StartStopOnce{}, - chainID, - cfg, - client, - txm, - l, - headBroadcaster, - headTracker, - logBroadcaster, - balanceMonitor, - opts.KeyStore, - } - return &c, nil + return &chain{ + id: chainID, + cfg: cfg, + client: client, + txm: txm, + logger: l, + headBroadcaster: 
headBroadcaster, + headTracker: headTracker, + logBroadcaster: logBroadcaster, + logPoller: logPoller, + balanceMonitor: balanceMonitor, + keyStore: opts.KeyStore, + }, nil } func (c *chain) Start(ctx context.Context) error { @@ -172,6 +175,8 @@ func (c *chain) Start(ctx context.Context) error { if err := c.client.Dial(ctx); err != nil { return errors.Wrap(err, "failed to dial ethclient") } + // We do not start the log poller here, it gets + // started after the jobs so they have a chance to apply their filters. merr = multierr.Combine( c.txm.Start(ctx), c.headBroadcaster.Start(ctx), @@ -275,14 +280,14 @@ func (c *chain) ID() *big.Int { return c.id } func (c *chain) Client() evmclient.Client { return c.client } func (c *chain) Config() evmconfig.ChainScopedConfig { return c.cfg } func (c *chain) LogBroadcaster() log.Broadcaster { return c.logBroadcaster } +func (c *chain) LogPoller() *logpoller.LogPoller { return c.logPoller } func (c *chain) HeadBroadcaster() httypes.HeadBroadcaster { return c.headBroadcaster } func (c *chain) TxManager() txmgr.TxManager { return c.txm } func (c *chain) HeadTracker() httypes.HeadTracker { return c.headTracker } func (c *chain) Logger() logger.Logger { return c.logger } func (c *chain) BalanceMonitor() monitor.BalanceMonitor { return c.balanceMonitor } -func newEthClientFromChain(cfg evmclient.NodeConfig, lggr logger.Logger, chain types.Chain) (evmclient.Client, error) { - nodes := chain.Nodes +func newEthClientFromChain(cfg evmclient.NodeConfig, lggr logger.Logger, chain types.Chain, nodes []types.Node) (evmclient.Client, error) { chainID := big.Int(chain.ID) var primaries []evmclient.Node var sendonlys []evmclient.SendOnlyNode diff --git a/core/chains/evm/chain_set.go b/core/chains/evm/chain_set.go index 36057f95fd1..fa29e0c84ed 100644 --- a/core/chains/evm/chain_set.go +++ b/core/chains/evm/chain_set.go @@ -141,10 +141,9 @@ func (cll *chainSet) initializeChain(ctx context.Context, dbchain *types.Chain) if err != nil { return err 
} - dbchain.Nodes = nodes cid := dbchain.ID.String() - chain, err := newChain(*dbchain, cll.opts) + chain, err := newChain(*dbchain, nodes, cll.opts) if err != nil { return errors.Wrapf(err, "initializeChain: failed to instantiate chain %s", dbchain.ID.String()) } @@ -362,14 +361,23 @@ func LoadChainSet(opts ChainSetOpts) (ChainSet, error) { if err := checkOpts(&opts); err != nil { return nil, err } - dbchains, err := opts.ORM.EnabledChainsWithNodes() + chains, err := opts.ORM.EnabledChains() if err != nil { return nil, errors.Wrap(err, "error loading chains") } - return NewChainSet(opts, dbchains) + nodesSlice, _, err := opts.ORM.Nodes(0, -1) + if err != nil { + return nil, errors.Wrap(err, "error loading nodes") + } + nodes := make(map[string][]types.Node) + for _, n := range nodesSlice { + id := n.EVMChainID.String() + nodes[id] = append(nodes[id], n) + } + return NewChainSet(opts, chains, nodes) } -func NewChainSet(opts ChainSetOpts, dbchains []types.Chain) (ChainSet, error) { +func NewChainSet(opts ChainSetOpts, dbchains []types.Chain, nodes map[string][]types.Node) (ChainSet, error) { if err := checkOpts(&opts); err != nil { return nil, err } @@ -386,7 +394,7 @@ func NewChainSet(opts ChainSetOpts, dbchains []types.Chain) (ChainSet, error) { for i := range dbchains { cid := dbchains[i].ID.String() cll.logger.Infow(fmt.Sprintf("Loading chain %s", cid), "evmChainID", cid) - chain, err2 := newChain(dbchains[i], opts) + chain, err2 := newChain(dbchains[i], nodes[cid], opts) if err2 != nil { err = multierr.Combine(err, err2) continue diff --git a/core/chains/evm/client/client.go b/core/chains/evm/client/client.go index 77a772120e2..b94116151e0 100644 --- a/core/chains/evm/client/client.go +++ b/core/chains/evm/client/client.go @@ -11,7 +11,7 @@ import ( "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/utils" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" 
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" @@ -61,6 +61,7 @@ type Client interface { NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) + BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) @@ -239,6 +240,10 @@ func (client *client) BlockByNumber(ctx context.Context, number *big.Int) (*type return client.pool.BlockByNumber(ctx, number) } +func (client *client) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + return client.pool.BlockByHash(ctx, hash) +} + func (client *client) HeadByNumber(ctx context.Context, number *big.Int) (head *evmtypes.Head, err error) { hex := ToBlockNumArg(number) err = client.pool.CallContext(ctx, &head, "eth_getBlockByNumber", hex, false) diff --git a/core/chains/evm/client/erroring_node.go b/core/chains/evm/client/erroring_node.go index 9c3b4c65670..178f08d9f9e 100644 --- a/core/chains/evm/client/erroring_node.go +++ b/core/chains/evm/client/erroring_node.go @@ -61,6 +61,10 @@ func (e *erroringNode) BlockByNumber(ctx context.Context, number *big.Int) (*typ return nil, errors.New(e.errMsg) } +func (e *erroringNode) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + return nil, errors.New(e.errMsg) +} + func (e *erroringNode) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { return nil, errors.New(e.errMsg) } diff --git 
a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go index d8a13b51f46..85cd3ff09b2 100644 --- a/core/chains/evm/client/helpers_test.go +++ b/core/chains/evm/client/helpers_test.go @@ -48,3 +48,8 @@ func NewClientWithTestNode(cfg NodeConfig, lggr logger.Logger, rpcUrl string, rp func Wrap(err error, s string) error { return wrap(err, s) } + +type TestableSendOnlyNode interface { + SendOnlyNode + SetEthClient(newBatchSender BatchSender, newSender TxSender) +} diff --git a/core/chains/evm/client/mocks/BatchSender.go b/core/chains/evm/client/mocks/BatchSender.go new file mode 100644 index 00000000000..d01cc90c8d4 --- /dev/null +++ b/core/chains/evm/client/mocks/BatchSender.go @@ -0,0 +1,29 @@ +// Code generated by mockery v2.10.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + rpc "github.com/ethereum/go-ethereum/rpc" + mock "github.com/stretchr/testify/mock" +) + +// BatchSender is an autogenerated mock type for the BatchSender type +type BatchSender struct { + mock.Mock +} + +// BatchCallContext provides a mock function with given fields: ctx, b +func (_m *BatchSender) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { + ret := _m.Called(ctx, b) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []rpc.BatchElem) error); ok { + r0 = rf(ctx, b) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/core/chains/evm/client/mocks/TxSender.go b/core/chains/evm/client/mocks/TxSender.go new file mode 100644 index 00000000000..a76de8da3ab --- /dev/null +++ b/core/chains/evm/client/mocks/TxSender.go @@ -0,0 +1,55 @@ +// Code generated by mockery v2.10.1. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + context "context" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// TxSender is an autogenerated mock type for the TxSender type +type TxSender struct { + mock.Mock +} + +// ChainID provides a mock function with given fields: _a0 +func (_m *TxSender) ChainID(_a0 context.Context) (*big.Int, error) { + ret := _m.Called(_a0) + + var r0 *big.Int + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendTransaction provides a mock function with given fields: ctx, tx +func (_m *TxSender) SendTransaction(ctx context.Context, tx *types.Transaction) error { + ret := _m.Called(ctx, tx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/core/chains/evm/client/node.go b/core/chains/evm/client/node.go index 90676734cf4..c8c12c56412 100644 --- a/core/chains/evm/client/node.go +++ b/core/chains/evm/client/node.go @@ -98,6 +98,7 @@ type Node interface { NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) + BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) @@ -134,6 +135,10 @@ type node struct { 
state NodeState stateMu sync.RWMutex + // Need to track subscriptions because closing the RPC does not (always?) + // close the underlying subscription + subs []ethereum.Subscription + // chStopInFlight can be closed to immediately cancel all in-flight requests on // this node. Closing and replacing should be serialized through // stateMu since it can happen on state transitions as well as node Close. @@ -344,6 +349,24 @@ func (n *node) Close() { } } +// registerSub adds the sub to the node list +func (n *node) registerSub(sub ethereum.Subscription) { + n.stateMu.Lock() + defer n.stateMu.Unlock() + n.subs = append(n.subs, sub) +} + +// disconnectAll disconnects all clients connected to the node +// WARNING: NOT THREAD-SAFE +// This must be called from within the n.stateMu lock +func (n *node) disconnectAll() { + if n.ws.rpc != nil { + n.ws.rpc.Close() + } + n.cancelInflightRequests() + n.unsubscribeAll() +} + // cancelInflightRequests closes and replaces the chStopInFlight // WARNING: NOT THREAD-SAFE // This must be called from within the n.stateMu lock @@ -352,6 +375,16 @@ func (n *node) cancelInflightRequests() { n.chStopInFlight = make(chan struct{}) } +// unsubscribeAll unsubscribes all subscriptions +// WARNING: NOT THREAD-SAFE +// This must be called from within the n.stateMu lock +func (n *node) unsubscribeAll() { + for _, sub := range n.subs { + sub.Unsubscribe() + } + n.subs = nil +} + // getChStopInflight provides a convenience helper that mutex wraps a // read to the chStopInFlight func (n *node) getChStopInflight() chan struct{} { @@ -444,6 +477,9 @@ func (n *node) EthSubscribe(ctx context.Context, channel chan<- *evmtypes.Head, "err", err, ) + if sub != nil { + n.registerSub(sub) + } return sub, err } @@ -770,6 +806,35 @@ func (n *node) BlockByNumber(ctx context.Context, number *big.Int) (b *types.Blo return } +func (n *node) BlockByHash(ctx context.Context, hash common.Hash) (b *types.Block, err error) { + ctx, cancel, err := n.makeLiveQueryCtx(ctx) + 
if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr(switching(n)).With("hash", hash) + + lggr.Debug("RPC call: evmclient.Client#BlockByHash") + start := time.Now() + if n.http != nil { + b, err = n.http.geth.BlockByHash(ctx, hash) + err = n.wrapHTTP(err) + } else { + b, err = n.ws.geth.BlockByHash(ctx, hash) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "BlockByHash", + "block", b, + "duration", duration, + "rpcDomain", n.getRPCDomain(), + "err", err, + ) + + return +} + func (n *node) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (balance *big.Int, err error) { ctx, cancel, err := n.makeLiveQueryCtx(ctx) if err != nil { @@ -848,6 +913,10 @@ func (n *node) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, "err", err, ) + if sub != nil { + n.registerSub(sub) + } + return } diff --git a/core/chains/evm/client/node_fsm.go b/core/chains/evm/client/node_fsm.go index 23fa3979db9..f7b94ad2e6c 100644 --- a/core/chains/evm/client/node_fsm.go +++ b/core/chains/evm/client/node_fsm.go @@ -184,9 +184,7 @@ func (n *node) transitionToOutOfSync(fn func()) { } switch n.state { case NodeStateAlive: - // Need to disconnect all clients subscribed to this node - n.ws.rpc.Close() - n.cancelInflightRequests() + n.disconnectAll() n.state = NodeStateOutOfSync default: panic(fmt.Sprintf("cannot transition from %#v to %#v", n.state, NodeStateOutOfSync)) @@ -211,9 +209,7 @@ func (n *node) transitionToUnreachable(fn func()) { } switch n.state { case NodeStateUndialed, NodeStateDialed, NodeStateAlive, NodeStateOutOfSync, NodeStateInvalidChainID: - // Need to disconnect all clients subscribed to this node - n.ws.rpc.Close() - n.cancelInflightRequests() + n.disconnectAll() n.state = NodeStateUnreachable default: panic(fmt.Sprintf("cannot transition from %#v to %#v", n.state, NodeStateUnreachable)) @@ -238,9 +234,7 @@ func (n *node) transitionToInvalidChainID(fn 
func()) { } switch n.state { case NodeStateDialed: - // Need to disconnect all clients subscribed to this node - n.ws.rpc.Close() - n.cancelInflightRequests() + n.disconnectAll() n.state = NodeStateInvalidChainID default: panic(fmt.Sprintf("cannot transition from %#v to %#v", n.state, NodeStateInvalidChainID)) diff --git a/core/chains/evm/client/node_fsm_test.go b/core/chains/evm/client/node_fsm_test.go index 5c7cb7d7bf6..1351c549ac8 100644 --- a/core/chains/evm/client/node_fsm_test.go +++ b/core/chains/evm/client/node_fsm_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/ethereum/go-ethereum" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" @@ -30,6 +31,15 @@ func (fm *fnMock) AssertNumberOfCalls(t *testing.T, n int) { assert.Equal(t, n, fm.calls) } +var _ ethereum.Subscription = (*subMock)(nil) + +type subMock struct{ unsubbed bool } + +func (s *subMock) Unsubscribe() { + s.unsubbed = true +} +func (s *subMock) Err() <-chan error { return nil } + func TestUnit_Node_StateTransitions(t *testing.T) { s := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (string, string) { return "", "" @@ -87,6 +97,15 @@ func TestUnit_Node_StateTransitions(t *testing.T) { n.transitionToOutOfSync(m.Fn) m.AssertCalled(t) }) + t.Run("transitionToOutOfSync unsubscribes everything", func(t *testing.T) { + m := new(fnMock) + n.setState(NodeStateAlive) + sub := &subMock{} + n.registerSub(sub) + n.transitionToOutOfSync(m.Fn) + m.AssertNumberOfCalls(t, 1) + assert.True(t, sub.unsubbed) + }) t.Run("transitionToUnreachable", func(t *testing.T) { m := new(fnMock) n.setState(NodeStateUnreachable) @@ -110,6 +129,15 @@ func TestUnit_Node_StateTransitions(t *testing.T) { n.transitionToUnreachable(m.Fn) m.AssertNumberOfCalls(t, 5) }) + t.Run("transitionToUnreachable unsubscribes everything", func(t *testing.T) { + m := new(fnMock) + n.setState(NodeStateDialed) + sub := &subMock{} + 
n.registerSub(sub) + n.transitionToUnreachable(m.Fn) + m.AssertNumberOfCalls(t, 1) + assert.True(t, sub.unsubbed) + }) t.Run("transitionToInvalidChainID", func(t *testing.T) { m := new(fnMock) n.setState(NodeStateUnreachable) @@ -121,6 +149,15 @@ func TestUnit_Node_StateTransitions(t *testing.T) { n.transitionToInvalidChainID(m.Fn) m.AssertCalled(t) }) + t.Run("transitionToInvalidChainID unsubscribes everything", func(t *testing.T) { + m := new(fnMock) + n.setState(NodeStateDialed) + sub := &subMock{} + n.registerSub(sub) + n.transitionToInvalidChainID(m.Fn) + m.AssertNumberOfCalls(t, 1) + assert.True(t, sub.unsubbed) + }) t.Run("Close", func(t *testing.T) { // first attempt panics due to node being unstarted assert.Panics(t, n.Close) diff --git a/core/chains/evm/client/node_lifecycle_test.go b/core/chains/evm/client/node_lifecycle_test.go index 144c2c32aac..c21c581b2d4 100644 --- a/core/chains/evm/client/node_lifecycle_test.go +++ b/core/chains/evm/client/node_lifecycle_test.go @@ -32,7 +32,7 @@ func newTestNodeWithCallback(t *testing.T, cfg NodeConfig, callback testutils.JS return n } -// dial setups up the node and puts it into the live state, bypassing the +// dial sets up the node and puts it into the live state, bypassing the // normal Start() method which would fire off unwanted goroutines func dial(t *testing.T, n *node) { ctx := testutils.TestCtx(t) @@ -65,12 +65,10 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { dial(t, n) ch := make(chan struct{}) + n.wg.Add(1) go func() { - n.IfStarted(func() { - n.wg.Add(1) - n.aliveLoop() - }) - close(ch) + defer close(ch) + n.aliveLoop() }() n.Close() testutils.WaitWithTimeout(t, ch, "expected aliveLoop to exit") @@ -217,18 +215,44 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) }) - t.Run("when no new heads received for threshold, transitions to unreachable", func(t *testing.T) { - pollDisabledCfg := TestNodeConfig{NoNewHeadsThreshold: testutils.TestInterval} - n := newTestNode(t, 
pollDisabledCfg) + t.Run("when no new heads received for threshold, transitions to out of sync", func(t *testing.T) { + cfg := TestNodeConfig{NoNewHeadsThreshold: 1 * time.Second} + chSubbed := make(chan struct{}, 2) + s := testutils.NewWSServer(t, testutils.FixtureChainID, + func(method string, params gjson.Result) (respResult string, notifyResult string) { + switch method { + case "eth_subscribe": + select { + case chSubbed <- struct{}{}: + default: + } + return `"0x00"`, makeHeadResult(0) + case "web3_clientVersion": + return `"test client version 2"`, "" + default: + t.Fatalf("unexpected RPC method: %s", method) + } + return "", "" + }) + defer s.Close() + + iN := NewNode(cfg, logger.TestLogger(t), *s.WSURL(), nil, "test node", 42, testutils.FixtureChainID) + n := iN.(*node) + dial(t, n) defer n.Close() n.wg.Add(1) go n.aliveLoop() + testutils.WaitWithTimeout(t, chSubbed, "timed out waiting for initial subscription for InSync") + testutils.AssertEventually(t, func() bool { - return n.State() == NodeStateUnreachable + return n.State() == NodeStateOutOfSync }) + + // Otherwise, there may be data race on dial() vs Close() (accessing ws.rpc) + testutils.WaitWithTimeout(t, chSubbed, "timed out waiting for initial subscription for OutOfSync") }) t.Run("when no new heads received for threshold but we are the last live node, forcibly stays alive", func(t *testing.T) { @@ -271,12 +295,11 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { n.setState(NodeStateOutOfSync) ch := make(chan struct{}) + + n.wg.Add(1) go func() { - n.IfStarted(func() { - n.wg.Add(1) - n.aliveLoop() - }) - close(ch) + defer close(ch) + n.aliveLoop() }() n.Close() testutils.WaitWithTimeout(t, ch, "expected outOfSyncLoop to exit") @@ -438,8 +461,8 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { n.setState(NodeStateUnreachable) ch := make(chan struct{}) + n.wg.Add(1) go func() { - n.wg.Add(1) n.unreachableLoop() close(ch) }() @@ -508,8 +531,8 @@ func 
TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { n.setState(NodeStateInvalidChainID) ch := make(chan struct{}) + n.wg.Add(1) go func() { - n.wg.Add(1) n.invalidChainIDLoop() close(ch) }() diff --git a/core/chains/evm/client/null_client.go b/core/chains/evm/client/null_client.go index fbc82ba5a25..e1c24b08790 100644 --- a/core/chains/evm/client/null_client.go +++ b/core/chains/evm/client/null_client.go @@ -4,7 +4,7 @@ import ( "context" "math/big" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rpc" @@ -145,6 +145,11 @@ func (nc *NullClient) BlockByNumber(ctx context.Context, number *big.Int) (*type return nil, nil } +func (nc *NullClient) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + nc.lggr.Debug("BlockByHash") + return nil, nil +} + func (nc *NullClient) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { nc.lggr.Debug("BalanceAt") return big.NewInt(0), nil diff --git a/core/chains/evm/client/pool.go b/core/chains/evm/client/pool.go index b96f6042963..076aab32ea9 100644 --- a/core/chains/evm/client/pool.go +++ b/core/chains/evm/client/pool.go @@ -328,6 +328,10 @@ func (p *Pool) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block return p.getRoundRobin().BlockByNumber(ctx, number) } +func (p *Pool) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + return p.getRoundRobin().BlockByHash(ctx, hash) +} + func (p *Pool) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { return p.getRoundRobin().BalanceAt(ctx, account, blockNumber) } diff --git a/core/chains/evm/client/send_only_node.go b/core/chains/evm/client/send_only_node.go index 43a766dfcef..b0a28b86192 100644 --- a/core/chains/evm/client/send_only_node.go +++ 
b/core/chains/evm/client/send_only_node.go @@ -3,6 +3,7 @@ package client import ( "context" "fmt" + "log" "math/big" "net/url" "strconv" @@ -33,18 +34,30 @@ type SendOnlyNode interface { String() string } +//go:generate mockery --name TxSender --output ./mocks/ --case=underscore + +type TxSender interface { + SendTransaction(ctx context.Context, tx *types.Transaction) error + ChainID(context.Context) (*big.Int, error) +} + +//go:generate mockery --name BatchSender --output ./mocks/ --case=underscore + +type BatchSender interface { + BatchCallContext(ctx context.Context, b []rpc.BatchElem) error +} + // It only supports sending transactions // It must a http(s) url type sendOnlyNode struct { - uri url.URL - rpc *rpc.Client - geth *ethclient.Client - log logger.Logger - dialed bool - name string - chainID *big.Int - - chStop chan struct{} + uri url.URL + batchSender BatchSender + sender TxSender + log logger.Logger + dialed bool + name string + chainID *big.Int + chStop chan struct{} } // NewSendOnlyNode returns a new sendonly node @@ -75,8 +88,8 @@ func (s *sendOnlyNode) Start(startCtx context.Context) error { return errors.Wrapf(err, "failed to dial secondary client: %v", s.uri.Redacted()) } s.dialed = true - s.rpc = rpc - s.geth = ethclient.NewClient(rpc) + geth := ethclient.NewClient(rpc) + s.SetEthClient(rpc, geth) if id, err := s.getChainID(startCtx); err != nil { s.log.Warn("sendonly rpc ChainID verification skipped", "err", err) @@ -92,6 +105,15 @@ func (s *sendOnlyNode) Start(startCtx context.Context) error { return nil } +func (s *sendOnlyNode) SetEthClient(newBatchSender BatchSender, newSender TxSender) { + if s.sender != nil { + log.Panicf("sendOnlyNode.SetEthClient should only be called once!") + return + } + s.batchSender = newBatchSender + s.sender = newSender +} + func (s *sendOnlyNode) Close() { close(s.chStop) } @@ -112,7 +134,7 @@ func (s *sendOnlyNode) logTiming(lggr logger.Logger, duration time.Duration, err "rpcDomain", s.uri.Host, "name", 
s.name, "chainID", s.chainID, - "sendOnly", false, + "sendOnly", true, "err", err, ) } @@ -124,7 +146,7 @@ func (s *sendOnlyNode) SendTransaction(parentCtx context.Context, tx *types.Tran ctx, cancel := s.makeQueryCtx(parentCtx) defer cancel() - return s.wrap(s.geth.SendTransaction(ctx, tx)) + return s.wrap(s.sender.SendTransaction(ctx, tx)) } func (s *sendOnlyNode) BatchCallContext(parentCtx context.Context, b []rpc.BatchElem) (err error) { @@ -134,7 +156,7 @@ func (s *sendOnlyNode) BatchCallContext(parentCtx context.Context, b []rpc.Batch ctx, cancel := s.makeQueryCtx(parentCtx) defer cancel() - return s.wrap(s.rpc.BatchCallContext(ctx, b)) + return s.wrap(s.batchSender.BatchCallContext(ctx, b)) } func (s *sendOnlyNode) ChainID() (chainID *big.Int) { @@ -154,7 +176,7 @@ func (s *sendOnlyNode) getChainID(parentCtx context.Context) (*big.Int, error) { ctx, cancel := s.makeQueryCtx(parentCtx) defer cancel() - chainID, err := s.geth.ChainID(ctx) + chainID, err := s.sender.ChainID(ctx) if err != nil { return nil, err } else if chainID.Cmp(big.NewInt(0)) == 0 { diff --git a/core/chains/evm/client/send_only_node_test.go b/core/chains/evm/client/send_only_node_test.go new file mode 100644 index 00000000000..30dd6baaf01 --- /dev/null +++ b/core/chains/evm/client/send_only_node_test.go @@ -0,0 +1,164 @@ +package client_test + +import ( + "context" + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rpc" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/smartcontractkit/chainlink/core/chains/evm/client/mocks" + + "github.com/smartcontractkit/chainlink/core/assets" + evmclient "github.com/smartcontractkit/chainlink/core/chains/evm/client" + 
"github.com/smartcontractkit/chainlink/core/internal/testutils" + "github.com/smartcontractkit/chainlink/core/logger" +) + +func TestNewSendOnlyNode(t *testing.T) { + t.Parallel() + + urlFormat := "http://user:%s@testurl.com" + password := "pass" + url := testutils.MustParseURL(t, fmt.Sprintf(urlFormat, password)) + redacted := fmt.Sprintf(urlFormat, "xxxxx") + lggr := logger.TestLogger(t) + name := "TestNewSendOnlyNode" + chainID := testutils.NewRandomEVMChainID() + + node := evmclient.NewSendOnlyNode(lggr, *url, name, chainID) + assert.NotNil(t, node) + + // Must contain name & url with redacted password + assert.Contains(t, node.String(), fmt.Sprintf("%s:%s", name, redacted)) + assert.Equal(t, node.ChainID(), chainID) +} + +func TestStartSendOnlyNode(t *testing.T) { + t.Parallel() + + t.Run("Start with Random ChainID", func(t *testing.T) { + t.Parallel() + chainID := testutils.NewRandomEVMChainID() + r := chainIDResp{chainID.Int64(), nil} + url := r.newHTTPServer(t) + lggr, observedLogs := logger.TestLoggerObserved(t, zap.WarnLevel) + s := evmclient.NewSendOnlyNode(lggr, *url, t.Name(), chainID) + defer s.Close() + err := s.Start(testutils.Context(t)) + assert.NoError(t, err) // No errors expected + assert.Equal(t, 0, observedLogs.Len()) // No warnings expected + }) + + t.Run("Start with ChainID=0", func(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestLoggerObserved(t, zap.WarnLevel) + chainID := testutils.FixtureChainID + r := chainIDResp{chainID.Int64(), nil} + url := r.newHTTPServer(t) + s := evmclient.NewSendOnlyNode(lggr, *url, t.Name(), testutils.FixtureChainID) + + defer s.Close() + err := s.Start(testutils.Context(t)) + assert.NoError(t, err) + // getChainID() should return Error if ChainID = 0 + // This should get converted into a warning from Start() + testutils.WaitForLogMessage(t, observedLogs, "ChainID verification skipped") + }) +} + +func createSignedTx(t *testing.T, chainID *big.Int, nonce uint64, data []byte) *types.Transaction 
{ + key, err := crypto.GenerateKey() + require.NoError(t, err) + sender, err := bind.NewKeyedTransactorWithChainID(key, chainID) + require.NoError(t, err) + tx := types.NewTransaction( + nonce, sender.From, + assets.Ether(100), + 21000, big.NewInt(1000000000), data, + ) + signedTx, err := sender.Signer(sender.From, tx) + require.NoError(t, err) + return signedTx +} + +func TestSendTransaction(t *testing.T) { + t.Parallel() + + chainID := testutils.FixtureChainID + lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel) + url := testutils.MustParseURL(t, "http://place.holder") + s := evmclient.NewSendOnlyNode(lggr, + *url, + t.Name(), + testutils.FixtureChainID).(evmclient.TestableSendOnlyNode) + require.NotNil(t, s) + + signedTx := createSignedTx(t, chainID, 1, []byte{1, 2, 3}) + + mockTxSender := new(mocks.TxSender) + mockTxSender.Test(t) + + mockTxSender.On("SendTransaction", mock.Anything, mock.MatchedBy( + func(tx *types.Transaction) bool { + if tx.Nonce() != uint64(1) { + return false + } + return true + }, + )).Once().Return(nil) + s.SetEthClient(nil, mockTxSender) + + err := s.SendTransaction(testutils.TestCtx(t), signedTx) + assert.NoError(t, err) + testutils.WaitForLogMessage(t, observedLogs, "SendOnly RPC call") + mockTxSender.AssertExpectations(t) +} + +func TestBatchCallContext(t *testing.T) { + t.Parallel() + + lggr := logger.TestLogger(t) + chainID := testutils.FixtureChainID + url := testutils.MustParseURL(t, "http://place.holder") + s := evmclient.NewSendOnlyNode( + lggr, + *url, "TestBatchCallContext", + chainID).(evmclient.TestableSendOnlyNode) + + blockNum := hexutil.EncodeBig(big.NewInt(42)) + req := []rpc.BatchElem{ + { + Method: "eth_getBlockByNumber", + Args: []interface{}{blockNum, true}, + Result: &types.Block{}, + }, + { + Method: "method", + Args: []interface{}{1, false}}, + } + + mockBatchSender := new(mocks.BatchSender) + mockBatchSender.Test(t) + mockBatchSender.On("BatchCallContext", mock.Anything, + mock.MatchedBy( + 
func(b []rpc.BatchElem) bool { + return len(b) == 2 && + b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == blockNum && b[0].Args[1] == true + })).Return(nil).Once().Return(nil) + + s.SetEthClient(mockBatchSender, nil) + + err := s.BatchCallContext(context.Background(), req) + assert.NoError(t, err) + mockBatchSender.AssertExpectations(t) +} diff --git a/core/chains/evm/client/simulated_backend.go b/core/chains/evm/client/simulated_backend.go index c62a3e07d8c..0b742a70d9b 100644 --- a/core/chains/evm/client/simulated_backend.go +++ b/core/chains/evm/client/simulated_backend.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rpc" "github.com/pkg/errors" + "github.com/smartcontractkit/chainlink/core/assets" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" "github.com/smartcontractkit/chainlink/core/logger" @@ -242,6 +243,11 @@ func (c *SimulatedBackendClient) BlockByNumber(ctx context.Context, n *big.Int) return c.b.BlockByNumber(ctx, n) } +// BlockByNumber returns a geth block type. +func (c *SimulatedBackendClient) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + return c.b.BlockByHash(ctx, hash) +} + // ChainID returns the ethereum ChainID. 
func (c *SimulatedBackendClient) ChainID() *big.Int { return c.chainId diff --git a/core/chains/evm/config/chain_scoped_config_orm.go b/core/chains/evm/config/chain_scoped_config_orm.go index 6a1f954a3bf..8739c1afd8a 100644 --- a/core/chains/evm/config/chain_scoped_config_orm.go +++ b/core/chains/evm/config/chain_scoped_config_orm.go @@ -1,13 +1,12 @@ package config import ( - "math/big" - "github.com/smartcontractkit/chainlink/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/core/utils" ) type chainScopedConfigORM struct { - id *big.Int + id utils.Big orm types.ChainConfigORM } diff --git a/core/chains/evm/config/chain_specific_config.go b/core/chains/evm/config/chain_specific_config.go index 55978c5358d..72de17902be 100644 --- a/core/chains/evm/config/chain_specific_config.go +++ b/core/chains/evm/config/chain_specific_config.go @@ -6,7 +6,7 @@ import ( "time" "github.com/smartcontractkit/chainlink/core/assets" - "github.com/smartcontractkit/chainlink/core/chains" + "github.com/smartcontractkit/chainlink/core/config" ) var ( @@ -29,7 +29,7 @@ type ( blockHistoryEstimatorBlockHistorySize uint16 blockHistoryEstimatorEIP1559FeeCapBufferBlocks *uint16 blockHistoryEstimatorTransactionPercentile uint16 - chainType chains.ChainType + chainType config.ChainType eip1559DynamicFees bool ethTxReaperInterval time.Duration ethTxReaperThreshold time.Duration @@ -53,6 +53,7 @@ type ( headTrackerSamplingInterval time.Duration linkContractAddress string logBackfillBatchSize uint32 + logPollInterval time.Duration maxGasPriceWei big.Int maxInFlightTransactions uint32 maxQueuedTransactions uint64 @@ -65,6 +66,7 @@ type ( nodePollInterval time.Duration nonceAutoSync bool + useForwarders bool rpcDefaultBatchSize uint32 // set true if fully configured complete bool @@ -128,6 +130,7 @@ func setChainSpecificConfigDefaultSets() { headTrackerSamplingInterval: 1 * time.Second, linkContractAddress: "", logBackfillBatchSize: 100, + logPollInterval: 15 * time.Second, 
maxGasPriceWei: *assets.GWei(5000), maxInFlightTransactions: 16, maxQueuedTransactions: 250, @@ -139,6 +142,7 @@ func setChainSpecificConfigDefaultSets() { nodePollFailureThreshold: 5, nodePollInterval: 10 * time.Second, nonceAutoSync: true, + useForwarders: false, ocrContractConfirmations: 4, ocrContractTransmitterTransmitTimeout: 10 * time.Second, ocrDatabaseTimeout: 10 * time.Second, @@ -176,12 +180,13 @@ func setChainSpecificConfigDefaultSets() { // With xDai's current maximum of 19 validators then 40 blocks is the maximum possible re-org) // The mainnet default of 50 blocks is ok here xDaiMainnet := fallbackDefaultSet - xDaiMainnet.chainType = chains.XDai + xDaiMainnet.chainType = config.ChainXDai xDaiMainnet.gasBumpThreshold = 3 // 15s delay since feeds update every minute in volatile situations xDaiMainnet.gasPriceDefault = *assets.GWei(1) xDaiMainnet.minGasPriceWei = *assets.GWei(1) // 1 Gwei is the minimum accepted by the validators (unless whitelisted) xDaiMainnet.maxGasPriceWei = *assets.GWei(500) xDaiMainnet.linkContractAddress = "0xE2e73A1c69ecF83F464EFCE6A5be353a37cA09b2" + xDaiMainnet.logPollInterval = 5 * time.Second // BSC uses Clique consensus with ~3s block times // Clique offers finality within (N/2)+1 blocks where N is number of signers @@ -205,6 +210,7 @@ func setChainSpecificConfigDefaultSets() { bscMainnet.ocrDatabaseTimeout = 2 * time.Second bscMainnet.ocrContractTransmitterTransmitTimeout = 2 * time.Second bscMainnet.ocrObservationGracePeriod = 500 * time.Millisecond + bscMainnet.logPollInterval = 3 * time.Second hecoMainnet := bscMainnet @@ -228,12 +234,13 @@ func setChainSpecificConfigDefaultSets() { polygonMainnet.linkContractAddress = "0xb0897686c545045afc77cf20ec7a532e3120e0f1" polygonMainnet.minIncomingConfirmations = 5 polygonMainnet.minRequiredOutgoingConfirmations = 12 + polygonMainnet.logPollInterval = 1 * time.Second polygonMumbai := polygonMainnet polygonMumbai.linkContractAddress = "0x326C977E6efc84E512bB9C30f76E30c160eD06FB" 
// Arbitrum is an L2 chain. Pending proper L2 support, for now we rely on their sequencer arbitrumMainnet := fallbackDefaultSet - arbitrumMainnet.chainType = chains.Arbitrum + arbitrumMainnet.chainType = config.ChainArbitrum arbitrumMainnet.gasBumpThreshold = 0 // Disable gas bumping on arbitrum arbitrumMainnet.gasLimitDefault = 7000000 arbitrumMainnet.gasLimitTransfer = 800000 // estimating gas returns 695,344 so 800,000 should be safe with some buffer @@ -251,7 +258,7 @@ func setChainSpecificConfigDefaultSets() { optimismMainnet := fallbackDefaultSet optimismMainnet.balanceMonitorBlockDelay = 0 optimismMainnet.blockHistoryEstimatorBlockHistorySize = 0 // Force an error if someone set GAS_UPDATER_ENABLED=true by accident; we never want to run the block history estimator on optimism - optimismMainnet.chainType = chains.Optimism + optimismMainnet.chainType = config.ChainOptimism optimismMainnet.ethTxResendAfterThreshold = 15 * time.Second optimismMainnet.finalityDepth = 1 // Sequencer offers absolute finality as long as no re-org longer than 20 blocks occurs on main chain this event would require special handling (new txm) optimismMainnet.gasBumpThreshold = 0 // Never bump gas on optimism @@ -273,6 +280,7 @@ func setChainSpecificConfigDefaultSets() { fantomMainnet.linkContractAddress = "0x6f43ff82cca38001b6699a8ac47a2d0e66939407" fantomMainnet.minIncomingConfirmations = 3 fantomMainnet.minRequiredOutgoingConfirmations = 2 + fantomMainnet.logPollInterval = 1 * time.Second fantomTestnet := fantomMainnet fantomTestnet.linkContractAddress = "0xfafedb041c0dd4fa2dc0d87a6b0979ee6fa7af5f" @@ -285,6 +293,7 @@ func setChainSpecificConfigDefaultSets() { rskMainnet.gasFeeCapDefault = *big.NewInt(100000000) // rsk does not yet support EIP-1559 but this allows validation to pass rskMainnet.minGasPriceWei = *big.NewInt(0) rskMainnet.minimumContractPayment = assets.NewLinkFromJuels(1000000000000000) + rskMainnet.logPollInterval = 30 * time.Second rskTestnet := rskMainnet 
rskTestnet.linkContractAddress = "0x8bbbd80981fe76d44854d8df305e8985c19f0e78" @@ -301,6 +310,7 @@ func setChainSpecificConfigDefaultSets() { avalancheMainnet.minIncomingConfirmations = 1 avalancheMainnet.minRequiredOutgoingConfirmations = 1 avalancheMainnet.ocrContractConfirmations = 1 + avalancheMainnet.logPollInterval = 3 * time.Second avalancheFuji := avalancheMainnet avalancheFuji.linkContractAddress = "0x0b9d5D9136855f6FEc3c0993feE6E9CE8a297846" @@ -311,13 +321,14 @@ func setChainSpecificConfigDefaultSets() { harmonyMainnet.gasPriceDefault = *assets.GWei(5) harmonyMainnet.minIncomingConfirmations = 1 harmonyMainnet.minRequiredOutgoingConfirmations = 2 + harmonyMainnet.logPollInterval = 2 * time.Second harmonyTestnet := harmonyMainnet harmonyTestnet.linkContractAddress = "0x8b12Ac23BFe11cAb03a634C1F117D64a7f2cFD3e" // OKExChain // (stubbed so that the ChainType is autoset for known IDs) okxMainnet := fallbackDefaultSet - okxMainnet.chainType = chains.ExChain + okxMainnet.chainType = config.ChainExChain okxTestnet := okxMainnet diff --git a/core/chains/evm/config/config.go b/core/chains/evm/config/config.go index 929b632afe0..a57460ffa61 100644 --- a/core/chains/evm/config/config.go +++ b/core/chains/evm/config/config.go @@ -16,7 +16,6 @@ import ( ocrtypes "github.com/smartcontractkit/libocr/offchainreporting/types" "github.com/smartcontractkit/chainlink/core/assets" - "github.com/smartcontractkit/chainlink/core/chains" evmclient "github.com/smartcontractkit/chainlink/core/chains/evm/client" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" "github.com/smartcontractkit/chainlink/core/config" @@ -57,15 +56,17 @@ type ChainScopedOnlyConfig interface { EvmHeadTrackerMaxBufferSize() uint32 EvmHeadTrackerSamplingInterval() time.Duration EvmLogBackfillBatchSize() uint32 + EvmLogPollInterval() time.Duration EvmMaxGasPriceWei() *big.Int EvmMaxInFlightTransactions() uint32 EvmMaxQueuedTransactions() uint64 EvmMinGasPriceWei() *big.Int 
EvmNonceAutoSync() bool + EvmUseForwarders() bool EvmRPCDefaultBatchSize() uint32 FlagsContractAddress() string GasEstimatorMode() string - ChainType() chains.ChainType + ChainType() config.ChainType KeySpecificMaxGasPriceWei(addr gethcommon.Address) *big.Int LinkContractAddress() string MinIncomingConfirmations() uint32 @@ -114,7 +115,7 @@ type chainScopedConfig struct { } func NewChainScopedConfig(chainID *big.Int, cfg evmtypes.ChainCfg, orm evmtypes.ChainConfigORM, lggr logger.Logger, gcfg config.GeneralConfig) ChainScopedConfig { - csorm := &chainScopedConfigORM{chainID, orm} + csorm := &chainScopedConfigORM{*utils.NewBig(chainID), orm} defaultSet, exists := chainSpecificConfigDefaultSets[chainID.Int64()] if !exists { lggr.Warnf("Unrecognised chain %d, falling back to generic default configuration", chainID) @@ -213,22 +214,22 @@ func (c *chainScopedConfig) validate() (err error) { err = multierr.Combine(err, errors.Errorf("CHAIN_TYPE %q cannot be used with chain ID %d", chainType, c.ChainID())) } else { switch chainType { - case chains.Arbitrum: + case config.ChainArbitrum: if gasEst := c.GasEstimatorMode(); gasEst != "FixedPrice" { err = multierr.Combine(err, errors.Errorf("GAS_ESTIMATOR_MODE %q is not allowed with chain type %q - "+ - "must be %q", gasEst, chains.Arbitrum, "FixedPrice")) + "must be %q", gasEst, config.ChainArbitrum, "FixedPrice")) } - case chains.ExChain: + case config.ChainExChain: - case chains.Optimism: + case config.ChainOptimism: gasEst := c.GasEstimatorMode() switch gasEst { case "Optimism", "Optimism2": default: err = multierr.Combine(err, errors.Errorf("GAS_ESTIMATOR_MODE %q is not allowed with chain type %q - "+ - "must be %q or %q", gasEst, chains.Optimism, "Optimism", "Optimism2")) + "must be %q or %q", gasEst, config.ChainOptimism, "Optimism", "Optimism2")) } - case chains.XDai: + case config.ChainXDai: } } @@ -568,11 +569,11 @@ func (c *chainScopedConfig) BlockHistoryEstimatorBatchSize() (size uint32) { 
c.logEnvOverrideOnce("BlockHistoryEstimatorBatchSize", val) size = val } else { - valLegacy, set := c.lookupEnv("GAS_UPDATER_BATCH_SIZE", parse.Uint32) + valLegacy, set := lookupEnv(c, "GAS_UPDATER_BATCH_SIZE", parse.Uint32) if set { c.logEnvOverrideOnce("GAS_UPDATER_BATCH_SIZE", valLegacy) - c.logger.Warn("GAS_UPDATER_BATCH_SIZE is deprecated, please use BLOCK_HISTORY_ESTIMATOR_BATCH_SIZE instead (or simply remove to use the default)") - size = valLegacy.(uint32) + c.logger.Error("GAS_UPDATER_BATCH_SIZE is deprecated, please use BLOCK_HISTORY_ESTIMATOR_BATCH_SIZE instead (or simply remove to use the default)") + size = valLegacy } else { size = c.defaultSet.blockHistoryEstimatorBatchSize } @@ -597,12 +598,12 @@ func (c *chainScopedConfig) BlockHistoryEstimatorBlockDelay() uint16 { c.logEnvOverrideOnce("BlockHistoryEstimatorBlockDelay", val) return val } - valLegacy, set := c.lookupEnv("GAS_UPDATER_BLOCK_DELAY", parse.Uint16) + valLegacy, set := lookupEnv(c, "GAS_UPDATER_BLOCK_DELAY", parse.Uint16) if set { c.logEnvOverrideOnce("GAS_UPDATER_BLOCK_DELAY", valLegacy) - c.logger.Warn("GAS_UPDATER_BLOCK_DELAY is deprecated, please use BLOCK_HISTORY_ESTIMATOR_BLOCK_DELAY instead (or simply remove to use the default)") - return valLegacy.(uint16) + c.logger.Error("GAS_UPDATER_BLOCK_DELAY is deprecated, please use BLOCK_HISTORY_ESTIMATOR_BLOCK_DELAY instead (or simply remove to use the default)") + return valLegacy } c.persistMu.RLock() p := c.persistedCfg.BlockHistoryEstimatorBlockDelay @@ -622,11 +623,11 @@ func (c *chainScopedConfig) BlockHistoryEstimatorBlockHistorySize() uint16 { c.logEnvOverrideOnce("BlockHistoryEstimatorBlockHistorySize", val) return val } - valLegacy, set := c.lookupEnv("GAS_UPDATER_BLOCK_HISTORY_SIZE", parse.Uint16) + valLegacy, set := lookupEnv(c, "GAS_UPDATER_BLOCK_HISTORY_SIZE", parse.Uint16) if set { c.logEnvOverrideOnce("GAS_UPDATER_BLOCK_HISTORY_SIZE", valLegacy) - c.logger.Warn("GAS_UPDATER_BLOCK_HISTORY_SIZE is deprecated, please use 
BLOCK_HISTORY_ESTIMATOR_BLOCK_HISTORY_SIZE instead (or simply remove to use the default)") - return valLegacy.(uint16) + c.logger.Error("GAS_UPDATER_BLOCK_HISTORY_SIZE is deprecated, please use BLOCK_HISTORY_ESTIMATOR_BLOCK_HISTORY_SIZE instead (or simply remove to use the default)") + return valLegacy } c.persistMu.RLock() p := c.persistedCfg.BlockHistoryEstimatorBlockHistorySize @@ -667,11 +668,11 @@ func (c *chainScopedConfig) BlockHistoryEstimatorTransactionPercentile() uint16 c.logEnvOverrideOnce("BlockHistoryEstimatorTransactionPercentile", val) return val } - valLegacy, set := c.lookupEnv("GAS_UPDATER_TRANSACTION_PERCENTILE", parse.Uint16) + valLegacy, set := lookupEnv(c, "GAS_UPDATER_TRANSACTION_PERCENTILE", parse.Uint16) if set { c.logEnvOverrideOnce("GAS_UPDATER_TRANSACTION_PERCENTILE", valLegacy) - c.logger.Warn("GAS_UPDATER_TRANSACTION_PERCENTILE is deprecated, please use BLOCK_HISTORY_ESTIMATOR_TRANSACTION_PERCENTILE instead (or simply remove to use the default)") - return valLegacy.(uint16) + c.logger.Error("GAS_UPDATER_TRANSACTION_PERCENTILE is deprecated, please use BLOCK_HISTORY_ESTIMATOR_TRANSACTION_PERCENTILE instead (or simply remove to use the default)") + return valLegacy } return c.defaultSet.blockHistoryEstimatorTransactionPercentile } @@ -683,14 +684,14 @@ func (c *chainScopedConfig) GasEstimatorMode() string { c.logEnvOverrideOnce("GasEstimatorMode", val) return val } - enabled, set := c.lookupEnv("GAS_UPDATER_ENABLED", parse.Bool) + enabled, set := lookupEnv(c, "GAS_UPDATER_ENABLED", parse.Bool) if set { c.logEnvOverrideOnce("GAS_UPDATER_ENABLED", enabled) if enabled.(bool) { - c.logger.Warn("GAS_UPDATER_ENABLED has been deprecated, to enable the block history estimator, please use GAS_ESTIMATOR_MODE=BlockHistory instead (or simply remove to use the default)") + c.logger.Error("GAS_UPDATER_ENABLED has been deprecated, to enable the block history estimator, please use GAS_ESTIMATOR_MODE=BlockHistory instead (or simply remove to use the 
default)") return "BlockHistory" } - c.logger.Warn("GAS_UPDATER_ENABLED has been deprecated, to disable the block history estimator, please use GAS_ESTIMATOR_MODE=FixedPrice instead (or simply remove to use the default)") + c.logger.Error("GAS_UPDATER_ENABLED has been deprecated, to disable the block history estimator, please use GAS_ESTIMATOR_MODE=FixedPrice instead (or simply remove to use the default)") return "FixedPrice" } c.persistMu.RLock() @@ -719,18 +720,18 @@ func (c *chainScopedConfig) KeySpecificMaxGasPriceWei(addr gethcommon.Address) * return c.EvmMaxGasPriceWei() } -func (c *chainScopedConfig) ChainType() chains.ChainType { +func (c *chainScopedConfig) ChainType() config.ChainType { val, ok := c.GeneralConfig.GlobalChainType() if ok { c.logEnvOverrideOnce("ChainType", val) - return chains.ChainType(val) + return config.ChainType(val) } c.persistMu.RLock() p := c.persistedCfg.ChainType c.persistMu.RUnlock() if p.Valid { c.logPersistedOverrideOnce("ChainType", p.String) - return chains.ChainType(p.String) + return config.ChainType(p.String) } return c.defaultSet.chainType } @@ -866,6 +867,23 @@ func (c *chainScopedConfig) EvmNonceAutoSync() bool { return c.defaultSet.nonceAutoSync } +// EvmUseForwarders enables/disables sending transactions through forwarder contracts +func (c *chainScopedConfig) EvmUseForwarders() bool { + val, ok := c.GeneralConfig.GlobalEvmUseForwarders() + if ok { + c.logEnvOverrideOnce("EvmUseForwarders", val) + return val + } + c.persistMu.RLock() + p := c.persistedCfg.EvmUseForwarders + c.persistMu.RUnlock() + if p.Valid { + c.logPersistedOverrideOnce("EvmUseForwarders", p.Bool) + return p.Bool + } + return c.defaultSet.useForwarders +} + // EvmGasLimitMultiplier is a factor by which a transaction's GasLimit is // multiplied before transmission. So if the value is 1.1, and the GasLimit for // a transaction is 10, 10% will be added before transmission. 
@@ -943,6 +961,23 @@ func (c *chainScopedConfig) EthTxReaperThreshold() time.Duration { return c.defaultSet.ethTxReaperThreshold } +// EvmLogPollInterval how fast we poll for new logs. +func (c *chainScopedConfig) EvmLogPollInterval() time.Duration { + val, ok := c.GeneralConfig.GlobalEvmLogPollInterval() + if ok { + c.logEnvOverrideOnce("EvmLogPollInterval", val) + return val + } + c.persistMu.RLock() + p := c.persistedCfg.EvmLogPollInterval + c.persistMu.RUnlock() + if p != nil { + c.logPersistedOverrideOnce("EvmLogPollInterval", *p) + return p.Duration() + } + return c.defaultSet.logPollInterval +} + // EvmLogBackfillBatchSize sets the batch size for calling FilterLogs when we backfill missing logs func (c *chainScopedConfig) EvmLogBackfillBatchSize() uint32 { val, ok := c.GeneralConfig.GlobalEvmLogBackfillBatchSize() @@ -1111,10 +1146,10 @@ func (c *chainScopedConfig) NodePollInterval() time.Duration { return c.defaultSet.nodePollInterval } -func (c *chainScopedConfig) lookupEnv(k string, parse func(string) (interface{}, error)) (interface{}, bool) { +func lookupEnv[T any](c *chainScopedConfig, k string, parse func(string) (T, error)) (t T, ok bool) { s, ok := os.LookupEnv(k) if !ok { - return nil, false + return } val, err := parse(s) if err == nil { @@ -1122,5 +1157,5 @@ func (c *chainScopedConfig) lookupEnv(k string, parse func(string) (interface{}, } c.logger.Errorw(fmt.Sprintf("Invalid value provided for %s, falling back to default.", s), "value", s, "key", k, "error", err) - return nil, false + return } diff --git a/core/chains/evm/config/config_test.go b/core/chains/evm/config/config_test.go index 81d084bc197..146151a019d 100644 --- a/core/chains/evm/config/config_test.go +++ b/core/chains/evm/config/config_test.go @@ -12,10 +12,9 @@ import ( "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v4" - "github.com/smartcontractkit/chainlink/core/chains" evmconfig "github.com/smartcontractkit/chainlink/core/chains/evm/config" - evmmocks 
"github.com/smartcontractkit/chainlink/core/chains/evm/mocks" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/core/config" "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/internal/testutils" "github.com/smartcontractkit/chainlink/core/internal/testutils/configtest" @@ -24,8 +23,7 @@ import ( ) func TestChainScopedConfig(t *testing.T) { - orm := new(evmmocks.ORM) - orm.Test(t) + orm := make(fakeChainConfigORM) chainID := big.NewInt(rand.Int63()) gcfg := configtest.NewTestGeneralConfig(t) lggr := logger.TestLogger(t).With("evmChainID", chainID.String()) @@ -37,13 +35,15 @@ func TestChainScopedConfig(t *testing.T) { t.Run("sets the gas price", func(t *testing.T) { assert.Equal(t, big.NewInt(20000000000), cfg.EvmGasPriceDefault()) - orm.On("StoreString", chainID, "EvmGasPriceDefault", "42000000000").Return(nil) err := cfg.SetEvmGasPriceDefault(big.NewInt(42000000000)) assert.NoError(t, err) assert.Equal(t, big.NewInt(42000000000), cfg.EvmGasPriceDefault()) - orm.AssertExpectations(t) + got, ok := orm.LoadString(*utils.NewBig(chainID), "EvmGasPriceDefault") + if assert.True(t, ok) { + assert.Equal(t, "42000000000", got) + } }) t.Run("is not allowed to set gas price to below EvmMinGasPriceWei", func(t *testing.T) { assert.Equal(t, big.NewInt(1000000000), cfg.EvmMinGasPriceWei()) @@ -124,8 +124,7 @@ func TestChainScopedConfig(t *testing.T) { } func TestChainScopedConfig_BSCDefaults(t *testing.T) { - orm := new(evmmocks.ORM) - orm.Test(t) + orm := make(fakeChainConfigORM) chainID := big.NewInt(56) gcfg := configtest.NewTestGeneralConfig(t) lggr := logger.TestLogger(t).With("evmChainID", chainID.String()) @@ -197,7 +196,7 @@ func Test_chainScopedConfig_Validate(t *testing.T) { gcfg := cltest.NewTestGeneralConfig(t) lggr := logger.TestLogger(t) cfg := evmconfig.NewChainScopedConfig(big.NewInt(0), evmtypes.ChainCfg{ - ChainType: 
null.StringFrom(string(chains.Arbitrum)), + ChainType: null.StringFrom(string(config.ChainArbitrum)), GasEstimatorMode: null.StringFrom("BlockHistory"), }, nil, lggr, gcfg) assert.Error(t, cfg.Validate()) @@ -225,7 +224,7 @@ func Test_chainScopedConfig_Validate(t *testing.T) { gcfg := cltest.NewTestGeneralConfig(t) lggr := logger.TestLogger(t) cfg := evmconfig.NewChainScopedConfig(big.NewInt(0), evmtypes.ChainCfg{ - ChainType: null.StringFrom(string(chains.Optimism)), + ChainType: null.StringFrom(string(config.ChainOptimism)), GasEstimatorMode: null.StringFrom("BlockHistory"), }, nil, lggr, gcfg) assert.Error(t, cfg.Validate()) @@ -248,3 +247,32 @@ func Test_chainScopedConfig_Validate(t *testing.T) { }) }) } + +type fakeChainConfigORM map[string]map[string]string + +func (f fakeChainConfigORM) LoadString(chainID utils.Big, key string) (val string, ok bool) { + var m map[string]string + m, ok = f[chainID.String()] + if ok { + val, ok = m[key] + } + return +} + +func (f fakeChainConfigORM) StoreString(chainID utils.Big, key, val string) error { + m, ok := f[chainID.String()] + if !ok { + m = make(map[string]string) + f[chainID.String()] = m + } + m[key] = val + return nil +} + +func (f fakeChainConfigORM) Clear(chainID utils.Big, key string) error { + m, ok := f[chainID.String()] + if ok { + delete(m, key) + } + return nil +} diff --git a/core/chains/evm/config/mocks/chain_scoped_config.go b/core/chains/evm/config/mocks/chain_scoped_config.go index a3978fbdb54..87e06b79299 100644 --- a/core/chains/evm/config/mocks/chain_scoped_config.go +++ b/core/chains/evm/config/mocks/chain_scoped_config.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks @@ -7,8 +7,6 @@ import ( assets "github.com/smartcontractkit/chainlink/core/assets" - chains "github.com/smartcontractkit/chainlink/core/chains" - common "github.com/ethereum/go-ethereum/common" commontypes "github.com/smartcontractkit/libocr/commontypes" @@ -490,14 +488,14 @@ func (_m *ChainScopedConfig) ChainID() *big.Int { } // ChainType provides a mock function with given fields: -func (_m *ChainScopedConfig) ChainType() chains.ChainType { +func (_m *ChainScopedConfig) ChainType() coreconfig.ChainType { ret := _m.Called() - var r0 chains.ChainType - if rf, ok := ret.Get(0).(func() chains.ChainType); ok { + var r0 coreconfig.ChainType + if rf, ok := ret.Get(0).(func() coreconfig.ChainType); ok { r0 = rf() } else { - r0 = ret.Get(0).(chains.ChainType) + r0 = ret.Get(0).(coreconfig.ChainType) } return r0 @@ -1123,6 +1121,20 @@ func (_m *ChainScopedConfig) EvmLogBackfillBatchSize() uint32 { return r0 } +// EvmLogPollInterval provides a mock function with given fields: +func (_m *ChainScopedConfig) EvmLogPollInterval() time.Duration { + ret := _m.Called() + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + // EvmMaxGasPriceWei provides a mock function with given fields: func (_m *ChainScopedConfig) EvmMaxGasPriceWei() *big.Int { ret := _m.Called() @@ -1211,6 +1223,20 @@ func (_m *ChainScopedConfig) EvmRPCDefaultBatchSize() uint32 { return r0 } +// EvmUseForwarders provides a mock function with given fields: +func (_m *ChainScopedConfig) EvmUseForwarders() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + // ExplorerAccessKey provides a mock function with given fields: func (_m *ChainScopedConfig) ExplorerAccessKey() string { ret := _m.Called() @@ -1311,6 +1337,20 @@ func (_m *ChainScopedConfig) FeatureFeedsManager() bool { return r0 } 
+// FeatureLogPoller provides a mock function with given fields: +func (_m *ChainScopedConfig) FeatureLogPoller() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + // FeatureOffchainReporting provides a mock function with given fields: func (_m *ChainScopedConfig) FeatureOffchainReporting() bool { ret := _m.Called() @@ -1640,27 +1680,6 @@ func (_m *ChainScopedConfig) GlobalEthTxResendAfterThreshold() (time.Duration, b return r0, r1 } -// GlobalEvmDefaultBatchSize provides a mock function with given fields: -func (_m *ChainScopedConfig) GlobalEvmDefaultBatchSize() (uint32, bool) { - ret := _m.Called() - - var r0 uint32 - if rf, ok := ret.Get(0).(func() uint32); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint32) - } - - var r1 bool - if rf, ok := ret.Get(1).(func() bool); ok { - r1 = rf() - } else { - r1 = ret.Get(1).(bool) - } - - return r0, r1 -} - // GlobalEvmEIP1559DynamicFees provides a mock function with given fields: func (_m *ChainScopedConfig) GlobalEvmEIP1559DynamicFees() (bool, bool) { ret := _m.Called() @@ -2028,6 +2047,27 @@ func (_m *ChainScopedConfig) GlobalEvmLogBackfillBatchSize() (uint32, bool) { return r0, r1 } +// GlobalEvmLogPollInterval provides a mock function with given fields: +func (_m *ChainScopedConfig) GlobalEvmLogPollInterval() (time.Duration, bool) { + ret := _m.Called() + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + var r1 bool + if rf, ok := ret.Get(1).(func() bool); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + // GlobalEvmMaxGasPriceWei provides a mock function with given fields: func (_m *ChainScopedConfig) GlobalEvmMaxGasPriceWei() (*big.Int, bool) { ret := _m.Called() @@ -2158,6 +2198,27 @@ func (_m *ChainScopedConfig) GlobalEvmRPCDefaultBatchSize() (uint32, bool) { return r0, r1 } +// 
GlobalEvmUseForwarders provides a mock function with given fields: +func (_m *ChainScopedConfig) GlobalEvmUseForwarders() (bool, bool) { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + var r1 bool + if rf, ok := ret.Get(1).(func() bool); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + // GlobalFlagsContractAddress provides a mock function with given fields: func (_m *ChainScopedConfig) GlobalFlagsContractAddress() (string, bool) { ret := _m.Called() @@ -2685,6 +2746,34 @@ func (_m *ChainScopedConfig) KeeperRegistrySyncUpkeepQueueSize() uint32 { return r0 } +// KeeperTurnFlagEnabled provides a mock function with given fields: +func (_m *ChainScopedConfig) KeeperTurnFlagEnabled() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// KeeperTurnLookBack provides a mock function with given fields: +func (_m *ChainScopedConfig) KeeperTurnLookBack() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + // KeyFile provides a mock function with given fields: func (_m *ChainScopedConfig) KeyFile() string { ret := _m.Called() @@ -3963,6 +4052,34 @@ func (_m *ChainScopedConfig) SolanaEnabled() bool { return r0 } +// SolanaNodes provides a mock function with given fields: +func (_m *ChainScopedConfig) SolanaNodes() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// TerraNodes provides a mock function with given fields: +func (_m *ChainScopedConfig) TerraNodes() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return 
r0 +} + // TLSCertPath provides a mock function with given fields: func (_m *ChainScopedConfig) TLSCertPath() string { ret := _m.Called() diff --git a/core/chains/evm/forwarders/mocks/ORM.go b/core/chains/evm/forwarders/mocks/orm.go similarity index 97% rename from core/chains/evm/forwarders/mocks/ORM.go rename to core/chains/evm/forwarders/mocks/orm.go index e13d934fe3f..09d12709fad 100644 --- a/core/chains/evm/forwarders/mocks/ORM.go +++ b/core/chains/evm/forwarders/mocks/orm.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/chains/evm/gas/block_history_estimator.go b/core/chains/evm/gas/block_history_estimator.go index 7354c524c49..81dba00e4e4 100644 --- a/core/chains/evm/gas/block_history_estimator.go +++ b/core/chains/evm/gas/block_history_estimator.go @@ -72,7 +72,7 @@ type ( chainID big.Int config Config rollingBlockHistory []Block - mb *utils.Mailbox + mb *utils.Mailbox[*evmtypes.Head] wg *sync.WaitGroup ctx context.Context ctxCancel context.CancelFunc @@ -97,7 +97,7 @@ func NewBlockHistoryEstimator(lggr logger.Logger, ethClient evmclient.Client, cf chainID, cfg, make([]Block, 0), - utils.NewMailbox(1), + utils.NewMailbox[*evmtypes.Head](1), new(sync.WaitGroup), ctx, cancel, @@ -288,8 +288,7 @@ func (b *BlockHistoryEstimator) runLoop() { b.logger.Debug("No head to retrieve") continue } - h := evmtypes.AsHead(head) - b.FetchBlocksAndRecalculate(b.ctx, h) + b.FetchBlocksAndRecalculate(b.ctx, head) } } } diff --git a/core/chains/evm/gas/block_history_estimator_test.go b/core/chains/evm/gas/block_history_estimator_test.go index 31c73658e20..adf48e7e3f4 100644 --- a/core/chains/evm/gas/block_history_estimator_test.go +++ b/core/chains/evm/gas/block_history_estimator_test.go @@ -18,11 +18,11 @@ import ( "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink/core/assets" - "github.com/smartcontractkit/chainlink/core/chains" evmclient 
"github.com/smartcontractkit/chainlink/core/chains/evm/client" "github.com/smartcontractkit/chainlink/core/chains/evm/gas" gumocks "github.com/smartcontractkit/chainlink/core/chains/evm/gas/mocks" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" + cfg "github.com/smartcontractkit/chainlink/core/config" "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/internal/testutils" "github.com/smartcontractkit/chainlink/core/logger" @@ -33,7 +33,7 @@ func newConfigWithEIP1559DynamicFeesEnabled(t *testing.T) *gumocks.Config { config := new(gumocks.Config) config.Test(t) config.On("EvmEIP1559DynamicFees").Maybe().Return(true) - config.On("ChainType").Maybe().Return(chains.ChainType("")) + config.On("ChainType").Maybe().Return(cfg.ChainType("")) return config } @@ -41,7 +41,7 @@ func newConfigWithEIP1559DynamicFeesDisabled(t *testing.T) *gumocks.Config { config := new(gumocks.Config) config.Test(t) config.On("EvmEIP1559DynamicFees").Maybe().Return(false) - config.On("ChainType").Maybe().Return(chains.ChainType("")) + config.On("ChainType").Maybe().Return(cfg.ChainType("")) return config } diff --git a/core/chains/evm/gas/chain_specific.go b/core/chains/evm/gas/chain_specific.go index 7323724a9c2..edaf24242ff 100644 --- a/core/chains/evm/gas/chain_specific.go +++ b/core/chains/evm/gas/chain_specific.go @@ -1,11 +1,11 @@ package gas -import "github.com/smartcontractkit/chainlink/core/chains" +import "github.com/smartcontractkit/chainlink/core/config" // chainSpecificIsUsable allows for additional logic specific to a particular // Config that determines whether a transaction should be used for gas estimation func (tx *Transaction) chainSpecificIsUsable(cfg Config) bool { - if cfg.ChainType() == chains.XDai { + if cfg.ChainType() == config.ChainXDai { // GasPrice 0 on most chains is great since it indicates cheap/free transactions. 
// However, xDai reserves a special type of "bridge" transaction with 0 gas // price that is always processed at top priority. Ordinary transactions diff --git a/core/chains/evm/gas/mocks/config.go b/core/chains/evm/gas/mocks/config.go index 95da3e35622..abef8b6c544 100644 --- a/core/chains/evm/gas/mocks/config.go +++ b/core/chains/evm/gas/mocks/config.go @@ -1,11 +1,11 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks import ( big "math/big" - chains "github.com/smartcontractkit/chainlink/core/chains" + config "github.com/smartcontractkit/chainlink/core/config" mock "github.com/stretchr/testify/mock" ) @@ -86,14 +86,14 @@ func (_m *Config) BlockHistoryEstimatorTransactionPercentile() uint16 { } // ChainType provides a mock function with given fields: -func (_m *Config) ChainType() chains.ChainType { +func (_m *Config) ChainType() config.ChainType { ret := _m.Called() - var r0 chains.ChainType - if rf, ok := ret.Get(0).(func() chains.ChainType); ok { + var r0 config.ChainType + if rf, ok := ret.Get(0).(func() config.ChainType); ok { r0 = rf() } else { - r0 = ret.Get(0).(chains.ChainType) + r0 = ret.Get(0).(config.ChainType) } return r0 diff --git a/core/chains/evm/gas/mocks/estimator.go b/core/chains/evm/gas/mocks/estimator.go index 29c5fddee0e..38fb994582c 100644 --- a/core/chains/evm/gas/mocks/estimator.go +++ b/core/chains/evm/gas/mocks/estimator.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/chains/evm/gas/mocks/optimism_rpc_client.go b/core/chains/evm/gas/mocks/optimism_rpc_client.go index ff1cc53703b..3344346ef25 100644 --- a/core/chains/evm/gas/mocks/optimism_rpc_client.go +++ b/core/chains/evm/gas/mocks/optimism_rpc_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/chains/evm/gas/models.go b/core/chains/evm/gas/models.go index cf2f1b862a6..c5c4ca84264 100644 --- a/core/chains/evm/gas/models.go +++ b/core/chains/evm/gas/models.go @@ -14,11 +14,11 @@ import ( "github.com/pkg/errors" "github.com/shopspring/decimal" - "github.com/smartcontractkit/chainlink/core/chains" evmclient "github.com/smartcontractkit/chainlink/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/core/chains/evm/label" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/core/config" "github.com/smartcontractkit/chainlink/core/logger" - "github.com/smartcontractkit/chainlink/core/static" ) var ( @@ -105,7 +105,7 @@ type Config interface { BlockHistoryEstimatorBlockHistorySize() uint16 BlockHistoryEstimatorTransactionPercentile() uint16 BlockHistoryEstimatorEIP1559FeeCapBufferBlocks() uint16 - ChainType() chains.ChainType + ChainType() config.ChainType EvmEIP1559DynamicFees() bool EvmFinalityDepth() uint32 EvmGasBumpPercent() uint16 @@ -304,7 +304,7 @@ func bumpGasPrice(cfg Config, lggr logger.SugaredLogger, currentGasPrice, origin } if bumpedGasPrice.Cmp(maxGasPrice) > 0 { return maxGasPrice, errors.Wrapf(ErrBumpGasExceedsLimit, "bumped gas price of %s would exceed configured max gas price of %s (original price was %s). 
%s", - bumpedGasPrice.String(), maxGasPrice, originalGasPrice.String(), static.EthNodeConnectivityProblemLabel) + bumpedGasPrice.String(), maxGasPrice, originalGasPrice.String(), label.NodeConnectivityProblemWarning) } else if bumpedGasPrice.Cmp(originalGasPrice) == 0 { // NOTE: This really shouldn't happen since we enforce minimums for // ETH_GAS_BUMP_PERCENT and ETH_GAS_BUMP_WEI in the config validation, @@ -324,7 +324,7 @@ func max(a, b *big.Int) *big.Int { } // BumpDynamicFeeOnly bumps the tip cap and max gas price if necessary -func BumpDynamicFeeOnly(config Config, lggr logger.Logger, currentTipCap *big.Int, currentBaseFee *big.Int, originalFee DynamicFee, originalGasLimit uint64) (bumped DynamicFee, chainSpecificGasLimit uint64, err error) { +func BumpDynamicFeeOnly(config Config, lggr logger.SugaredLogger, currentTipCap *big.Int, currentBaseFee *big.Int, originalFee DynamicFee, originalGasLimit uint64) (bumped DynamicFee, chainSpecificGasLimit uint64, err error) { bumped, err = bumpDynamicFee(config, lggr, currentTipCap, currentBaseFee, originalFee) if err != nil { return bumped, 0, err @@ -343,7 +343,7 @@ func BumpDynamicFeeOnly(config Config, lggr logger.Logger, currentTipCap *big.In // the Tip only. Unfortunately due to a flaw of how EIP-1559 is implemented we // have to bump FeeCap by at least 10% each time we bump the tip cap. 
// See: https://github.com/ethereum/go-ethereum/issues/24284 -func bumpDynamicFee(cfg Config, lggr logger.Logger, currentTipCap, currentBaseFee *big.Int, originalFee DynamicFee) (bumpedFee DynamicFee, err error) { +func bumpDynamicFee(cfg Config, lggr logger.SugaredLogger, currentTipCap, currentBaseFee *big.Int, originalFee DynamicFee) (bumpedFee DynamicFee, err error) { maxGasPrice := cfg.EvmMaxGasPriceWei() baselineTipCap := max(originalFee.TipCap, cfg.EvmGasTipCapDefault()) @@ -351,7 +351,7 @@ func bumpDynamicFee(cfg Config, lggr logger.Logger, currentTipCap, currentBaseFe if currentTipCap != nil { if currentTipCap.Cmp(maxGasPrice) > 0 { - lggr.Errorf("AssumptionViolation: Ignoring current tip cap of %s that would exceed max gas price of %s", currentTipCap.String(), maxGasPrice.String()) + lggr.AssumptionViolationf("Ignoring current tip cap of %s that would exceed max gas price of %s", currentTipCap.String(), maxGasPrice.String()) } else if bumpedTipCap.Cmp(currentTipCap) < 0 { // If the current gas tip cap is higher than the old tip cap with bump applied, use that instead bumpedTipCap = currentTipCap @@ -359,7 +359,7 @@ func bumpDynamicFee(cfg Config, lggr logger.Logger, currentTipCap, currentBaseFe } if bumpedTipCap.Cmp(maxGasPrice) > 0 { return bumpedFee, errors.Wrapf(ErrBumpGasExceedsLimit, "bumped tip cap of %s would exceed configured max gas price of %s (original fee: tip cap %s, fee cap %s). 
%s", - bumpedTipCap.String(), maxGasPrice, originalFee.TipCap.String(), originalFee.FeeCap.String(), static.EthNodeConnectivityProblemLabel) + bumpedTipCap.String(), maxGasPrice, originalFee.TipCap.String(), originalFee.FeeCap.String(), label.NodeConnectivityProblemWarning) } else if bumpedTipCap.Cmp(originalFee.TipCap) <= 0 { // NOTE: This really shouldn't happen since we enforce minimums for // ETH_GAS_BUMP_PERCENT and ETH_GAS_BUMP_WEI in the config validation, @@ -385,7 +385,7 @@ func bumpDynamicFee(cfg Config, lggr logger.Logger, currentTipCap, currentBaseFe if bumpedFeeCap.Cmp(maxGasPrice) > 0 { return bumpedFee, errors.Wrapf(ErrBumpGasExceedsLimit, "bumped fee cap of %s would exceed configured max gas price of %s (original fee: tip cap %s, fee cap %s). %s", - bumpedFeeCap.String(), maxGasPrice, originalFee.TipCap.String(), originalFee.FeeCap.String(), static.EthNodeConnectivityProblemLabel) + bumpedFeeCap.String(), maxGasPrice, originalFee.TipCap.String(), originalFee.FeeCap.String(), label.NodeConnectivityProblemWarning) } return DynamicFee{FeeCap: bumpedFeeCap, TipCap: bumpedTipCap}, nil diff --git a/core/chains/evm/headtracker/head_broadcaster.go b/core/chains/evm/headtracker/head_broadcaster.go index be65766b88b..616af553dc9 100644 --- a/core/chains/evm/headtracker/head_broadcaster.go +++ b/core/chains/evm/headtracker/head_broadcaster.go @@ -30,7 +30,7 @@ func NewHeadBroadcaster(lggr logger.Logger) httypes.HeadBroadcaster { return &headBroadcaster{ logger: lggr.Named(logger.HeadBroadcaster), callbacks: make(callbackSet), - mailbox: utils.NewMailbox(1), + mailbox: utils.NewMailbox[*evmtypes.Head](1), mutex: &sync.Mutex{}, chClose: make(chan struct{}), wgDone: sync.WaitGroup{}, @@ -41,7 +41,7 @@ func NewHeadBroadcaster(lggr logger.Logger) httypes.HeadBroadcaster { type headBroadcaster struct { logger logger.Logger callbacks callbackSet - mailbox *utils.Mailbox + mailbox *utils.Mailbox[*evmtypes.Head] mutex *sync.Mutex chClose chan struct{} wgDone 
sync.WaitGroup @@ -112,12 +112,11 @@ func (hb *headBroadcaster) run() { // Jobs should expect to the relayer to skip heads if there is a large number of listeners // and all callbacks cannot be completed in the allotted time. func (hb *headBroadcaster) executeCallbacks() { - item, exists := hb.mailbox.Retrieve() + head, exists := hb.mailbox.Retrieve() if !exists { hb.logger.Info("No head to retrieve. It might have been skipped") return } - head := evmtypes.AsHead(item) hb.mutex.Lock() callbacks := hb.callbacks.values() diff --git a/core/chains/evm/headtracker/head_tracker.go b/core/chains/evm/headtracker/head_tracker.go index a69d613dc48..761e8fed676 100644 --- a/core/chains/evm/headtracker/head_tracker.go +++ b/core/chains/evm/headtracker/head_tracker.go @@ -43,8 +43,8 @@ type headTracker struct { chainID big.Int config Config - backfillMB *utils.Mailbox - broadcastMB *utils.Mailbox + backfillMB *utils.Mailbox[*evmtypes.Head] + broadcastMB *utils.Mailbox[*evmtypes.Head] headListener httypes.HeadListener chStop chan struct{} wgDone sync.WaitGroup @@ -67,8 +67,8 @@ func NewHeadTracker( chainID: *ethClient.ChainID(), config: config, log: lggr, - backfillMB: utils.NewMailbox(1), - broadcastMB: utils.NewMailbox(HeadsBufferSize), + backfillMB: utils.NewMailbox[*evmtypes.Head](1), + broadcastMB: utils.NewMailbox[*evmtypes.Head](HeadsBufferSize), chStop: chStop, headListener: NewHeadListener(lggr, ethClient, config, chStop), headSaver: headSaver, @@ -228,7 +228,7 @@ func (ht *headTracker) broadcastLoop() { if item == nil { continue } - ht.headBroadcaster.BroadcastNewLongestChain(evmtypes.AsHead(item)) + ht.headBroadcaster.BroadcastNewLongestChain(item) } } } else { @@ -243,7 +243,7 @@ func (ht *headTracker) broadcastLoop() { if !exists { break } - ht.headBroadcaster.BroadcastNewLongestChain(evmtypes.AsHead(item)) + ht.headBroadcaster.BroadcastNewLongestChain(item) } } } @@ -262,11 +262,10 @@ func (ht *headTracker) backfillLoop() { return case <-ht.backfillMB.Notify(): for 
{ - item, exists := ht.backfillMB.Retrieve() + head, exists := ht.backfillMB.Retrieve() if !exists { break } - head := evmtypes.AsHead(item) { err := ht.Backfill(ctx, head, uint(ht.config.EvmFinalityDepth())) if err != nil { diff --git a/core/chains/evm/headtracker/head_tracker_test.go b/core/chains/evm/headtracker/head_tracker_test.go index a56a3549f75..76c9e42c894 100644 --- a/core/chains/evm/headtracker/head_tracker_test.go +++ b/core/chains/evm/headtracker/head_tracker_test.go @@ -28,7 +28,6 @@ import ( httypes "github.com/smartcontractkit/chainlink/core/chains/evm/headtracker/types" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" "github.com/smartcontractkit/chainlink/core/internal/cltest" - "github.com/smartcontractkit/chainlink/core/internal/cltest/heavyweight" "github.com/smartcontractkit/chainlink/core/internal/testutils/configtest" "github.com/smartcontractkit/chainlink/core/internal/testutils/evmtest" "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" @@ -54,12 +53,14 @@ func TestHeadTracker_New(t *testing.T) { db := pgtest.NewSqlxDB(t) logger := logger.TestLogger(t) config := cltest.NewTestGeneralConfig(t) - - ethClient, sub := cltest.NewEthClientAndSubMockWithDefaultChain(t) - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + mockEth := &evmtest.MockEth{EthClient: ethClient} + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). 
+ Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { return mockEth.NewSub(t) }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(cltest.Head(0), nil) - sub.On("Unsubscribe").Maybe().Return(nil) - sub.On("Err").Return(nil) orm := headtracker.NewORM(db, logger, config, cltest.FixtureChainID) assert.Nil(t, orm.IdempotentInsertHead(testutils.Context(t), cltest.Head(1))) @@ -128,13 +129,19 @@ func TestHeadTracker_Get(t *testing.T) { config := newCfg(t) orm := headtracker.NewORM(db, logger, config, cltest.FixtureChainID) - ethClient, sub := cltest.NewEthClientAndSubMockWithDefaultChain(t) - sub.On("Err").Return(nil) - sub.On("Unsubscribe").Return(nil) + ethClient := cltest.NewEthClientMockWithDefaultChain(t) chStarted := make(chan struct{}) + mockEth := &evmtest.MockEth{ + EthClient: ethClient, + } ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). - Run(func(mock.Arguments) { close(chStarted) }). 
- Return(sub, nil) + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + defer close(chStarted) + return mockEth.NewSub(t) + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(cltest.Head(0), nil) fnCall := ethClient.On("HeadByNumber", mock.Anything, mock.Anything) @@ -168,14 +175,18 @@ func TestHeadTracker_Start_NewHeads(t *testing.T) { config := newCfg(t) orm := headtracker.NewORM(db, logger, config, cltest.FixtureChainID) - ethClient, sub := cltest.NewEthClientAndSubMockWithDefaultChain(t) - sub.On("Err").Return(nil) - sub.On("Unsubscribe").Return(nil) + ethClient := cltest.NewEthClientMockWithDefaultChain(t) chStarted := make(chan struct{}) ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(cltest.Head(0), nil) + mockEth := &evmtest.MockEth{EthClient: ethClient} ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). - Run(func(mock.Arguments) { close(chStarted) }). 
- Return(sub, nil) + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + defer close(chStarted) + return mockEth.NewSub(t) + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) ht := createHeadTracker(t, ethClient, config, orm) ht.Start(t) @@ -192,9 +203,7 @@ func TestHeadTracker_Start_CancelContext(t *testing.T) { logger := logger.TestLogger(t) config := newCfg(t) orm := headtracker.NewORM(db, logger, config, cltest.FixtureChainID) - ethClient, sub := cltest.NewEthClientAndSubMockWithDefaultChain(t) - sub.On("Err").Return(nil) - sub.On("Unsubscribe").Return(nil) + ethClient := cltest.NewEthClientMockWithDefaultChain(t) chStarted := make(chan struct{}) ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Run(func(args mock.Arguments) { ctx := args.Get(0).(context.Context) @@ -205,9 +214,15 @@ func TestHeadTracker_Start_CancelContext(t *testing.T) { assert.FailNow(t, "context was not cancelled within 10s") } }).Return(cltest.Head(0), nil) + mockEth := &evmtest.MockEth{EthClient: ethClient} ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). - Run(func(mock.Arguments) { close(chStarted) }). 
- Return(sub, nil) + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + defer close(chStarted) + return mockEth.NewSub(t) + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) ht := createHeadTracker(t, ethClient, config, orm) @@ -229,19 +244,21 @@ func TestHeadTracker_CallsHeadTrackableCallbacks(t *testing.T) { config := newCfg(t) orm := headtracker.NewORM(db, logger, config, cltest.FixtureChainID) - ethClient, sub := cltest.NewEthClientAndSubMockWithDefaultChain(t) + ethClient := cltest.NewEthClientMockWithDefaultChain(t) - chchHeaders := make(chan chan<- *evmtypes.Head, 1) + chchHeaders := make(chan evmtest.RawSub[*evmtypes.Head], 1) + mockEth := &evmtest.MockEth{EthClient: ethClient} ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). - Run(func(args mock.Arguments) { - chchHeaders <- args.Get(1).(chan<- *evmtypes.Head) - }). - Return(sub, nil) + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + sub := mockEth.NewSub(t) + chchHeaders <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(cltest.Head(0), nil) - sub.On("Unsubscribe").Return() - sub.On("Err").Return(nil) - checker := &cltest.MockHeadTrackable{} ht := createHeadTrackerWithChecker(t, ethClient, config, orm, checker) @@ -249,7 +266,7 @@ func TestHeadTracker_CallsHeadTrackableCallbacks(t *testing.T) { assert.Equal(t, int32(0), checker.OnNewLongestChainCount()) headers := <-chchHeaders - headers <- &evmtypes.Head{Number: 1, Hash: utils.NewHash(), EVMChainID: utils.NewBig(&cltest.FixtureChainID)} + headers.TrySend(&evmtypes.Head{Number: 1, Hash: utils.NewHash(), EVMChainID: utils.NewBig(&cltest.FixtureChainID)}) g.Eventually(func() int32 { return checker.OnNewLongestChainCount() }).Should(gomega.Equal(int32(1))) ht.Stop(t) @@ -265,14 +282,20 @@ 
func TestHeadTracker_ReconnectOnError(t *testing.T) { config := newCfg(t) orm := headtracker.NewORM(db, logger, config, cltest.FixtureChainID) - ethClient, sub := cltest.NewEthClientAndSubMockWithDefaultChain(t) - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil) + ethClient := cltest.NewEthClientMockWithDefaultChain(t) + mockEth := &evmtest.MockEth{EthClient: ethClient} + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { return mockEth.NewSub(t) }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, errors.New("cannot reconnect")) - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil) + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { return mockEth.NewSub(t) }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(cltest.Head(0), nil) - chErr := make(chan error) - sub.On("Unsubscribe").Return() - sub.On("Err").Return((<-chan error)(chErr)) checker := &cltest.MockHeadTrackable{} ht := createHeadTrackerWithChecker(t, ethClient, config, orm, checker) @@ -282,7 +305,7 @@ func TestHeadTracker_ReconnectOnError(t *testing.T) { assert.Equal(t, int32(0), checker.OnNewLongestChainCount()) // trigger reconnect loop - chErr <- errors.New("test error to force reconnect") + mockEth.SubsErr(errors.New("test error to force reconnect")) g.Eventually(func() int32 { return checker.OnNewLongestChainCount() }).Should(gomega.Equal(int32(1))) @@ -297,18 +320,22 @@ func TestHeadTracker_ResubscribeOnSubscriptionError(t *testing.T) { config := newCfg(t) orm := headtracker.NewORM(db, logger, config, cltest.FixtureChainID) - ethClient, sub 
:= cltest.NewEthClientAndSubMockWithDefaultChain(t) + ethClient := cltest.NewEthClientMockWithDefaultChain(t) - chchHeaders := make(chan chan<- *evmtypes.Head, 1) + chchHeaders := make(chan evmtest.RawSub[*evmtypes.Head], 1) + mockEth := &evmtest.MockEth{EthClient: ethClient} ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). - Run(func(args mock.Arguments) { chchHeaders <- args.Get(1).(chan<- *evmtypes.Head) }). Twice(). - Return(sub, nil) + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + sub := mockEth.NewSub(t) + chchHeaders <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(cltest.Head(0), nil) - sub.On("Unsubscribe").Return() - sub.On("Err").Return(nil) - checker := &cltest.MockHeadTrackable{} ht := createHeadTrackerWithChecker(t, ethClient, config, orm, checker) @@ -317,13 +344,13 @@ func TestHeadTracker_ResubscribeOnSubscriptionError(t *testing.T) { headers := <-chchHeaders go func() { - headers <- cltest.Head(1) + headers.TrySend(cltest.Head(1)) }() g.Eventually(func() bool { return ht.headTracker.Healthy() == nil }, 5*time.Second, 5*time.Millisecond).Should(gomega.Equal(true)) // trigger reconnect loop - close(headers) + headers.CloseCh() // wait for full disconnect and a new subscription g.Eventually(func() int32 { return checker.OnNewLongestChainCount() }, 5*time.Second, 5*time.Millisecond).Should(gomega.Equal(int32(1))) @@ -335,9 +362,13 @@ func TestHeadTracker_Start_LoadsLatestChain(t *testing.T) { db := pgtest.NewSqlxDB(t) logger := logger.TestLogger(t) config := newCfg(t) - ethClient, sub := cltest.NewEthClientAndSubMockWithDefaultChain(t) - - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil) + ethClient := cltest.NewEthClientMockWithDefaultChain(t) + mockEth := &evmtest.MockEth{EthClient: ethClient} + 
ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { return mockEth.NewSub(t) }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) heads := []*evmtypes.Head{ cltest.Head(0), @@ -357,9 +388,6 @@ func TestHeadTracker_Start_LoadsLatestChain(t *testing.T) { ethClient.On("HeadByNumber", mock.Anything, big.NewInt(1)).Return(heads[1], nil) ethClient.On("HeadByNumber", mock.Anything, big.NewInt(0)).Return(heads[0], nil) - sub.On("Unsubscribe").Return() - sub.On("Err").Return(nil) - orm := headtracker.NewORM(db, logger, config, cltest.FixtureChainID) trackable := new(htmocks.HeadTrackable) trackable.Test(t) @@ -380,30 +408,38 @@ func TestHeadTracker_Start_LoadsLatestChain(t *testing.T) { } func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingEnabled(t *testing.T) { - // Need separate db because ht.Stop() will cancel the ctx, causing a db connection - // close and go-txdb rollback. 
- config, db := heavyweight.FullTestDB(t, "switches_longest_chain", true, true) + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logger := logger.TestLogger(t) + + config := cltest.NewTestGeneralConfig(t) config.Overrides.GlobalEvmFinalityDepth = null.IntFrom(50) // Need to set the buffer to something large since we inject a lot of heads at once and otherwise they will be dropped - config.Overrides.GlobalEvmHeadTrackerMaxBufferSize = null.IntFrom(42) + config.Overrides.GlobalEvmHeadTrackerMaxBufferSize = null.IntFrom(100) // Head sampling enabled d := 2500 * time.Millisecond config.Overrides.GlobalEvmHeadTrackerSamplingInterval = &d - ethClient, sub := cltest.NewEthClientAndSubMockWithDefaultChain(t) + ethClient := cltest.NewEthClientMockWithDefaultChain(t) checker := new(htmocks.HeadTrackable) checker.Test(t) - orm := headtracker.NewORM(db, logger.TestLogger(t), config, *config.DefaultChainID()) + orm := headtracker.NewORM(db, logger, config, *config.DefaultChainID()) ht := createHeadTrackerWithChecker(t, ethClient, evmtest.NewChainScopedConfig(t, config), orm, checker) - chchHeaders := make(chan chan<- *evmtypes.Head, 1) + chchHeaders := make(chan evmtest.RawSub[*evmtypes.Head], 1) + mockEth := &evmtest.MockEth{EthClient: ethClient} ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). - Run(func(args mock.Arguments) { chchHeaders <- args.Get(1).(chan<- *evmtypes.Head) }). 
- Return(sub, nil) - sub.On("Unsubscribe").Return() - sub.On("Err").Return(nil) + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + sub := mockEth.NewSub(t) + chchHeaders <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) // --------------------- blocks := cltest.NewBlocks(t, 10) @@ -488,10 +524,11 @@ func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingEnabled(t *testing.T) latestHeadByNumberMu.Lock() latestHeadByNumber[h.Number] = h latestHeadByNumberMu.Unlock() - headers <- h + headers.TrySend(h) } - lastLongestChainAwaiter.AwaitOrFail(t) + // default 10s may not be sufficient, so using testutils.WaitTimeout(t) + lastLongestChainAwaiter.AwaitOrFail(t, testutils.WaitTimeout(t)) ht.Stop(t) assert.Equal(t, int64(5), ht.headSaver.LatestChain().Number) @@ -507,30 +544,37 @@ func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingEnabled(t *testing.T) } func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingDisabled(t *testing.T) { - // Need separate db because ht.Stop() will cancel the ctx, causing a db connection - // close and go-txdb rollback. 
- config, db := heavyweight.FullTestDB(t, "switches_longest_chain", true, true) + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logger := logger.TestLogger(t) + config := cltest.NewTestGeneralConfig(t) config.Overrides.GlobalEvmFinalityDepth = null.IntFrom(50) // Need to set the buffer to something large since we inject a lot of heads at once and otherwise they will be dropped - config.Overrides.GlobalEvmHeadTrackerMaxBufferSize = null.IntFrom(42) + config.Overrides.GlobalEvmHeadTrackerMaxBufferSize = null.IntFrom(100) d := 0 * time.Second config.Overrides.GlobalEvmHeadTrackerSamplingInterval = &d - ethClient, sub := cltest.NewEthClientAndSubMockWithDefaultChain(t) + ethClient := cltest.NewEthClientMockWithDefaultChain(t) checker := new(htmocks.HeadTrackable) checker.Test(t) - orm := headtracker.NewORM(db, logger.TestLogger(t), config, cltest.FixtureChainID) + orm := headtracker.NewORM(db, logger, config, cltest.FixtureChainID) evmcfg := evmtest.NewChainScopedConfig(t, config) ht := createHeadTrackerWithChecker(t, ethClient, evmcfg, orm, checker) - chchHeaders := make(chan chan<- *evmtypes.Head, 1) + chchHeaders := make(chan evmtest.RawSub[*evmtypes.Head], 1) + mockEth := &evmtest.MockEth{EthClient: ethClient} ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). - Run(func(args mock.Arguments) { chchHeaders <- args.Get(1).(chan<- *evmtypes.Head) }). 
- Return(sub, nil) - sub.On("Unsubscribe").Return() - sub.On("Err").Return(nil) + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + sub := mockEth.NewSub(t) + chchHeaders <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) // --------------------- blocks := cltest.NewBlocks(t, 10) @@ -643,10 +687,12 @@ func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingDisabled(t *testing.T latestHeadByNumberMu.Lock() latestHeadByNumber[h.Number] = h latestHeadByNumberMu.Unlock() - headers <- h + headers.TrySend(h) + time.Sleep(testutils.TestInterval) } - lastLongestChainAwaiter.AwaitOrFail(t) + // default 10s may not be sufficient, so using testutils.WaitTimeout(t) + lastLongestChainAwaiter.AwaitOrFail(t, testutils.WaitTimeout(t)) ht.Stop(t) assert.Equal(t, int64(5), ht.headSaver.LatestChain().Number) diff --git a/core/chains/evm/headtracker/mocks/config.go b/core/chains/evm/headtracker/mocks/config.go index 0788ae8bb70..d984e038c58 100644 --- a/core/chains/evm/headtracker/mocks/config.go +++ b/core/chains/evm/headtracker/mocks/config.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/chains/evm/headtracker/mocks/head_broadcaster.go b/core/chains/evm/headtracker/mocks/head_broadcaster.go index 202d8084154..4041d6c8ead 100644 --- a/core/chains/evm/headtracker/mocks/head_broadcaster.go +++ b/core/chains/evm/headtracker/mocks/head_broadcaster.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/chains/evm/headtracker/mocks/head_listener.go b/core/chains/evm/headtracker/mocks/head_listener.go index d12db3066d0..4ec672aa464 100644 --- a/core/chains/evm/headtracker/mocks/head_listener.go +++ b/core/chains/evm/headtracker/mocks/head_listener.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/chains/evm/headtracker/mocks/head_trackable.go b/core/chains/evm/headtracker/mocks/head_trackable.go index f77d62be3bb..95de3870799 100644 --- a/core/chains/evm/headtracker/mocks/head_trackable.go +++ b/core/chains/evm/headtracker/mocks/head_trackable.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/chains/evm/label/label.go b/core/chains/evm/label/label.go new file mode 100644 index 00000000000..b0712b12088 --- /dev/null +++ b/core/chains/evm/label/label.go @@ -0,0 +1,9 @@ +package label + +//nolint +const ( + MaxInFlightTransactionsWarning = `WARNING: If this happens a lot, you may need to increase ETH_MAX_IN_FLIGHT_TRANSACTIONS to boost your node's transaction throughput, however you do this at your own risk. You MUST first ensure your ethereum node is configured not to ever evict local transactions that exceed this number otherwise the node can get permanently stuck. See the documentation for more details: https://docs.chain.link/docs/configuration-variables/` + MaxQueuedTransactionsWarning = `WARNING: Hitting ETH_MAX_QUEUED_TRANSACTIONS is a sanity limit and should never happen under normal operation. Unless you are operating with very high throughput, this error is unlikely to be a problem with your Chainlink node configuration, and instead more likely to be caused by a problem with your eth node's connectivity. 
Check your eth node: it may not be broadcasting transactions to the network, or it might be overloaded and evicting Chainlink's transactions from its mempool. It is recommended to run Chainlink with multiple primary and sendonly nodes for redundancy and to ensure fast and reliable transaction propagation. Increasing ETH_MAX_QUEUED_TRANSACTIONS will allow Chainlink to buffer more unsent transactions, but you should only do this if you need very high burst transmission rates. If you don't need very high burst throughput, increasing this limit is not the correct action to take here and will probably make things worse` + NodeConnectivityProblemWarning = `WARNING: If this happens a lot, it may be a sign that your eth node has a connectivity problem, and your transactions are not making it to any miners. It is recommended to run Chainlink with multiple primary and sendonly nodes for redundancy and to ensure fast and reliable transaction propagation.` + RPCTxFeeCapConfiguredIncorrectlyWarning = `WARNING: Gas price was rejected by the eth node for being too high. By default, go-ethereum (and clones) have a built-in upper limit for gas price. It is preferable to disable this and rely Chainlink's internal gas limits instead. Your RPC node's RPCTxFeeCap needs to be disabled or increased (recommended configuration: --rpc.gascap=0 --rpc.txfeecap=0). If you want to limit Chainlink's max gas price, you may do so by setting ETH_MAX_GAS_PRICE_WEI on the Chainlink node. Chainlink will never send a transaction with a total cost higher than ETH_MAX_GAS_PRICE_WEI. 
See the docs for more details: https://docs.chain.link/docs/configuration-variables/` +) diff --git a/core/chains/evm/legacy.go b/core/chains/evm/legacy.go index 8c975b1d587..fcc47f0632b 100644 --- a/core/chains/evm/legacy.go +++ b/core/chains/evm/legacy.go @@ -10,11 +10,11 @@ import ( "github.com/pkg/errors" "gopkg.in/guregu/null.v4" + "github.com/smartcontractkit/sqlx" + evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" "github.com/smartcontractkit/chainlink/core/logger" - "github.com/smartcontractkit/chainlink/core/services/pg" "github.com/smartcontractkit/chainlink/core/utils" - "github.com/smartcontractkit/sqlx" ) type LegacyEthNodeConfig interface { @@ -61,8 +61,8 @@ For more information on configuring your node, check the docs: https://docs.chai ` func ClobberDBFromEnv(db *sqlx.DB, config LegacyEthNodeConfig, lggr logger.Logger) error { - if err := SetupMultiplePrimaries(db, config, lggr); err != nil { - return errors.Wrap(err, "failed to setup multiple primary nodes") + if err := SetupNodes(db, config, lggr); err != nil { + return errors.Wrap(err, "failed to setup EVM nodes") } primaryWS := config.EthereumURL() @@ -109,43 +109,38 @@ func ClobberDBFromEnv(db *sqlx.DB, config LegacyEthNodeConfig, lggr logger.Logge return nil } -// SetupMultiplePrimaries is a hack/shim method to allow node operators to -// specify multiple nodes via ENV +// SetupNodes is a hack/shim method to allow node operators to specify multiple nodes via ENV. 
// See: https://app.shortcut.com/chainlinklabs/epic/33587/overhaul-config?cf_workflow=500000005&ct_workflow=all -func SetupMultiplePrimaries(db *sqlx.DB, cfg LegacyEthNodeConfig, lggr logger.Logger) (err error) { +func SetupNodes(db *sqlx.DB, cfg LegacyEthNodeConfig, lggr logger.Logger) (err error) { if cfg.EthereumNodes() == "" { return nil } - lggr.Info("EVM_NODES was set; clobbering evm_nodes table") - _, err = db.Exec(`TRUNCATE evm_nodes;`) - if err != nil { - return errors.Wrap(err, "failed to truncate evm_nodes table while inserting nodes set by EVM_NODES") - } - var nodes []evmtypes.Node if err = json.Unmarshal([]byte(cfg.EthereumNodes()), &nodes); err != nil { - return errors.Wrapf(err, "invalid nodes json, got: %q", cfg.EthereumNodes()) + return errors.Wrapf(err, "invalid EVM_NODES json, got: %q", cfg.EthereumNodes()) } // Sorting gives a consistent insert ordering sort.Slice(nodes, func(i, j int) bool { return nodes[i].Name < nodes[j].Name }) - chainIDs := make(map[string]struct{}) - for _, n := range nodes { - chainIDs[n.EVMChainID.String()] = struct{}{} - } - for cid := range chainIDs { - if _, err := pg.NewQ(db, lggr, cfg).Exec("INSERT INTO evm_chains (id, created_at, updated_at) VALUES ($1, NOW(), NOW()) ON CONFLICT DO NOTHING;", cid); err != nil { - return errors.Wrapf(err, "failed to insert chain %s", cid) - } - } + lggr.Info("EVM_NODES was set; clobbering evm_nodes table") - stmt := `INSERT INTO evm_nodes (name, evm_chain_id, ws_url, http_url, send_only, created_at, updated_at) - VALUES (:name, :evm_chain_id, :ws_url, :http_url, :send_only, now(), now()) - ON CONFLICT DO NOTHING;` - _, err = pg.NewQ(db, lggr, cfg).NamedExec(stmt, nodes) + orm := NewORM(db, lggr, cfg) + return orm.SetupNodes(nodes, uniqueIDs(nodes)) +} - return errors.Wrap(err, "failed to insert nodes") +func uniqueIDs(ns []evmtypes.Node) (ids []utils.Big) { + m := make(map[string]struct{}) + for _, n := range ns { + id := n.EVMChainID + sid := id.String() + if _, ok := m[sid]; ok { 
+ continue + } + ids = append(ids, id) + m[sid] = struct{}{} + } + return } diff --git a/core/chains/evm/legacy_test.go b/core/chains/evm/legacy_test.go index c3893cd783e..9abb79ece4e 100644 --- a/core/chains/evm/legacy_test.go +++ b/core/chains/evm/legacy_test.go @@ -114,7 +114,7 @@ func Test_ClobberDBFromEnv(t *testing.T) { }) } -func Test_SetupMultiplePrimaries(t *testing.T) { +func TestSetupNodes(t *testing.T) { db := pgtest.NewSqlxDB(t) // Insert existing node which will be erased @@ -186,7 +186,7 @@ func Test_SetupMultiplePrimaries(t *testing.T) { evmNodes: s, } - err := evm.ClobberDBFromEnv(db, cfg, logger.TestLogger(t)) + err := evm.SetupNodes(db, cfg, logger.TestLogger(t)) require.NoError(t, err) cltest.AssertCount(t, db, "evm_nodes", 7) diff --git a/core/chains/evm/log/broadcaster.go b/core/chains/evm/log/broadcaster.go index 81103684102..dc4d75b5604 100644 --- a/core/chains/evm/log/broadcaster.go +++ b/core/chains/evm/log/broadcaster.go @@ -59,7 +59,11 @@ type ( WasAlreadyConsumed(lb Broadcast, qopts ...pg.QOpt) (bool, error) MarkConsumed(lb Broadcast, qopts ...pg.QOpt) error - // NOTE: WasAlreadyConsumed and MarkConsumed MUST be used within a single goroutine in order for WasAlreadyConsumed to be accurate + + // MarkManyConsumed marks all the provided log broadcasts as consumed. 
+ MarkManyConsumed(lbs []Broadcast, qopts ...pg.QOpt) error + + // NOTE: WasAlreadyConsumed, MarkConsumed and MarkManyConsumed MUST be used within a single goroutine in order for WasAlreadyConsumed to be accurate } BroadcasterInTest interface { @@ -100,8 +104,8 @@ type ( // Use the same channel for subs/unsubs so ordering is preserved // (unsubscribe must happen after subscribe) - changeSubscriberStatus *utils.Mailbox - newHeads *utils.Mailbox + changeSubscriberStatus *utils.Mailbox[changeSubscriberStatus] + newHeads *utils.Mailbox[*evmtypes.Head] utils.StartStopOnce utils.DependentAwaiter @@ -169,8 +173,8 @@ func NewBroadcaster(orm ORM, ethClient evmclient.Client, config Config, lggr log ethSubscriber: newEthSubscriber(ethClient, config, lggr, chStop), registrations: newRegistrations(lggr, *ethClient.ChainID()), logPool: newLogPool(), - changeSubscriberStatus: utils.NewMailbox(100000), // Seems unlikely we'd subscribe more than 100,000 times before LB start - newHeads: utils.NewMailbox(1), + changeSubscriberStatus: utils.NewMailbox[changeSubscriberStatus](100000), // Seems unlikely we'd subscribe more than 100,000 times before LB start + newHeads: utils.NewMailbox[*evmtypes.Head](1), DependentAwaiter: utils.NewDependentAwaiter(), chStop: chStop, highestSavedHead: highestSavedHead, @@ -271,7 +275,7 @@ func (b *broadcaster) Register(listener Listener, opts ListenerOpts) (unsubscrib func (b *broadcaster) OnNewLongestChain(ctx context.Context, head *evmtypes.Head) { wasOverCapacity := b.newHeads.Deliver(head) if wasOverCapacity { - b.logger.Debugw("TRACE: Dropped the older head in the mailbox, while inserting latest (which is fine)", "latestBlockNumber", head.Number) + b.logger.Debugw("Dropped the older head in the mailbox, while inserting latest (which is fine)", "latestBlockNumber", head.Number) } } @@ -516,11 +520,10 @@ func (b *broadcaster) onNewHeads() { var latestHead *evmtypes.Head for { // We only care about the most recent head - item := 
b.newHeads.RetrieveLatestAndClear() - if item == nil { + head := b.newHeads.RetrieveLatestAndClear() + if head == nil { break } - head := evmtypes.AsHead(item) latestHead = head } @@ -584,14 +587,10 @@ func (b *broadcaster) onNewHeads() { func (b *broadcaster) onChangeSubscriberStatus() (needsResubscribe bool) { for { - x, exists := b.changeSubscriberStatus.Retrieve() + change, exists := b.changeSubscriberStatus.Retrieve() if !exists { break } - change, ok := x.(changeSubscriberStatus) - if !ok { - b.logger.Panicf("expected `changeSubscriberStatus`, got %T", x) - } sub := change.sub if change.newStatus == subscriberStatusSubscribe { @@ -667,6 +666,23 @@ func (b *broadcaster) MarkConsumed(lb Broadcast, qopts ...pg.QOpt) error { return b.orm.MarkBroadcastConsumed(lb.RawLog().BlockHash, lb.RawLog().BlockNumber, lb.RawLog().Index, lb.JobID(), qopts...) } +// MarkManyConsumed marks the logs as having been successfully consumed by the subscriber +func (b *broadcaster) MarkManyConsumed(lbs []Broadcast, qopts ...pg.QOpt) (err error) { + var ( + blockHashes = make([]common.Hash, len(lbs)) + blockNumbers = make([]uint64, len(lbs)) + logIndexes = make([]uint, len(lbs)) + jobIDs = make([]int32, len(lbs)) + ) + for i := range lbs { + blockHashes[i] = lbs[i].RawLog().BlockHash + blockNumbers[i] = lbs[i].RawLog().BlockNumber + logIndexes[i] = lbs[i].RawLog().Index + jobIDs[i] = lbs[i].JobID() + } + return b.orm.MarkBroadcastsConsumed(blockHashes, blockNumbers, logIndexes, jobIDs, qopts...) 
+} + // test only func (b *broadcaster) TrackedAddressesCount() uint32 { return b.trackedAddressesCount.Load() @@ -722,6 +738,9 @@ func (n *NullBroadcaster) WasAlreadyConsumed(lb Broadcast, qopts ...pg.QOpt) (bo func (n *NullBroadcaster) MarkConsumed(lb Broadcast, qopts ...pg.QOpt) error { return errors.New(n.ErrMsg) } +func (n *NullBroadcaster) MarkManyConsumed(lbs []Broadcast, qopts ...pg.QOpt) error { + return errors.New(n.ErrMsg) +} func (n *NullBroadcaster) AddDependents(int) {} func (n *NullBroadcaster) AwaitDependents() <-chan struct{} { diff --git a/core/chains/evm/log/eth_subscriber.go b/core/chains/evm/log/eth_subscriber.go index fd813742550..9f73d907134 100644 --- a/core/chains/evm/log/eth_subscriber.go +++ b/core/chains/evm/log/eth_subscriber.go @@ -60,7 +60,7 @@ func (sub *ethSubscriber) backfillLogs(fromBlockOverride null.Int64, addresses [ if latestHeight < 0 { latestBlock, err := sub.ethClient.HeadByNumber(ctxParent, nil) if err != nil { - sub.logger.Errorw("LogBroadcaster: Backfill - could not fetch latest block header, will retry", "err", err) + sub.logger.Warnw("LogBroadcaster: Backfill - could not fetch latest block header, will retry", "err", err) return true } else if latestBlock == nil { sub.logger.Warn("LogBroadcaster: Got nil block header, will retry") @@ -255,6 +255,7 @@ func (sub managedSubscriptionImpl) Logs() chan types.Log { func (sub managedSubscriptionImpl) Unsubscribe() { sub.subscription.Unsubscribe() + <-sub.Err() // ensure sending has stopped before closing the chan close(sub.chRawLogs) } diff --git a/core/chains/evm/log/helpers_test.go b/core/chains/evm/log/helpers_test.go index 8b6645e2116..eee5d410b1f 100644 --- a/core/chains/evm/log/helpers_test.go +++ b/core/chains/evm/log/helpers_test.go @@ -1,6 +1,7 @@ package log_test import ( + "context" "fmt" "math/big" "sync" @@ -43,12 +44,12 @@ type broadcasterHelper struct { t *testing.T lb log.BroadcasterInTest db *sqlx.DB - mockEth *mockEth + mockEth *evmtest.MockEth globalConfig 
*configtest.TestGeneralConfig config evmconfig.ChainScopedConfig // each received channel corresponds to one eth subscription - chchRawLogs chan chan<- types.Log + chchRawLogs chan evmtest.RawSub[types.Log] toUnsubscribe []func() pipelineHelper cltest.JobPipelineV2TestHelper } @@ -70,9 +71,9 @@ func (c broadcasterHelperCfg) new(t *testing.T, blockHeight int64, timesSubscrib FilterLogsResult: filterLogsResult, } - chchRawLogs := make(chan chan<- types.Log, timesSubscribe) + chchRawLogs := make(chan evmtest.RawSub[types.Log], timesSubscribe) mockEth := newMockEthClient(t, chchRawLogs, blockHeight, expectedCalls) - helper := c.newWithEthClient(t, mockEth.ethClient) + helper := c.newWithEthClient(t, mockEth.EthClient) helper.chchRawLogs = chchRawLogs helper.mockEth = mockEth helper.globalConfig.Overrides.GlobalEvmFinalityDepth = null.IntFrom(10) @@ -165,7 +166,7 @@ func (helper *broadcasterHelper) requireBroadcastCount(expectedCount int) { return count.Count, err } - g.Eventually(comparisonFunc, cltest.WaitTimeout(helper.t), time.Second).Should(gomega.Equal(expectedCount)) + g.Eventually(comparisonFunc, testutils.WaitTimeout(helper.t), time.Second).Should(gomega.Equal(expectedCount)) g.Consistently(comparisonFunc, 1*time.Second, 200*time.Millisecond).Should(gomega.Equal(expectedCount)) } @@ -358,27 +359,6 @@ type mockListener struct { func (l *mockListener) JobID() int32 { return l.jobID } func (l *mockListener) HandleLog(log.Broadcast) {} -type mockEth struct { - ethClient *evmmocks.Client - sub *evmmocks.Subscription - subscribeCalls atomic.Int32 - unsubscribeCalls atomic.Int32 - checkFilterLogs func(int64, int64) -} - -func (mock *mockEth) assertExpectations(t *testing.T) { - mock.ethClient.AssertExpectations(t) - mock.sub.AssertExpectations(t) -} - -func (mock *mockEth) subscribeCallCount() int32 { - return mock.subscribeCalls.Load() -} - -func (mock *mockEth) unsubscribeCallCount() int32 { - return mock.unsubscribeCalls.Load() -} - type mockEthClientExpectedCalls 
struct { SubscribeFilterLogs int HeaderByNumber int @@ -387,45 +367,41 @@ type mockEthClientExpectedCalls struct { FilterLogsResult []types.Log } -func newMockEthClient(t *testing.T, chchRawLogs chan<- chan<- types.Log, blockHeight int64, expectedCalls mockEthClientExpectedCalls) *mockEth { - ethClient, sub := cltest.NewEthClientAndSubMock(t) - mockEth := &mockEth{ - ethClient: ethClient, - sub: sub, - checkFilterLogs: nil, - } - mockEth.ethClient.On("ChainID", mock.Anything).Return(&cltest.FixtureChainID) - mockEth.ethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything). - Run(func(args mock.Arguments) { - mockEth.subscribeCalls.Inc() - chchRawLogs <- args.Get(2).(chan<- types.Log) - }). - Return(mockEth.sub, nil). +func newMockEthClient(t *testing.T, chchRawLogs chan<- evmtest.RawSub[types.Log], blockHeight int64, expectedCalls mockEthClientExpectedCalls) *evmtest.MockEth { + ethClient := new(evmmocks.Client) + ethClient.Test(t) + mockEth := &evmtest.MockEth{EthClient: ethClient} + mockEth.EthClient.On("ChainID", mock.Anything).Return(&cltest.FixtureChainID) + mockEth.EthClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything). + Return( + func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) ethereum.Subscription { + sub := mockEth.NewSub(t) + chchRawLogs <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) error { + return nil + }, + ). Times(expectedCalls.SubscribeFilterLogs) - mockEth.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)). + mockEth.EthClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)). Return(&evmtypes.Head{Number: blockHeight}, nil). Times(expectedCalls.HeaderByNumber) if expectedCalls.FilterLogs > 0 { - mockEth.ethClient.On("FilterLogs", mock.Anything, mock.Anything). + mockEth.EthClient.On("FilterLogs", mock.Anything, mock.Anything). 
Run(func(args mock.Arguments) { filterQuery := args.Get(1).(ethereum.FilterQuery) fromBlock := filterQuery.FromBlock.Int64() toBlock := filterQuery.ToBlock.Int64() - if mockEth.checkFilterLogs != nil { - mockEth.checkFilterLogs(fromBlock, toBlock) + if mockEth.CheckFilterLogs != nil { + mockEth.CheckFilterLogs(fromBlock, toBlock) } }). Return(expectedCalls.FilterLogsResult, nil). Times(expectedCalls.FilterLogs) } - mockEth.sub.On("Err"). - Return(nil) - - mockEth.sub.On("Unsubscribe"). - Return(). - Run(func(mock.Arguments) { mockEth.unsubscribeCalls.Inc() }) return mockEth } diff --git a/core/chains/evm/log/integration_test.go b/core/chains/evm/log/integration_test.go index f9990c655f7..1cea96fa581 100644 --- a/core/chains/evm/log/integration_test.go +++ b/core/chains/evm/log/integration_test.go @@ -24,6 +24,7 @@ import ( "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/flux_aggregator_wrapper" "github.com/smartcontractkit/chainlink/core/internal/testutils" + "github.com/smartcontractkit/chainlink/core/internal/testutils/evmtest" "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/utils" @@ -42,25 +43,25 @@ func TestBroadcaster_AwaitsInitialSubscribersOnStartup(t *testing.T) { helper.start() defer helper.stop() - require.Eventually(t, func() bool { return helper.mockEth.subscribeCallCount() == 0 }, cltest.WaitTimeout(t), 100*time.Millisecond) - g.Consistently(func() int32 { return helper.mockEth.subscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(0))) + require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 0 }, cltest.WaitTimeout(t), 100*time.Millisecond) + g.Consistently(func() int32 { return helper.mockEth.SubscribeCallCount() }, 1*time.Second, 
cltest.DBPollingInterval).Should(gomega.Equal(int32(0))) helper.lb.DependentReady() - require.Eventually(t, func() bool { return helper.mockEth.subscribeCallCount() == 0 }, cltest.WaitTimeout(t), 100*time.Millisecond) - g.Consistently(func() int32 { return helper.mockEth.subscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(0))) + require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 0 }, cltest.WaitTimeout(t), 100*time.Millisecond) + g.Consistently(func() int32 { return helper.mockEth.SubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(0))) helper.lb.DependentReady() - require.Eventually(t, func() bool { return helper.mockEth.subscribeCallCount() == 1 }, cltest.WaitTimeout(t), 100*time.Millisecond) - g.Consistently(func() int32 { return helper.mockEth.subscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) + require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 1 }, cltest.WaitTimeout(t), 100*time.Millisecond) + g.Consistently(func() int32 { return helper.mockEth.SubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) helper.unsubscribeAll() - require.Eventually(t, func() bool { return helper.mockEth.unsubscribeCallCount() == 1 }, cltest.WaitTimeout(t), 100*time.Millisecond) - g.Consistently(func() int32 { return helper.mockEth.unsubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) + require.Eventually(t, func() bool { return helper.mockEth.UnsubscribeCallCount() == 1 }, cltest.WaitTimeout(t), 100*time.Millisecond) + g.Consistently(func() int32 { return helper.mockEth.UnsubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) - helper.mockEth.assertExpectations(t) + helper.mockEth.AssertExpectations(t) } func TestBroadcaster_ResubscribesOnAddOrRemoveContract(t *testing.T) { 
@@ -78,9 +79,9 @@ func TestBroadcaster_ResubscribesOnAddOrRemoveContract(t *testing.T) { FilterLogs: backfillTimes, } - chchRawLogs := make(chan chan<- types.Log, backfillTimes) + chchRawLogs := make(chan evmtest.RawSub[types.Log], backfillTimes) mockEth := newMockEthClient(t, chchRawLogs, blockHeight, expectedCalls) - helper := newBroadcasterHelperWithEthClient(t, mockEth.ethClient, cltest.Head(lastStoredBlockHeight)) + helper := newBroadcasterHelperWithEthClient(t, mockEth.EthClient, cltest.Head(lastStoredBlockHeight)) helper.mockEth = mockEth blockBackfillDepth := helper.config.BlockBackfillDepth() @@ -89,7 +90,7 @@ func TestBroadcaster_ResubscribesOnAddOrRemoveContract(t *testing.T) { // the first backfill should use the height of last head saved to the db, // minus maxNumConfirmations of subscribers and minus blockBackfillDepth - mockEth.checkFilterLogs = func(fromBlock int64, toBlock int64) { + mockEth.CheckFilterLogs = func(fromBlock int64, toBlock int64) { backfillCount.Store(1) require.Equal(t, lastStoredBlockHeight-numConfirmations-int64(blockBackfillDepth), fromBlock) } @@ -106,15 +107,15 @@ func TestBroadcaster_ResubscribesOnAddOrRemoveContract(t *testing.T) { helper.start() defer helper.stop() - require.Eventually(t, func() bool { return helper.mockEth.subscribeCallCount() == 1 }, cltest.WaitTimeout(t), time.Second) - gomega.NewWithT(t).Consistently(func() int32 { return helper.mockEth.subscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) - gomega.NewWithT(t).Consistently(func() int32 { return helper.mockEth.unsubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(0))) + require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 1 }, cltest.WaitTimeout(t), time.Second) + gomega.NewWithT(t).Consistently(func() int32 { return helper.mockEth.SubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) + 
gomega.NewWithT(t).Consistently(func() int32 { return helper.mockEth.UnsubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(0))) require.Eventually(t, func() bool { return backfillCount.Load() == 1 }, cltest.WaitTimeout(t), 100*time.Millisecond) helper.unsubscribeAll() // now the backfill must use the blockBackfillDepth - mockEth.checkFilterLogs = func(fromBlock int64, toBlock int64) { + mockEth.CheckFilterLogs = func(fromBlock int64, toBlock int64) { require.Equal(t, blockHeight-int64(blockBackfillDepth), fromBlock) backfillCount.Store(2) } @@ -122,13 +123,13 @@ func TestBroadcaster_ResubscribesOnAddOrRemoveContract(t *testing.T) { listenerLast := helper.newLogListenerWithJob("last") helper.register(listenerLast, newMockContract(), 1) - require.Eventually(t, func() bool { return helper.mockEth.unsubscribeCallCount() >= 1 }, cltest.WaitTimeout(t), time.Second) - gomega.NewWithT(t).Consistently(func() int32 { return helper.mockEth.subscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(2))) - gomega.NewWithT(t).Consistently(func() int32 { return helper.mockEth.unsubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) + require.Eventually(t, func() bool { return helper.mockEth.UnsubscribeCallCount() >= 1 }, cltest.WaitTimeout(t), time.Second) + gomega.NewWithT(t).Consistently(func() int32 { return helper.mockEth.SubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(2))) + gomega.NewWithT(t).Consistently(func() int32 { return helper.mockEth.UnsubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) require.Eventually(t, func() bool { return backfillCount.Load() == 2 }, cltest.WaitTimeout(t), time.Second) - helper.mockEth.assertExpectations(t) + helper.mockEth.AssertExpectations(t) } func TestBroadcaster_BackfillOnNodeStartAndOnReplay(t *testing.T) { @@ -145,9 +146,9 @@ func 
TestBroadcaster_BackfillOnNodeStartAndOnReplay(t *testing.T) { FilterLogs: 2, } - chchRawLogs := make(chan chan<- types.Log, backfillTimes) + chchRawLogs := make(chan evmtest.RawSub[types.Log], backfillTimes) mockEth := newMockEthClient(t, chchRawLogs, blockHeight, expectedCalls) - helper := newBroadcasterHelperWithEthClient(t, mockEth.ethClient, cltest.Head(lastStoredBlockHeight)) + helper := newBroadcasterHelperWithEthClient(t, mockEth.EthClient, cltest.Head(lastStoredBlockHeight)) helper.mockEth = mockEth maxNumConfirmations := int64(10) @@ -164,7 +165,7 @@ func TestBroadcaster_BackfillOnNodeStartAndOnReplay(t *testing.T) { // the first backfill should use the height of last head saved to the db, // minus maxNumConfirmations of subscribers and minus blockBackfillDepth - mockEth.checkFilterLogs = func(fromBlock int64, toBlock int64) { + mockEth.CheckFilterLogs = func(fromBlock int64, toBlock int64) { times := backfillCount.Inc() - 1 if times == 0 { require.Equal(t, lastStoredBlockHeight-maxNumConfirmations-int64(blockBackfillDepth), fromBlock) @@ -177,7 +178,7 @@ func TestBroadcaster_BackfillOnNodeStartAndOnReplay(t *testing.T) { helper.start() defer helper.stop() - require.Eventually(t, func() bool { return helper.mockEth.subscribeCallCount() == 1 }, cltest.WaitTimeout(t), time.Second) + require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 1 }, cltest.WaitTimeout(t), time.Second) require.Eventually(t, func() bool { return backfillCount.Load() == 1 }, cltest.WaitTimeout(t), time.Second) helper.lb.ReplayFromBlock(replayFrom, false) @@ -185,8 +186,8 @@ func TestBroadcaster_BackfillOnNodeStartAndOnReplay(t *testing.T) { require.Eventually(t, func() bool { return backfillCount.Load() >= 2 }, cltest.WaitTimeout(t), time.Second) }() - require.Eventually(t, func() bool { return helper.mockEth.unsubscribeCallCount() >= 1 }, cltest.WaitTimeout(t), time.Second) - helper.mockEth.assertExpectations(t) + require.Eventually(t, func() bool { return 
helper.mockEth.UnsubscribeCallCount() >= 1 }, cltest.WaitTimeout(t), time.Second) + helper.mockEth.AssertExpectations(t) } func TestBroadcaster_ReplaysLogs(t *testing.T) { @@ -202,11 +203,11 @@ func TestBroadcaster_ReplaysLogs(t *testing.T) { blocks.LogOnBlockNum(7, contract.Address()), } - mockEth := newMockEthClient(t, make(chan chan<- types.Log, 4), blockHeight, mockEthClientExpectedCalls{ + mockEth := newMockEthClient(t, make(chan evmtest.RawSub[types.Log], 4), blockHeight, mockEthClientExpectedCalls{ FilterLogs: 4, FilterLogsResult: sentLogs, }) - helper := newBroadcasterHelperWithEthClient(t, mockEth.ethClient, cltest.Head(blockHeight)) + helper := newBroadcasterHelperWithEthClient(t, mockEth.EthClient, cltest.Head(blockHeight)) helper.mockEth = mockEth listener := helper.newLogListenerWithJob("listener") @@ -257,7 +258,7 @@ func TestBroadcaster_ReplaysLogs(t *testing.T) { }() - require.Eventually(t, func() bool { return helper.mockEth.unsubscribeCallCount() >= 1 }, cltest.WaitTimeout(t), time.Second) + require.Eventually(t, func() bool { return helper.mockEth.UnsubscribeCallCount() >= 1 }, cltest.WaitTimeout(t), time.Second) } func TestBroadcaster_BackfillUnconsumedAfterCrash(t *testing.T) { @@ -309,8 +310,8 @@ func TestBroadcaster_BackfillUnconsumedAfterCrash(t *testing.T) { }) chRawLogs := <-helper.chchRawLogs - chRawLogs <- log1 - chRawLogs <- log2 + chRawLogs.TrySend(log1) + chRawLogs.TrySend(log2) <-headsDone @@ -460,9 +461,9 @@ func TestBroadcaster_ShallowBackfillOnNodeStart(t *testing.T) { FilterLogs: backfillTimes, } - chchRawLogs := make(chan chan<- types.Log, backfillTimes) + chchRawLogs := make(chan evmtest.RawSub[types.Log], backfillTimes) mockEth := newMockEthClient(t, chchRawLogs, blockHeight, expectedCalls) - helper := newBroadcasterHelperWithEthClient(t, mockEth.ethClient, cltest.Head(lastStoredBlockHeight)) + helper := newBroadcasterHelperWithEthClient(t, mockEth.EthClient, cltest.Head(lastStoredBlockHeight)) helper.mockEth = mockEth 
backfillDepth := 15 @@ -479,7 +480,7 @@ func TestBroadcaster_ShallowBackfillOnNodeStart(t *testing.T) { helper.register(listener2, newMockContract(), uint32(2)) // the backfill does not use the height from DB because BlockBackfillSkip is true - mockEth.checkFilterLogs = func(fromBlock int64, toBlock int64) { + mockEth.CheckFilterLogs = func(fromBlock int64, toBlock int64) { backfillCount.Store(1) require.Equal(t, blockHeight-int64(backfillDepth), fromBlock) } @@ -488,12 +489,12 @@ func TestBroadcaster_ShallowBackfillOnNodeStart(t *testing.T) { helper.start() defer helper.stop() - require.Eventually(t, func() bool { return helper.mockEth.subscribeCallCount() == 1 }, cltest.WaitTimeout(t), time.Second) + require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 1 }, cltest.WaitTimeout(t), time.Second) require.Eventually(t, func() bool { return backfillCount.Load() == 1 }, cltest.WaitTimeout(t), time.Second) }() - require.Eventually(t, func() bool { return helper.mockEth.unsubscribeCallCount() >= 1 }, cltest.WaitTimeout(t), time.Second) - helper.mockEth.assertExpectations(t) + require.Eventually(t, func() bool { return helper.mockEth.UnsubscribeCallCount() >= 1 }, cltest.WaitTimeout(t), time.Second) + helper.mockEth.AssertExpectations(t) } func TestBroadcaster_BackfillInBatches(t *testing.T) { @@ -512,9 +513,9 @@ func TestBroadcaster_BackfillInBatches(t *testing.T) { FilterLogs: expectedBatches, } - chchRawLogs := make(chan chan<- types.Log, backfillTimes) + chchRawLogs := make(chan evmtest.RawSub[types.Log], backfillTimes) mockEth := newMockEthClient(t, chchRawLogs, blockHeight, expectedCalls) - helper := newBroadcasterHelperWithEthClient(t, mockEth.ethClient, cltest.Head(lastStoredBlockHeight)) + helper := newBroadcasterHelperWithEthClient(t, mockEth.EthClient, cltest.Head(lastStoredBlockHeight)) helper.mockEth = mockEth blockBackfillDepth := helper.config.BlockBackfillDepth() @@ -525,9 +526,9 @@ func TestBroadcaster_BackfillInBatches(t 
*testing.T) { lggr := logger.TestLogger(t) backfillStart := lastStoredBlockHeight - numConfirmations - int64(blockBackfillDepth) // the first backfill should start from before the last stored head - mockEth.checkFilterLogs = func(fromBlock int64, toBlock int64) { + mockEth.CheckFilterLogs = func(fromBlock int64, toBlock int64) { times := backfillCount.Inc() - 1 - lggr.Warnf("Log Batch: --------- times %v - %v, %v", times, fromBlock, toBlock) + lggr.Infof("Log Batch: --------- times %v - %v, %v", times, fromBlock, toBlock) if times <= 7 { require.Equal(t, backfillStart+batchSize*times, fromBlock) @@ -549,9 +550,9 @@ func TestBroadcaster_BackfillInBatches(t *testing.T) { helper.unsubscribeAll() - require.Eventually(t, func() bool { return helper.mockEth.unsubscribeCallCount() >= 1 }, cltest.WaitTimeout(t), time.Second) + require.Eventually(t, func() bool { return helper.mockEth.UnsubscribeCallCount() >= 1 }, cltest.WaitTimeout(t), time.Second) - helper.mockEth.assertExpectations(t) + helper.mockEth.AssertExpectations(t) } func TestBroadcaster_BackfillALargeNumberOfLogs(t *testing.T) { @@ -585,9 +586,9 @@ func TestBroadcaster_BackfillALargeNumberOfLogs(t *testing.T) { FilterLogsResult: backfilledLogs, } - chchRawLogs := make(chan chan<- types.Log, backfillTimes) + chchRawLogs := make(chan evmtest.RawSub[types.Log], backfillTimes) mockEth := newMockEthClient(t, chchRawLogs, blockHeight, expectedCalls) - helper := newBroadcasterHelperWithEthClient(t, mockEth.ethClient, cltest.Head(lastStoredBlockHeight)) + helper := newBroadcasterHelperWithEthClient(t, mockEth.EthClient, cltest.Head(lastStoredBlockHeight)) helper.mockEth = mockEth helper.globalConfig.Overrides.GlobalEvmLogBackfillBatchSize = null.IntFrom(int64(batchSize)) @@ -595,7 +596,7 @@ func TestBroadcaster_BackfillALargeNumberOfLogs(t *testing.T) { var backfillCount atomic.Int64 lggr := logger.TestLogger(t) - mockEth.checkFilterLogs = func(fromBlock int64, toBlock int64) { + mockEth.CheckFilterLogs = 
func(fromBlock int64, toBlock int64) { times := backfillCount.Inc() - 1 lggr.Warnf("Log Batch: --------- times %v - %v, %v", times, fromBlock, toBlock) } @@ -607,9 +608,9 @@ func TestBroadcaster_BackfillALargeNumberOfLogs(t *testing.T) { g.Eventually(func() int64 { return backfillCount.Load() }, cltest.WaitTimeout(t), time.Second).Should(gomega.Equal(int64(expectedBatches))) helper.unsubscribeAll() - g.Eventually(func() int32 { return helper.mockEth.unsubscribeCallCount() }, cltest.WaitTimeout(t), time.Second).Should(gomega.BeNumerically(">=", int32(1))) + g.Eventually(func() int32 { return helper.mockEth.UnsubscribeCallCount() }, cltest.WaitTimeout(t), time.Second).Should(gomega.BeNumerically(">=", int32(1))) - helper.mockEth.assertExpectations(t) + helper.mockEth.AssertExpectations(t) } func TestBroadcaster_BroadcastsToCorrectRecipients(t *testing.T) { @@ -638,14 +639,15 @@ func TestBroadcaster_BroadcastsToCorrectRecipients(t *testing.T) { listener3 := helper.newLogListenerWithJob("listener 3") listener4 := helper.newLogListenerWithJob("listener 4") + helper.register(listener1, contract1, 1) + helper.register(listener2, contract1, 1) + helper.register(listener3, contract2, 1) + helper.register(listener4, contract2, 1) + func() { helper.start() defer helper.stop() - helper.register(listener1, contract1, 1) - helper.register(listener2, contract1, 1) - helper.register(listener3, contract2, 1) - helper.register(listener4, contract2, 1) headsDone := cltest.SimulateIncomingHeads(t, cltest.SimulateIncomingHeadsArgs{ StartBlock: 0, EndBlock: 9, @@ -658,10 +660,10 @@ func TestBroadcaster_BroadcastsToCorrectRecipients(t *testing.T) { chRawLogs := <-helper.chchRawLogs for _, log := range addr1SentLogs { - chRawLogs <- log + chRawLogs.TrySend(log) } for _, log := range addr2SentLogs { - chRawLogs <- log + chRawLogs.TrySend(log) } <-headsDone @@ -673,7 +675,7 @@ func TestBroadcaster_BroadcastsToCorrectRecipients(t *testing.T) { requireEqualLogs(t, addr2SentLogs, 
listener3.received.getUniqueLogs()) requireEqualLogs(t, addr2SentLogs, listener4.received.getUniqueLogs()) }() - helper.mockEth.assertExpectations(t) + helper.mockEth.AssertExpectations(t) } func TestBroadcaster_BroadcastsAtCorrectHeights(t *testing.T) { @@ -707,7 +709,7 @@ func TestBroadcaster_BroadcastsAtCorrectHeights(t *testing.T) { chRawLogs := <-helper.chchRawLogs for _, log := range addr1SentLogs { - chRawLogs <- log + chRawLogs.TrySend(log) } helper.requireBroadcastCount(5) @@ -756,7 +758,7 @@ func TestBroadcaster_BroadcastsAtCorrectHeights(t *testing.T) { assert.Equal(t, len(logsOnBlocks), len(expectedLogsOnBlocks)) require.Equal(t, logsOnBlocks, expectedLogsOnBlocks) - helper.mockEth.assertExpectations(t) + helper.mockEth.AssertExpectations(t) } func TestBroadcaster_DeletesOldLogsAfterNumberOfHeads(t *testing.T) { @@ -794,7 +796,7 @@ func TestBroadcaster_DeletesOldLogsAfterNumberOfHeads(t *testing.T) { chRawLogs := <-helper.chchRawLogs for _, log := range addr1SentLogs { - chRawLogs <- log + chRawLogs.TrySend(log) } helper.requireBroadcastCount(6) @@ -859,7 +861,7 @@ func TestBroadcaster_DeletesOldLogsOnlyAfterFinalityDepth(t *testing.T) { chRawLogs := <-helper.chchRawLogs for _, log := range addr1SentLogs { - chRawLogs <- log + chRawLogs.TrySend(log) } <-headsDone @@ -961,7 +963,7 @@ func TestBroadcaster_FilterByTopicValues(t *testing.T) { chRawLogs := <-helper.chchRawLogs for _, log := range addr1SentLogs { - chRawLogs <- log + chRawLogs.TrySend(log) } <-headsDone @@ -997,9 +999,9 @@ func TestBroadcaster_BroadcastsWithOneDelayedLog(t *testing.T) { chRawLogs := <-helper.chchRawLogs - chRawLogs <- addr1SentLogs[0] - chRawLogs <- addr1SentLogs[1] - chRawLogs <- addr1SentLogs[2] + chRawLogs.TrySend(addr1SentLogs[0]) + chRawLogs.TrySend(addr1SentLogs[1]) + chRawLogs.TrySend(addr1SentLogs[2]) <-cltest.SimulateIncomingHeads(t, cltest.SimulateIncomingHeadsArgs{ StartBlock: 0, @@ -1008,7 +1010,7 @@ func TestBroadcaster_BroadcastsWithOneDelayedLog(t *testing.T) { 
Blocks: blocks, }) - chRawLogs <- addr1SentLogs[3] + chRawLogs.TrySend(addr1SentLogs[3]) <-cltest.SimulateIncomingHeads(t, cltest.SimulateIncomingHeadsArgs{ StartBlock: 4, @@ -1020,7 +1022,7 @@ func TestBroadcaster_BroadcastsWithOneDelayedLog(t *testing.T) { helper.requireBroadcastCount(4) helper.stop() - helper.mockEth.assertExpectations(t) + helper.mockEth.AssertExpectations(t) } func TestBroadcaster_BroadcastsAtCorrectHeightsWithLogsEarlierThanHeads(t *testing.T) { @@ -1044,7 +1046,7 @@ func TestBroadcaster_BroadcastsAtCorrectHeightsWithLogsEarlierThanHeads(t *testi chRawLogs := <-helper.chchRawLogs for _, log := range addr1SentLogs { - chRawLogs <- log + chRawLogs.TrySend(log) } <-cltest.SimulateIncomingHeads(t, cltest.SimulateIncomingHeadsArgs{ @@ -1068,7 +1070,7 @@ func TestBroadcaster_BroadcastsAtCorrectHeightsWithLogsEarlierThanHeads(t *testi listener1.received.getLogs(), ) - helper.mockEth.assertExpectations(t) + helper.mockEth.AssertExpectations(t) } func TestBroadcaster_BroadcastsAtCorrectHeightsWithHeadsEarlierThanLogs(t *testing.T) { @@ -1100,7 +1102,7 @@ func TestBroadcaster_BroadcastsAtCorrectHeightsWithHeadsEarlierThanLogs(t *testi }) for _, log := range addr1SentLogs { - chRawLogs <- log + chRawLogs.TrySend(log) } <-cltest.SimulateIncomingHeads(t, cltest.SimulateIncomingHeadsArgs{ @@ -1124,7 +1126,7 @@ func TestBroadcaster_BroadcastsAtCorrectHeightsWithHeadsEarlierThanLogs(t *testi listener1.received.getLogs(), ) - helper.mockEth.assertExpectations(t) + helper.mockEth.AssertExpectations(t) } func TestBroadcaster_Register_ResubscribesToMostRecentlySeenBlock(t *testing.T) { @@ -1134,28 +1136,41 @@ func TestBroadcaster_Register_ResubscribesToMostRecentlySeenBlock(t *testing.T) expectedBlock = 5 ) var ( - ethClient, sub = cltest.NewEthClientAndSubMock(t) - contract0 = newMockContract() - contract1 = newMockContract() - contract2 = newMockContract() + ethClient = new(evmmocks.Client) + contract0 = newMockContract() + contract1 = newMockContract() + 
contract2 = newMockContract() ) - - chchRawLogs := make(chan chan<- types.Log, backfillTimes) + ethClient.Test(t) + mockEth := &evmtest.MockEth{EthClient: ethClient} + chchRawLogs := make(chan evmtest.RawSub[types.Log], backfillTimes) chStarted := make(chan struct{}) ethClient.On("ChainID", mock.Anything).Return(&cltest.FixtureChainID) ethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything). - Run(func(args mock.Arguments) { - chchRawLogs <- args.Get(2).(chan<- types.Log) - close(chStarted) - }). - Return(sub, nil). + Return( + func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) ethereum.Subscription { + defer close(chStarted) + sub := mockEth.NewSub(t) + chchRawLogs <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) error { + return nil + }, + ). Once() ethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything). - Run(func(args mock.Arguments) { - chchRawLogs <- args.Get(2).(chan<- types.Log) - }). - Return(sub, nil). + Return( + func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) ethereum.Subscription { + sub := mockEth.NewSub(t) + chchRawLogs <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) error { + return nil + }, + ). Times(3) ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)). @@ -1194,9 +1209,6 @@ func TestBroadcaster_Register_ResubscribesToMostRecentlySeenBlock(t *testing.T) Return(nil, nil). 
Once() - sub.On("Unsubscribe").Return() - sub.On("Err").Return(nil) - helper := newBroadcasterHelperWithEthClient(t, ethClient, nil) helper.lb.AddDependents(1) helper.start() @@ -1252,7 +1264,6 @@ func TestBroadcaster_Register_ResubscribesToMostRecentlySeenBlock(t *testing.T) } cltest.EventuallyExpectationsMet(t, ethClient, cltest.WaitTimeout(t), time.Second) - cltest.EventuallyExpectationsMet(t, sub, cltest.WaitTimeout(t), time.Second) } func TestBroadcaster_ReceivesAllLogsWhenResubscribing(t *testing.T) { @@ -1371,7 +1382,7 @@ func TestBroadcaster_ReceivesAllLogsWhenResubscribing(t *testing.T) { EndBlock: test.blockHeight2 + 1, Blocks: blocks, HeadTrackables: []httypes.HeadTrackable{(helper.lb).(httypes.HeadTrackable), cltest.HeadTrackableFunc(func(_ context.Context, head *evmtypes.Head) { - lggr.Warnf("------------ HEAD TRACKABLE (%v) --------------", head.Number) + lggr.Infof("------------ HEAD TRACKABLE (%v) --------------", head.Number) if _, exists := logsA[uint(head.Number)]; !exists { lggr.Warnf(" ** not exists") return @@ -1380,7 +1391,7 @@ func TestBroadcaster_ReceivesAllLogsWhenResubscribing(t *testing.T) { return } lggr.Warnf(" ** yup!") - chRawLogs1 <- logsA[uint(head.Number)] + chRawLogs1.TrySend(logsA[uint(head.Number)]) })}, }) @@ -1389,10 +1400,10 @@ func TestBroadcaster_ReceivesAllLogsWhenResubscribing(t *testing.T) { logListenerA.requireAllReceived(t, expectedA) <-headsDone - helper.mockEth.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(&evmtypes.Head{Number: test.blockHeight2}, nil).Once() + helper.mockEth.EthClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(&evmtypes.Head{Number: test.blockHeight2}, nil).Once() combinedLogs := append(pickLogs(logsA, test.backfillableLogs), pickLogs(logsB, test.backfillableLogs)...) 
- call := helper.mockEth.ethClient.On("FilterLogs", mock.Anything, mock.Anything).Return(combinedLogs, nil).Once() + call := helper.mockEth.EthClient.On("FilterLogs", mock.Anything, mock.Anything).Return(combinedLogs, nil).Once() call.Run(func(args mock.Arguments) { // Validate that the ethereum.FilterQuery is specified correctly for the backfill that we expect fromBlock := args.Get(1).(ethereum.FilterQuery).FromBlock @@ -1412,18 +1423,19 @@ func TestBroadcaster_ReceivesAllLogsWhenResubscribing(t *testing.T) { // Send second batch of new logs chRawLogs2 := <-helper.chchRawLogs - _ = cltest.SimulateIncomingHeads(t, cltest.SimulateIncomingHeadsArgs{ + headsDone = cltest.SimulateIncomingHeads(t, cltest.SimulateIncomingHeadsArgs{ StartBlock: test.blockHeight2, Blocks: blocks, HeadTrackables: []httypes.HeadTrackable{(helper.lb).(httypes.HeadTrackable), cltest.HeadTrackableFunc(func(_ context.Context, head *evmtypes.Head) { if _, exists := logsA[uint(head.Number)]; exists && batchContains(test.batch2, uint(head.Number)) { - chRawLogs2 <- logsA[uint(head.Number)] + chRawLogs2.TrySend(logsA[uint(head.Number)]) } if _, exists := logsB[uint(head.Number)]; exists && batchContains(test.batch2, uint(head.Number)) { - chRawLogs2 <- logsB[uint(head.Number)] + chRawLogs2.TrySend(logsB[uint(head.Number)]) } })}, }) + defer func() { <-headsDone }() expectedA = newReceived(pickLogs(logsA, test.expectedFilteredA)) expectedB := newReceived(pickLogs(logsB, test.expectedFilteredB)) @@ -1431,7 +1443,7 @@ func TestBroadcaster_ReceivesAllLogsWhenResubscribing(t *testing.T) { logListenerB.requireAllReceived(t, expectedB) helper.requireBroadcastCount(len(test.expectedFilteredA) + len(test.expectedFilteredB)) - helper.mockEth.ethClient.AssertExpectations(t) + helper.mockEth.EthClient.AssertExpectations(t) }) } } @@ -1525,14 +1537,14 @@ func TestBroadcaster_InjectsBroadcastRecordFunctions(t *testing.T) { chRawLogs := <-helper.chchRawLogs - chRawLogs <- log1 - chRawLogs <- log2 + 
chRawLogs.TrySend(log1) + chRawLogs.TrySend(log2) <-headsDone require.Eventually(t, func() bool { return len(logListener.received.getUniqueLogs()) >= 2 }, cltest.WaitTimeout(t), time.Second) helper.requireBroadcastCount(2) - helper.mockEth.ethClient.AssertExpectations(t) + helper.mockEth.EthClient.AssertExpectations(t) } func TestBroadcaster_ProcessesLogsFromReorgsAndMissedHead(t *testing.T) { @@ -1602,11 +1614,7 @@ func TestBroadcaster_ProcessesLogsFromReorgsAndMissedHead(t *testing.T) { ctx, _ := context.WithTimeout(context.Background(), cltest.WaitTimeout(t)) (helper.lb).(httypes.HeadTrackable).OnNewLongestChain(ctx, x) case types.Log: - select { - case chRawLogs <- x: - case <-time.After(cltest.WaitTimeout(t)): - t.Fatal("timed out sending log") - } + chRawLogs.TrySend(x) } time.Sleep(250 * time.Millisecond) } @@ -1621,7 +1629,7 @@ func TestBroadcaster_ProcessesLogsFromReorgsAndMissedHead(t *testing.T) { require.Equal(t, expectedA, listenerA.getUniqueLogs()) require.Equal(t, expectedB, listenerB.getUniqueLogs()) - helper.mockEth.ethClient.AssertExpectations(t) + helper.mockEth.EthClient.AssertExpectations(t) } func TestBroadcaster_BackfillsForNewListeners(t *testing.T) { @@ -1629,8 +1637,8 @@ func TestBroadcaster_BackfillsForNewListeners(t *testing.T) { const blockHeight int64 = 0 helper := newBroadcasterHelper(t, blockHeight, 2) - helper.mockEth.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(&evmtypes.Head{Number: blockHeight}, nil).Times(2) - helper.mockEth.ethClient.On("FilterLogs", mock.Anything, mock.Anything).Return(nil, nil).Times(2) + helper.mockEth.EthClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(&evmtypes.Head{Number: blockHeight}, nil).Times(2) + helper.mockEth.EthClient.On("FilterLogs", mock.Anything, mock.Anything).Return(nil, nil).Times(2) helper.start() defer helper.stop() @@ -1646,8 +1654,8 @@ func TestBroadcaster_BackfillsForNewListeners(t *testing.T) { 
flux_aggregator_wrapper.FluxAggregatorAnswerUpdated{}, } helper.registerWithTopics(listener1, contract, topics1, 1) - require.Eventually(t, func() bool { return helper.mockEth.subscribeCallCount() == 1 }, cltest.WaitTimeout(t), 100*time.Millisecond) - g.Consistently(func() int32 { return helper.mockEth.subscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) + require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 1 }, cltest.WaitTimeout(t), 100*time.Millisecond) + g.Consistently(func() int32 { return helper.mockEth.SubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) <-helper.chchRawLogs @@ -1655,8 +1663,8 @@ func TestBroadcaster_BackfillsForNewListeners(t *testing.T) { flux_aggregator_wrapper.FluxAggregatorNewRound{}, } helper.registerWithTopics(listener2, contract, topics2, 1) - require.Eventually(t, func() bool { return helper.mockEth.subscribeCallCount() == 2 }, cltest.WaitTimeout(t), 100*time.Millisecond) - g.Consistently(func() int32 { return helper.mockEth.subscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(2))) + require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 2 }, cltest.WaitTimeout(t), 100*time.Millisecond) + g.Consistently(func() int32 { return helper.mockEth.SubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(2))) helper.unsubscribeAll() } @@ -1677,28 +1685,26 @@ func requireEqualLogs(t *testing.T, expectedLogs, actualLogs []types.Log) { } } -type sub struct { -} - -func (s sub) Unsubscribe() { -} - -func (s sub) Err() <-chan error { - return nil -} - func TestBroadcaster_BroadcastsWithZeroConfirmations(t *testing.T) { gm := gomega.NewWithT(t) ethClient := new(evmmocks.Client) ethClient.Test(t) + mockEth := &evmtest.MockEth{EthClient: ethClient} ethClient.On("ChainID").Return(big.NewInt(0)).Maybe() - logsChCh := make(chan chan<- 
types.Log) + logsChCh := make(chan evmtest.RawSub[types.Log]) ethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything). - Run(func(args mock.Arguments) { - logsChCh <- args.Get(2).(chan<- types.Log) - }). - Return(sub{}, nil) + Return( + func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) ethereum.Subscription { + sub := mockEth.NewSub(t) + logsChCh <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) error { + return nil + }, + ). + Once() ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)). Return(&evmtypes.Head{Number: 1}, nil) ethClient.On("FilterLogs", mock.Anything, mock.Anything). @@ -1761,11 +1767,7 @@ func TestBroadcaster_BroadcastsWithZeroConfirmations(t *testing.T) { logs := <-logsChCh for _, log := range addr1SentLogs { - select { - case logs <- log: - case <-time.After(time.Second): - t.Error("failed to send log to log broadcaster") - } + logs.TrySend(log) } // Wait until the logpool has the 3 logs gm.Eventually(func() bool { diff --git a/core/chains/evm/log/mock_iLogPool_test.go b/core/chains/evm/log/mock_iLogPool_test.go index 2559fae11c1..4ac9dcd84c0 100644 --- a/core/chains/evm/log/mock_iLogPool_test.go +++ b/core/chains/evm/log/mock_iLogPool_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package log diff --git a/core/chains/evm/log/mocks/broadcast.go b/core/chains/evm/log/mocks/broadcast.go index 00f91399217..65fe1b2d110 100644 --- a/core/chains/evm/log/mocks/broadcast.go +++ b/core/chains/evm/log/mocks/broadcast.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/chains/evm/log/mocks/broadcaster.go b/core/chains/evm/log/mocks/broadcaster.go index f90e55dcacf..193f309e9a9 100644 --- a/core/chains/evm/log/mocks/broadcaster.go +++ b/core/chains/evm/log/mocks/broadcaster.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks @@ -107,6 +107,27 @@ func (_m *Broadcaster) MarkConsumed(lb log.Broadcast, qopts ...pg.QOpt) error { return r0 } +// MarkManyConsumed provides a mock function with given fields: lbs, qopts +func (_m *Broadcaster) MarkManyConsumed(lbs []log.Broadcast, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, lbs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 error + if rf, ok := ret.Get(0).(func([]log.Broadcast, ...pg.QOpt) error); ok { + r0 = rf(lbs, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // OnNewLongestChain provides a mock function with given fields: ctx, head func (_m *Broadcaster) OnNewLongestChain(ctx context.Context, head *types.Head) { _m.Called(ctx, head) diff --git a/core/chains/evm/log/mocks/config.go b/core/chains/evm/log/mocks/config.go index d48173805eb..b403eaed253 100644 --- a/core/chains/evm/log/mocks/config.go +++ b/core/chains/evm/log/mocks/config.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/chains/evm/log/mocks/listener.go b/core/chains/evm/log/mocks/listener.go index e020ba41215..bdf29fb5faa 100644 --- a/core/chains/evm/log/mocks/listener.go +++ b/core/chains/evm/log/mocks/listener.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/chains/evm/log/mocks/orm.go b/core/chains/evm/log/mocks/orm.go index e6f329773f7..eba3d64d184 100644 --- a/core/chains/evm/log/mocks/orm.go +++ b/core/chains/evm/log/mocks/orm.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks @@ -109,6 +109,27 @@ func (_m *ORM) MarkBroadcastConsumed(blockHash common.Hash, blockNumber uint64, return r0 } +// MarkBroadcastsConsumed provides a mock function with given fields: blockHashes, blockNumbers, logIndexes, jobIDs, qopts +func (_m *ORM) MarkBroadcastsConsumed(blockHashes []common.Hash, blockNumbers []uint64, logIndexes []uint, jobIDs []int32, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, blockHashes, blockNumbers, logIndexes, jobIDs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 error + if rf, ok := ret.Get(0).(func([]common.Hash, []uint64, []uint, []int32, ...pg.QOpt) error); ok { + r0 = rf(blockHashes, blockNumbers, logIndexes, jobIDs, qopts...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + // MarkBroadcastsUnconsumed provides a mock function with given fields: fromBlock, qopts func (_m *ORM) MarkBroadcastsUnconsumed(fromBlock int64, qopts ...pg.QOpt) error { _va := make([]interface{}, len(qopts)) diff --git a/core/chains/evm/log/orm.go b/core/chains/evm/log/orm.go index 3761b54220a..d596649f278 100644 --- a/core/chains/evm/log/orm.go +++ b/core/chains/evm/log/orm.go @@ -2,16 +2,18 @@ package log import ( "database/sql" + "fmt" "math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/pkg/errors" + "github.com/smartcontractkit/sqlx" + "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/pg" "github.com/smartcontractkit/chainlink/core/utils" - "github.com/smartcontractkit/sqlx" ) //go:generate mockery --name ORM --output ./mocks/ --case=underscore --structname ORM --filename orm.go @@ -31,6 +33,8 @@ type ORM interface { WasBroadcastConsumed(blockHash common.Hash, logIndex uint, jobID int32, qopts ...pg.QOpt) (bool, error) // MarkBroadcastConsumed marks the log broadcast as consumed by jobID. MarkBroadcastConsumed(blockHash common.Hash, blockNumber uint64, logIndex uint, jobID int32, qopts ...pg.QOpt) error + // MarkBroadcastsConsumed marks the log broadcasts as consumed by jobID. + MarkBroadcastsConsumed(blockHashes []common.Hash, blockNumbers []uint64, logIndexes []uint, jobIDs []int32, qopts ...pg.QOpt) error // MarkBroadcastsUnconsumed marks all log broadcasts from all jobs on or after fromBlock as // unconsumed. MarkBroadcastsUnconsumed(fromBlock int64, qopts ...pg.QOpt) error @@ -113,6 +117,43 @@ func (o *orm) MarkBroadcastConsumed(blockHash common.Hash, blockNumber uint64, l return errors.Wrap(err, "failed to mark log broadcast as consumed") } +// MarkBroadcastsConsumed marks many broadcasts as consumed. +// The lengths of all the provided slices must be equal, otherwise an error is returned. 
+func (o *orm) MarkBroadcastsConsumed(blockHashes []common.Hash, blockNumbers []uint64, logIndexes []uint, jobIDs []int32, qopts ...pg.QOpt) error { + if !utils.AllEqual(len(blockHashes), len(blockNumbers), len(logIndexes), len(jobIDs)) { + return fmt.Errorf("all arg slice lengths must be equal, got: %d %d %d %d", + len(blockHashes), len(blockNumbers), len(logIndexes), len(jobIDs), + ) + } + + type input struct { + BlockHash common.Hash `db:"blockHash"` + BlockNumber uint64 `db:"blockNumber"` + LogIndex uint `db:"logIndex"` + JobID int32 `db:"jobID"` + ChainID utils.Big `db:"chainID"` + } + inputs := make([]input, len(blockHashes)) + query := ` +INSERT INTO log_broadcasts (block_hash, block_number, log_index, job_id, created_at, updated_at, consumed, evm_chain_id) +VALUES (:blockHash, :blockNumber, :logIndex, :jobID, NOW(), NOW(), true, :chainID) +ON CONFLICT (job_id, block_hash, log_index, evm_chain_id) DO UPDATE +SET consumed = true, updated_at = NOW(); + ` + for i := range blockHashes { + inputs[i] = input{ + BlockHash: blockHashes[i], + BlockNumber: blockNumbers[i], + LogIndex: logIndexes[i], + JobID: jobIDs[i], + ChainID: o.evmChainID, + } + } + q := o.q.WithOpts(qopts...) + _, err := q.NamedExec(query, inputs) + return errors.Wrap(err, "mark broadcasts consumed") +} + // MarkBroadcastsUnconsumed implements the ORM interface. func (o *orm) MarkBroadcastsUnconsumed(fromBlock int64, qopts ...pg.QOpt) error { q := o.q.WithOpts(qopts...) 
diff --git a/core/chains/evm/log/orm_test.go b/core/chains/evm/log/orm_test.go index 9d57d438ac2..ee619e1637f 100644 --- a/core/chains/evm/log/orm_test.go +++ b/core/chains/evm/log/orm_test.go @@ -73,6 +73,56 @@ func TestORM_broadcasts(t *testing.T) { require.Equal(t, null.BoolFrom(true), consumed) })) + t.Run("MarkBroadcastsConsumed Success", func(t *testing.T) { + var ( + err error + blockHashes []common.Hash + blockNumbers []uint64 + logIndexes []uint + jobIDs []int32 + ) + for i := 0; i < 3; i++ { + l := cltest.RandomLog(t) + err = orm.CreateBroadcast(l.BlockHash, l.BlockNumber, l.Index, listener.JobID()) + require.NoError(t, err) + blockHashes = append(blockHashes, l.BlockHash) + blockNumbers = append(blockNumbers, l.BlockNumber) + logIndexes = append(logIndexes, l.Index) + jobIDs = append(jobIDs, listener.JobID()) + + } + err = orm.MarkBroadcastsConsumed(blockHashes, blockNumbers, logIndexes, jobIDs) + require.NoError(t, err) + + for i := range blockHashes { + was, err := orm.WasBroadcastConsumed(blockHashes[i], logIndexes[i], jobIDs[i]) + require.NoError(t, err) + require.True(t, was) + } + }) + + t.Run("MarkBroadcastsConsumed Failure", func(t *testing.T) { + var ( + err error + blockHashes []common.Hash + blockNumbers []uint64 + logIndexes []uint + jobIDs []int32 + ) + for i := 0; i < 5; i++ { + l := cltest.RandomLog(t) + err = orm.CreateBroadcast(l.BlockHash, l.BlockNumber, l.Index, listener.JobID()) + require.NoError(t, err) + blockHashes = append(blockHashes, l.BlockHash) + blockNumbers = append(blockNumbers, l.BlockNumber) + logIndexes = append(logIndexes, l.Index) + jobIDs = append(jobIDs, listener.JobID()) + + } + err = orm.MarkBroadcastsConsumed(blockHashes[:len(blockHashes)-2], blockNumbers, logIndexes, jobIDs) + require.Error(t, err) + }) + t.Run("WasBroadcastConsumed_true", func(t *testing.T) { was, err := orm.WasBroadcastConsumed(rawLog.BlockHash, rawLog.Index, listener.JobID()) require.NoError(t, err) diff --git 
a/core/chains/evm/log/registrations_test.go b/core/chains/evm/log/registrations_test.go index 3f18b0669db..d78227fbff5 100644 --- a/core/chains/evm/log/registrations_test.go +++ b/core/chains/evm/log/registrations_test.go @@ -83,9 +83,9 @@ func TestUnit_Registrations_addSubscriber_removeSubscriber(t *testing.T) { l := newTestListener(t, 1) topic1 := utils.NewHash() - topicValueFilters1 := [][]Topic{[]Topic{newTopic(), newTopic()}, []Topic{newTopic()}, []Topic{}} + topicValueFilters1 := [][]Topic{{newTopic(), newTopic()}, {newTopic()}, {}} topic2 := utils.NewHash() - topicValueFilters2 := [][]Topic{[]Topic{newTopic()}} + topicValueFilters2 := [][]Topic{{newTopic()}} topic3 := utils.NewHash() topicValueFilters3 := [][]Topic{} logsWithTopics := make(map[common.Hash][][]Topic) @@ -103,7 +103,7 @@ func TestUnit_Registrations_addSubscriber_removeSubscriber(t *testing.T) { // same contract, different topics l3 := newTestListener(t, 3) topic4 := utils.NewHash() - topicValueFilters4 := [][]Topic{[]Topic{newTopic()}} + topicValueFilters4 := [][]Topic{{newTopic()}} logsWithTopics3 := make(map[common.Hash][][]Topic) logsWithTopics3[topic4] = topicValueFilters4 opts3 := opts diff --git a/core/chains/evm/logpoller/integration_test.go b/core/chains/evm/logpoller/integration_test.go new file mode 100644 index 00000000000..bff860e9ad0 --- /dev/null +++ b/core/chains/evm/logpoller/integration_test.go @@ -0,0 +1,117 @@ +package logpoller_test + +import ( + "context" + "fmt" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/core/internal/cltest/heavyweight" + 
"github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/log_emitter" + "github.com/smartcontractkit/chainlink/core/internal/testutils" + "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/utils" +) + +func TestPopulateLoadedDB(t *testing.T) { + t.Skip("only for local load testing and query analysis") + lggr := logger.TestLogger(t) + _, db := heavyweight.FullTestDB(t, "logs_scale") + chainID := big.NewInt(137) + _, err := db.Exec(`INSERT INTO evm_chains (id, created_at, updated_at) VALUES ($1, NOW(), NOW())`, utils.NewBig(chainID)) + require.NoError(t, err) + o := logpoller.NewORM(big.NewInt(137), db, lggr, pgtest.NewPGCfg(true)) + event1 := logpoller.EmitterABI.Events["Log1"].ID + address1 := common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406fbbb") + address2 := common.HexToAddress("0x6E225058950f237371261C985Db6bDe26df2200E") + + for j := 0; j < 1000; j++ { + var logs []logpoller.Log + // Max we can insert per batch + for i := 0; i < 1000; i++ { + addr := address1 + if (i+(1000*j))%2 == 0 { + addr = address2 + } + logs = append(logs, logpoller.GenLog(chainID, 1, int64(i+(1000*j)), fmt.Sprintf("0x%d", i+(1000*j)), event1[:], addr)) + } + require.NoError(t, o.InsertLogs(logs)) + } + s := time.Now() + lgs, err := o.SelectLogsByBlockRangeFilter(750000, 800000, address1, event1[:]) + require.NoError(t, err) + t.Log(time.Since(s), len(lgs)) +} + +func TestLogPoller_Integration(t *testing.T) { + lggr := logger.TestLogger(t) + db := pgtest.NewSqlxDB(t) + chainID := testutils.NewRandomEVMChainID() + _, err := db.Exec(`INSERT INTO evm_chains (id, created_at, updated_at) VALUES ($1, NOW(), NOW())`, utils.NewBig(chainID)) + require.NoError(t, err) + + // Set up a test chain with a log emitting contract deployed. 
+ owner := testutils.MustNewSimTransactor(t) + ec := backends.NewSimulatedBackend(map[common.Address]core.GenesisAccount{ + owner.From: { + Balance: big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1e18)), + }, + }, 10e6) + t.Cleanup(func() { ec.Close() }) + emitterAddress1, _, emitter1, err := log_emitter.DeployLogEmitter(owner, ec) + require.NoError(t, err) + ec.Commit() + ec.Commit() // Block 2. Ensure we have finality number of blocks + + // Set up a log poller listening for log emitter logs. + lp := logpoller.NewLogPoller(logpoller.NewORM(chainID, db, lggr, pgtest.NewPGCfg(true)), + client.NewSimulatedBackendClient(t, ec, chainID), lggr, 100*time.Millisecond, 2, 3) + // Only filter for log1 events. + lp.MergeFilter([]common.Hash{logpoller.EmitterABI.Events["Log1"].ID}, emitterAddress1) + require.NoError(t, lp.Start(context.Background())) + + // Emit some logs in blocks 3->7. + for i := 0; i < 5; i++ { + emitter1.EmitLog1(owner, []*big.Int{big.NewInt(int64(i))}) + emitter1.EmitLog2(owner, []*big.Int{big.NewInt(int64(i))}) + ec.Commit() + } + // The poller starts on a new chain at latest-finality (5 in this case), + // replay to ensure we get all the logs. + require.NoError(t, lp.Replay(context.Background(), 1)) + + // We should eventually receive all those Log1 logs. + testutils.AssertEventually(t, func() bool { + logs, err := lp.Logs(2, 7, logpoller.EmitterABI.Events["Log1"].ID, emitterAddress1) + require.NoError(t, err) + t.Logf("Received %d/%d logs\n", len(logs), 5) + return len(logs) == 5 + }) + // Now let's update the filter and replay to get Log2 logs. 
+ lp.MergeFilter([]common.Hash{logpoller.EmitterABI.Events["Log2"].ID}, emitterAddress1) + // Replay an invalid block should error + assert.Error(t, lp.Replay(context.Background(), 0)) + assert.Error(t, lp.Replay(context.Background(), 20)) + // Replay only from block 4, so we should see logs in block 4,5,6,7 (4 logs) + require.NoError(t, lp.Replay(context.Background(), 4)) + + // We should eventually see 4 logs2 logs. + testutils.AssertEventually(t, func() bool { + logs, err := lp.Logs(2, 7, logpoller.EmitterABI.Events["Log2"].ID, emitterAddress1) + require.NoError(t, err) + t.Logf("Received %d/%d logs\n", len(logs), 4) + return len(logs) == 4 + }) + + require.NoError(t, lp.Close()) +} diff --git a/core/chains/evm/logpoller/log_poller.go b/core/chains/evm/logpoller/log_poller.go new file mode 100644 index 00000000000..cf92d6cd6f7 --- /dev/null +++ b/core/chains/evm/logpoller/log_poller.go @@ -0,0 +1,437 @@ +package logpoller + +import ( + "bytes" + "context" + "database/sql" + "math/big" + "sort" + "sync" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services/pg" + "github.com/smartcontractkit/chainlink/core/utils" +) + +type LogPoller struct { + utils.StartStopOnce + ec client.Client + orm *ORM + lggr logger.Logger + pollPeriod time.Duration // poll period set by block production rate + finalityDepth int64 // finality depth is taken to mean that block (head - finality) is finalized + backfillBatchSize int64 // batch size to use when backfilling finalized logs + + filterMu sync.Mutex + addresses map[common.Address]struct{} + topics map[int]map[common.Hash]struct{} + + replay chan int64 + ctx context.Context + cancel context.CancelFunc + done chan struct{} +} + +func NewLogPoller(orm 
*ORM, ec client.Client, lggr logger.Logger, pollPeriod time.Duration, finalityDepth, backfillBatchSize int64) *LogPoller { + return &LogPoller{ + ec: ec, + orm: orm, + lggr: lggr, + replay: make(chan int64), + done: make(chan struct{}), + pollPeriod: pollPeriod, + finalityDepth: finalityDepth, + backfillBatchSize: backfillBatchSize, + addresses: make(map[common.Address]struct{}), + topics: make(map[int]map[common.Hash]struct{}), + } +} + +// MergeFilter will update the filter with the new topics and addresses. +// Clients may chose to MergeFilter and then replay in order to ensure desired logs are present. +func (lp *LogPoller) MergeFilter(topics []common.Hash, address common.Address) { + lp.filterMu.Lock() + defer lp.filterMu.Unlock() + lp.addresses[address] = struct{}{} + // [[A, B], [C]] + [[D], [], [E]] = [[A, B, D], [C], [E]] + for i := 0; i < len(topics); i++ { + if lp.topics[i] == nil { + lp.topics[i] = make(map[common.Hash]struct{}) + } + lp.topics[i][topics[i]] = struct{}{} + } +} + +func (lp *LogPoller) filterAddresses() []common.Address { + lp.filterMu.Lock() + defer lp.filterMu.Unlock() + var addresses []common.Address + for addr := range lp.addresses { + addresses = append(addresses, addr) + } + sort.Slice(addresses, func(i, j int) bool { + return bytes.Compare(addresses[i][:], addresses[j][:]) < 0 + }) + return addresses +} + +func (lp *LogPoller) filterTopics() [][]common.Hash { + lp.filterMu.Lock() + defer lp.filterMu.Unlock() + var topics [][]common.Hash + for idx := 0; idx < len(lp.topics); idx++ { + var topicPosition []common.Hash + for topic := range lp.topics[idx] { + topicPosition = append(topicPosition, topic) + } + sort.Slice(topicPosition, func(i, j int) bool { + return bytes.Compare(topicPosition[i][:], topicPosition[j][:]) < 0 + }) + topics = append(topics, topicPosition) + } + return topics +} + +// Replay signals that the poller should resume from a new block. +// Blocks until the replay starts. 
+func (lp *LogPoller) Replay(ctx context.Context, fromBlock int64) error { + latest, err := lp.ec.BlockByNumber(ctx, nil) + if err != nil { + return err + } + if fromBlock < 1 || uint64(fromBlock) > latest.NumberU64() { + return errors.Errorf("Invalid replay block number %v, acceptable range [1, %v]", fromBlock, latest.NumberU64()) + } + lp.replay <- fromBlock + return nil +} + +func (lp *LogPoller) Start(parentCtx context.Context) error { + return lp.StartOnce("LogPoller", func() error { + ctx, cancel := context.WithCancel(parentCtx) + lp.ctx = ctx + lp.cancel = cancel + go lp.run() + return nil + }) +} + +func (lp *LogPoller) Close() error { + return lp.StopOnce("LogPoller", func() error { + lp.cancel() + <-lp.done + return nil + }) +} + +func (lp *LogPoller) run() { + defer close(lp.done) + tick := time.After(0) + var start int64 + for { + select { + case <-lp.ctx.Done(): + return + case fromBlock := <-lp.replay: + lp.lggr.Warnw("Replay requested", "from", fromBlock) + start = fromBlock + case <-tick: + tick = time.After(utils.WithJitter(lp.pollPeriod)) + if start != 0 { + start = lp.pollAndSaveLogs(lp.ctx, start) + continue + } + // Otherwise, still need initial start + lastProcessed, err := lp.orm.SelectLatestBlock(pg.WithParentCtx(lp.ctx)) + if err != nil { + if !errors.Is(err, sql.ErrNoRows) { + lp.lggr.Errorw("unable to get starting block", "err", err) + continue + } + // Otherwise this is the first poll _ever_ on a new chain. + // Only safe thing to do is to start at finality depth behind tip. + latest, err := lp.ec.BlockByNumber(context.Background(), nil) + if err != nil { + lp.lggr.Warnw("unable to get latest for first poll", "err", err) + continue + } + // Do not support polling chains with don't even have finality depth worth of blocks. + // Could conceivably support this but not worth the effort. 
+ if int64(latest.NumberU64()) < lp.finalityDepth { + lp.lggr.Warnw("insufficient number of blocks on chain, waiting for finality depth", "err", err, "latest", latest.NumberU64()) + continue + } + start = int64(latest.NumberU64()) - lp.finalityDepth + 1 + } else { + start = lastProcessed.BlockNumber + 1 + } + } + } +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func convertLogs(chainID *big.Int, logs []types.Log) []Log { + var lgs []Log + for _, l := range logs { + lgs = append(lgs, Log{ + EvmChainId: utils.NewBig(chainID), + LogIndex: int64(l.Index), + BlockHash: l.BlockHash, + // We assume block numbers fit in int64 + // in many places. + BlockNumber: int64(l.BlockNumber), + EventSig: l.Topics[0].Bytes(), // First topic is always event signature. + Topics: convertTopics(l.Topics), + Address: l.Address, + TxHash: l.TxHash, + Data: l.Data, + }) + } + return lgs +} + +func convertTopics(topics []common.Hash) [][]byte { + var topicsForDB [][]byte + for _, t := range topics { + topicsForDB = append(topicsForDB, t.Bytes()) + } + return topicsForDB +} + +func (lp *LogPoller) backfill(ctx context.Context, start, end int64) int64 { + for from := start; from <= end; from += lp.backfillBatchSize { + var ( + logs []types.Log + err error + ) + to := min(from+lp.backfillBatchSize-1, end) + // Retry forever to query for logs, + // unblocked by resolving node connectivity issues. + utils.RetryWithBackoff(ctx, func() bool { + logs, err = lp.ec.FilterLogs(ctx, ethereum.FilterQuery{ + FromBlock: big.NewInt(from), + ToBlock: big.NewInt(to), + Addresses: lp.filterAddresses(), + Topics: lp.filterTopics(), + }) + if err != nil { + lp.lggr.Warnw("Unable query for logs, retrying", "err", err, "from", from, "to", to) + return true + } + return false + }) + if len(logs) == 0 { + continue + } + lp.lggr.Infow("Backfill found logs", "from", from, "to", to, "logs", len(logs)) + // Retry forever to save logs, + // unblocked by resolving db connectivity issues. 
+ utils.RetryWithBackoff(ctx, func() bool { + if err := lp.orm.InsertLogs(convertLogs(lp.ec.ChainID(), logs)); err != nil { + lp.lggr.Warnw("Unable to insert logs logs, retrying", "err", err, "from", from, "to", to) + return true + } + return false + }) + } + return end + 1 +} + +func (lp *LogPoller) maybeHandleReorg(ctx context.Context, currentBlockNumber, latestBlockNumber int64) (*types.Block, bool, int64, error) { + currentBlock, err1 := lp.ec.BlockByNumber(ctx, big.NewInt(currentBlockNumber)) + if err1 != nil { + lp.lggr.Warnw("Unable to get currentBlock", "err", err1, "currentBlockNumber", currentBlockNumber) + return nil, false, currentBlockNumber, err1 + } + // Does this currentBlock point to the same parent that we have saved? + // If not, there was a reorg, so we need to rewind. + expectedParent, err1 := lp.orm.SelectBlockByNumber(currentBlockNumber - 1) + if err1 != nil && !errors.Is(err1, sql.ErrNoRows) { + // If err is not a no rows error, assume transient db issue and retry + lp.lggr.Warnw("Unable to read latestBlockNumber currentBlock saved", "err", err1, "currentBlockNumber", currentBlockNumber) + return nil, false, 0, errors.New("Unable to read latestBlockNumber currentBlock saved") + } + // We will not have the previous currentBlock on initial poll or after a backfill. + havePreviousBlock := !errors.Is(err1, sql.ErrNoRows) + if havePreviousBlock && (currentBlock.ParentHash() != expectedParent.BlockHash) { + // There can be another reorg while we're finding the LCA. + // That is ok, since we'll detect it on the next iteration. + // Since we go currentBlock by currentBlock for unfinalized logs, the mismatch starts at currentBlockNumber currentBlock - 1. 
+ lca, err2 := lp.findLCA(currentBlock.ParentHash()) + if err2 != nil { + lp.lggr.Warnw("Unable to find LCA after reorg, retrying", "err", err2) + return nil, false, 0, errors.New("Unable to find LCA after reorg, retrying") + } + + lp.lggr.Infow("Re-org detected", "lca", lca, "currentBlockNumber", currentBlockNumber, "latestBlockNumber", latestBlockNumber) + // We truncate all the blocks and logs after the LCA. + // We could preserve the logs for forensics, since its possible + // that applications see them and take action upon it, however that + // results in significantly slower reads since we must then compute + // the canonical set per read. Typically if an application took action on a log + // it would be saved elsewhere e.g. eth_txes, so it seems better to just support the fast reads. + // Its also nicely analogous to reading from the chain itself. + err2 = lp.orm.q.Transaction(func(tx pg.Queryer) error { + // These deletes are bounded by reorg depth, so they are + // fast and should not slow down the log readers. + err2 = lp.orm.DeleteRangeBlocks(lca+1, latestBlockNumber, pg.WithQueryer(tx)) + if err2 != nil { + lp.lggr.Warnw("Unable to clear reorged blocks, retrying", "err", err2) + return err2 + } + err2 = lp.orm.DeleteLogs(lca+1, latestBlockNumber, pg.WithQueryer(tx)) + if err2 != nil { + lp.lggr.Warnw("Unable to clear reorged logs, retrying", "err", err2) + return err2 + } + return nil + }) + if err2 != nil { + // If we crash or fail to update state we simply do not increment currentBlockNumber so we'll detect the same + // reorg (if still present) and retry. + return nil, false, 0, err2 + } + return currentBlock, true, lca + 1, nil + } + return currentBlock, false, 0, nil +} + +// pollAndSaveLogs On startup/crash current is the first block after the last processed block. 
+func (lp *LogPoller) pollAndSaveLogs(ctx context.Context, currentBlockNumber int64) int64 { + lp.lggr.Infow("Polling for logs", "currentBlockNumber", currentBlockNumber) + // Get latestBlockNumber block on chain + latestBlock, err1 := lp.ec.BlockByNumber(ctx, nil) + if err1 != nil { + lp.lggr.Warnw("Unable to get latestBlockNumber block", "err", err1, "currentBlockNumber", currentBlockNumber) + return currentBlockNumber + } + latestBlockNumber := latestBlock.Number().Int64() + if currentBlockNumber > latestBlockNumber { + lp.lggr.Debugw("No new blocks since last poll", "currentBlockNumber", currentBlockNumber, "latestBlockNumber", currentBlockNumber) + return currentBlockNumber + } + // Possibly handle a reorg + _, reorgDetected, newPollBlockNumber, err1 := lp.maybeHandleReorg(ctx, currentBlockNumber, latestBlockNumber) + if err1 != nil { + // Continuously retry from same block on any error in reorg handling. + return currentBlockNumber + } + // If we did detect a reorg, we'll have a new block number to start from (LCA+1) + // so let's resume polling from there. + if reorgDetected { + currentBlockNumber = newPollBlockNumber + } + + // Backfill finalized blocks if we can for performance. + // E.g. 1<-2<-3(currentBlockNumber)<-4<-5<-6<-7(latestBlockNumber), finality is 2. So 3,4,5 can be batched. + // start = currentBlockNumber = 3, end = latestBlockNumber - finality = 7-2 = 5 (inclusive range). + if (latestBlockNumber - currentBlockNumber) >= lp.finalityDepth { + lp.lggr.Infow("Backfilling logs", "start", currentBlockNumber, "end", latestBlockNumber-lp.finalityDepth) + currentBlockNumber = lp.backfill(ctx, currentBlockNumber, latestBlockNumber-lp.finalityDepth) + } + + for currentBlockNumber <= latestBlockNumber { + // Same reorg detection on unfinalized blocks. 
+ // Get currentBlockNumber block + currentBlock, reorgDetected, newPollBlock, err2 := lp.maybeHandleReorg(ctx, currentBlockNumber, latestBlockNumber) + if err2 != nil { + return currentBlockNumber + } + if reorgDetected { + currentBlockNumber = newPollBlock + continue + } + + h := currentBlock.Hash() + logs, err2 := lp.ec.FilterLogs(ctx, ethereum.FilterQuery{ + BlockHash: &h, + Addresses: lp.filterAddresses(), + Topics: lp.filterTopics(), + }) + if err2 != nil { + lp.lggr.Warnw("Unable query for logs, retrying", "err", err2, "block", currentBlock.Number()) + return currentBlockNumber + } + lp.lggr.Infow("Unfinalized log query", "logs", len(logs), "currentBlockNumber", currentBlockNumber) + err2 = lp.orm.q.Transaction(func(q pg.Queryer) error { + if err3 := lp.orm.InsertBlock(currentBlock.Hash(), currentBlock.Number().Int64()); err3 != nil { + return err3 + } + if len(logs) == 0 { + return nil + } + return lp.orm.InsertLogs(convertLogs(lp.ec.ChainID(), logs)) + }) + if err2 != nil { + // If we're unable to insert, don't increment currentBlockNumber and just retry + lp.lggr.Warnw("Unable to save logs, retrying", "err", err2, "block", currentBlock.Number()) + return currentBlockNumber + } + currentBlockNumber++ + } + return currentBlockNumber +} + +func (lp *LogPoller) findLCA(h common.Hash) (int64, error) { + // Find the first place where our chain and their chain have the same block, + // that block number is the LCA. 
+ block, err := lp.ec.BlockByHash(context.Background(), h) + if err != nil { + return 0, err + } + blockNumber := block.Number().Int64() + startBlockNumber := blockNumber + for blockNumber >= (startBlockNumber - lp.finalityDepth) { + ourBlockHash, err := lp.orm.SelectBlockByNumber(blockNumber) + if err != nil { + return 0, err + } + if block.Hash() == ourBlockHash.BlockHash { + // If we do have the blockhash, that is the LCA + return blockNumber, nil + } + blockNumber-- + block, err = lp.ec.BlockByHash(context.Background(), block.ParentHash()) + if err != nil { + return 0, err + } + } + lp.lggr.Criticalw("Reorg greater than finality depth detected", "finality", lp.finalityDepth) + return 0, errors.New("reorg greater than finality depth") +} + +// Logs returns logs matching topics and address (exactly) in the given block range, +// which are canonical at time of query. +func (lp *LogPoller) Logs(start, end int64, eventSig common.Hash, address common.Address, qopts ...pg.QOpt) ([]Log, error) { + return lp.orm.SelectLogsByBlockRangeFilter(start, end, address, eventSig[:], qopts...) +} + +func (lp *LogPoller) LatestBlock(qopts ...pg.QOpt) (int64, error) { + b, err := lp.orm.SelectLatestBlock(qopts...) + if err != nil { + return 0, err + } + return b.BlockNumber, nil +} + +// LatestLogByEventSigWithConfs finds the latest log that has confs number of blocks on top of the log. +func (lp *LogPoller) LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs int, qopts ...pg.QOpt) (*Log, error) { + log, err := lp.orm.SelectLatestLogEventSigWithConfs(eventSig, address, confs, qopts...) 
+ if err != nil { + return nil, err + } + return log, nil +} diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go new file mode 100644 index 00000000000..86404639420 --- /dev/null +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -0,0 +1,347 @@ +package logpoller + +import ( + "context" + "database/sql" + "math/big" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/ethereum/go-ethereum/core" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/log_emitter" + "github.com/smartcontractkit/chainlink/core/internal/testutils" + "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/utils" +) + +var ( + EmitterABI, _ = abi.JSON(strings.NewReader(log_emitter.LogEmitterABI)) +) + +func GenLog(chainID *big.Int, logIndex int64, blockNum int64, blockHash string, topic1 []byte, address common.Address) Log { + return Log{ + EvmChainId: utils.NewBig(chainID), + LogIndex: logIndex, + BlockHash: common.HexToHash(blockHash), + BlockNumber: blockNum, + EventSig: topic1, + Topics: [][]byte{topic1}, + Address: address, + TxHash: common.HexToHash("0x1234"), + Data: []byte("hello"), + } +} + +func assertDontHave(t *testing.T, start, end int, orm *ORM) { + for i := start; i < end; i++ { + _, err := orm.SelectBlockByNumber(int64(i)) + assert.True(t, errors.Is(err, sql.ErrNoRows)) + } +} + +func assertHaveCanonical(t *testing.T, start, end int, ec *backends.SimulatedBackend, orm *ORM) { + for i := start; i < end; i++ { + 
blk, err := orm.SelectBlockByNumber(int64(i)) + require.NoError(t, err, "block %v", i) + chainBlk, err := ec.BlockByNumber(context.Background(), big.NewInt(int64(i))) + assert.Equal(t, chainBlk.Hash().Bytes(), blk.BlockHash.Bytes(), "block %v", i) + } +} + +func TestLogPoller_PollAndSaveLogs(t *testing.T) { + lggr := logger.TestLogger(t) + db := pgtest.NewSqlxDB(t) + chainID := testutils.NewRandomEVMChainID() + require.NoError(t, utils.JustError(db.Exec(`SET CONSTRAINTS log_poller_blocks_evm_chain_id_fkey DEFERRED`))) + require.NoError(t, utils.JustError(db.Exec(`SET CONSTRAINTS logs_evm_chain_id_fkey DEFERRED`))) + + // Set up a test chain with a log emitting contract deployed. + orm := NewORM(chainID, db, lggr, pgtest.NewPGCfg(true)) + owner := testutils.MustNewSimTransactor(t) + ec := backends.NewSimulatedBackend(map[common.Address]core.GenesisAccount{ + owner.From: { + Balance: big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1e18)), + }, + }, 10e6) + t.Cleanup(func() { ec.Close() }) + emitterAddress1, _, emitter1, err := log_emitter.DeployLogEmitter(owner, ec) + require.NoError(t, err) + emitterAddress2, _, emitter2, err := log_emitter.DeployLogEmitter(owner, ec) + require.NoError(t, err) + ec.Commit() + + // Set up a log poller listening for log emitter logs. + lp := NewLogPoller(orm, client.NewSimulatedBackendClient(t, ec, chainID), lggr, 15*time.Second, 2, 3) + lp.MergeFilter([]common.Hash{EmitterABI.Events["Log1"].ID}, emitterAddress1) + lp.MergeFilter([]common.Hash{EmitterABI.Events["Log2"].ID}, emitterAddress2) + + b, err := ec.BlockByNumber(context.Background(), nil) + require.NoError(t, err) + require.Equal(t, uint64(1), b.NumberU64()) + + // Test scenario: single block in chain, no logs. + // Chain genesis <- 1 + // DB: empty + newStart := lp.pollAndSaveLogs(context.Background(), 1) + assert.Equal(t, int64(2), newStart) + + // We expect to have saved block 1. 
+ lpb, err := orm.SelectBlockByNumber(1) + require.NoError(t, err) + assert.Equal(t, lpb.BlockHash, b.Hash()) + assert.Equal(t, lpb.BlockNumber, int64(b.NumberU64())) + assert.Equal(t, int64(1), int64(b.NumberU64())) + // No logs. + lgs, err := orm.selectLogsByBlockRange(1, 1) + require.NoError(t, err) + assert.Equal(t, 0, len(lgs)) + assertHaveCanonical(t, 1, 1, ec, orm) + + // Polling again should be a noop, since we are at the latest. + newStart = lp.pollAndSaveLogs(context.Background(), newStart) + assert.Equal(t, int64(2), newStart) + latest, err := orm.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(1), latest.BlockNumber) + assertHaveCanonical(t, 1, 1, ec, orm) + + // Test scenario: one log 2 block chain. + // Chain gen <- 1 <- 2 (L1) + // DB: 1 + _, err = emitter1.EmitLog1(owner, []*big.Int{big.NewInt(1)}) + require.NoError(t, err) + ec.Commit() + + // Polling should get us the L1 log. + newStart = lp.pollAndSaveLogs(context.Background(), newStart) + assert.Equal(t, int64(3), newStart) + latest, err = orm.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(2), latest.BlockNumber) + lgs, err = orm.selectLogsByBlockRange(1, 3) + require.NoError(t, err) + require.Equal(t, 1, len(lgs)) + assert.Equal(t, emitterAddress1, lgs[0].Address) + assert.Equal(t, latest.BlockHash, lgs[0].BlockHash) + assert.Equal(t, hexutil.Encode(lgs[0].Topics[0]), EmitterABI.Events["Log1"].ID.String()) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), + lgs[0].Data) + + // Test scenario: single block reorg with log. 
+ // Chain gen <- 1 <- 2 (L1_1) + // \ 2'(L1_2) <- 3 + // DB: 1, 2 + // - Detect a reorg, + // - Update the block 2's hash + // - Save L1' + // - L1_1 deleted + reorgedOutBlock, err := ec.BlockByNumber(context.Background(), big.NewInt(2)) + require.NoError(t, err) + lca, err := ec.BlockByNumber(context.Background(), big.NewInt(1)) + require.NoError(t, err) + require.NoError(t, ec.Fork(context.Background(), lca.Hash())) + _, err = emitter1.EmitLog1(owner, []*big.Int{big.NewInt(2)}) + require.NoError(t, err) + // Create 2' + ec.Commit() + // Create 3 (we need a new block for us to do any polling and detect the reorg). + ec.Commit() + + newStart = lp.pollAndSaveLogs(context.Background(), newStart) + assert.Equal(t, int64(4), newStart) + latest, err = orm.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(3), latest.BlockNumber) + lgs, err = orm.selectLogsByBlockRange(1, 3) + require.NoError(t, err) + require.Equal(t, 1, len(lgs)) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000002`), lgs[0].Data) + assertHaveCanonical(t, 1, 3, ec, orm) + + // Test scenario: reorg back to previous tip. + // Chain gen <- 1 <- 2 (L1_1) <- 3' (L1_3) <- 4 + // \ 2'(L1_2) <- 3 + require.NoError(t, ec.Fork(context.Background(), reorgedOutBlock.Hash())) + _, err = emitter1.EmitLog1(owner, []*big.Int{big.NewInt(3)}) + require.NoError(t, err) + // Create 3' + ec.Commit() + // Create 4 + ec.Commit() + newStart = lp.pollAndSaveLogs(context.Background(), newStart) + assert.Equal(t, int64(5), newStart) + latest, err = orm.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(4), latest.BlockNumber) + lgs, err = orm.selectLogsByBlockRange(1, 3) + // We expect ONLY L1_1 and L1_3 since L1_2 is reorg'd out. 
+ assert.Equal(t, 2, len(lgs)) + assert.Equal(t, int64(2), lgs[0].BlockNumber) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), lgs[0].Data) + assert.Equal(t, int64(3), lgs[1].BlockNumber) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000003`), lgs[1].Data) + assertHaveCanonical(t, 1, 1, ec, orm) + assertHaveCanonical(t, 3, 4, ec, orm) + assertDontHave(t, 2, 2, orm) // 2 gets backfilled + + // Test scenario: multiple logs per block for many blocks (also after reorg). + // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) + // \ 2'(L1_2) <- 3 + // DB: 1, 2', 3' + // - Should save 4, 5, 6 blocks + // - Should obtain logs L1_3, L2_5, L1_6 + _, err = emitter1.EmitLog1(owner, []*big.Int{big.NewInt(4)}) + require.NoError(t, err) + _, err = emitter2.EmitLog1(owner, []*big.Int{big.NewInt(5)}) + require.NoError(t, err) + // Create 4 + ec.Commit() + _, err = emitter1.EmitLog1(owner, []*big.Int{big.NewInt(6)}) + require.NoError(t, err) + // Create 5 + ec.Commit() + + newStart = lp.pollAndSaveLogs(context.Background(), newStart) + assert.Equal(t, int64(7), newStart) + lgs, err = orm.selectLogsByBlockRange(4, 6) + require.NoError(t, err) + require.Equal(t, 3, len(lgs)) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000004`), lgs[0].Data) + assert.Equal(t, emitterAddress1, lgs[0].Address) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000005`), lgs[1].Data) + assert.Equal(t, emitterAddress2, lgs[1].Address) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000006`), lgs[2].Data) + assert.Equal(t, emitterAddress1, lgs[2].Address) + assertHaveCanonical(t, 1, 1, ec, orm) + assertDontHave(t, 2, 2, orm) // 2 gets backfilled + assertHaveCanonical(t, 3, 6, ec, orm) + + // Test scenario: node down for 
exactly finality + 1 block + // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) <- 7 (L1_7) <- 8 (L1_8) <- 9 (L1_9) + // \ 2'(L1_2) <- 3 + // DB: 1, 2, 3, 4, 5, 6 + // - We expect block 7 to backfilled (treated as finalized) + // - Then block 8-9 to be handled block by block (treated as unfinalized). + for i := 7; i < 10; i++ { + _, err = emitter1.EmitLog1(owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + ec.Commit() + } + newStart = lp.pollAndSaveLogs(context.Background(), newStart) + assert.Equal(t, int64(10), newStart) + lgs, err = orm.selectLogsByBlockRange(7, 9) + require.NoError(t, err) + require.Equal(t, 3, len(lgs)) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000007`), lgs[0].Data) + assert.Equal(t, int64(7), lgs[0].BlockNumber) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000008`), lgs[1].Data) + assert.Equal(t, int64(8), lgs[1].BlockNumber) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000009`), lgs[2].Data) + assert.Equal(t, int64(9), lgs[2].BlockNumber) + assertDontHave(t, 7, 7, orm) // Do not expect to save backfilled blocks. 
+ assertHaveCanonical(t, 8, 9, ec, orm) + + // Test scenario large backfill (multiple batches) + // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) <- 7 (L1_7) <- 8 (L1_8) <- 9 (L1_9) <- 10..15 + // \ 2'(L1_2) <- 3 + // DB: 1, 2, 3, 4, 5, 6, (backfilled 7), 8, 9 + // - 10, 11, 12 backfilled in batch 1 + // - 13 backfilled in batch 2 + // - 14, 15 to be treated as unfinalized + for i := 10; i < 16; i++ { + _, err = emitter1.EmitLog1(owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + ec.Commit() + } + newStart = lp.pollAndSaveLogs(context.Background(), newStart) + assert.Equal(t, int64(16), newStart) + lgs, err = orm.selectLogsByBlockRange(10, 15) + require.NoError(t, err) + assert.Equal(t, 6, len(lgs)) + assertHaveCanonical(t, 14, 15, ec, orm) + assertDontHave(t, 10, 13, orm) // Do not expect to save backfilled blocks. +} + +func TestLogPoller_Logs(t *testing.T) { + lggr := logger.TestLogger(t) + chainID := testutils.NewRandomEVMChainID() + db := pgtest.NewSqlxDB(t) + require.NoError(t, utils.JustError(db.Exec(`SET CONSTRAINTS log_poller_blocks_evm_chain_id_fkey DEFERRED`))) + require.NoError(t, utils.JustError(db.Exec(`SET CONSTRAINTS logs_evm_chain_id_fkey DEFERRED`))) + o := NewORM(chainID, db, lggr, pgtest.NewPGCfg(true)) + event1 := EmitterABI.Events["Log1"].ID + event2 := EmitterABI.Events["Log2"].ID + address1 := common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406fbbb") + address2 := common.HexToAddress("0x6E225058950f237371261C985Db6bDe26df2200E") + + // Block 1-3 + require.NoError(t, o.InsertLogs([]Log{ + GenLog(chainID, 1, 1, "0x3", event1[:], address1), + GenLog(chainID, 2, 1, "0x3", event2[:], address2), + GenLog(chainID, 1, 2, "0x4", event1[:], address2), + GenLog(chainID, 2, 2, "0x4", event2[:], address1), + GenLog(chainID, 1, 3, "0x5", event1[:], address1), + GenLog(chainID, 2, 3, "0x5", event2[:], address2), + })) + + // Select for all addresses + lgs, err := o.selectLogsByBlockRange(1, 3) + 
require.NoError(t, err) + require.Equal(t, 6, len(lgs)) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000003", lgs[0].BlockHash.String()) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000003", lgs[1].BlockHash.String()) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000004", lgs[2].BlockHash.String()) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000004", lgs[3].BlockHash.String()) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000005", lgs[4].BlockHash.String()) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000005", lgs[5].BlockHash.String()) + + // Filter by address and topic + lgs, err = o.SelectLogsByBlockRangeFilter(1, 3, address1, event1[:]) + require.NoError(t, err) + require.Equal(t, 2, len(lgs)) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000003", lgs[0].BlockHash.String()) + assert.Equal(t, address1, lgs[0].Address) + assert.Equal(t, event1.Bytes(), lgs[0].Topics[0]) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000005", lgs[1].BlockHash.String()) + assert.Equal(t, address1, lgs[1].Address) + + // Filter by block + lgs, err = o.SelectLogsByBlockRangeFilter(2, 2, address2, event1[:]) + require.NoError(t, err) + require.Equal(t, 1, len(lgs)) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000004", lgs[0].BlockHash.String()) + assert.Equal(t, int64(1), lgs[0].LogIndex) + assert.Equal(t, address2, lgs[0].Address) + assert.Equal(t, event1.Bytes(), lgs[0].Topics[0]) +} + +func TestLogPoller_MergeFilter(t *testing.T) { + lp := NewLogPoller(nil, nil, nil, 15*time.Second, 1, 1) + a1 := common.HexToAddress("0x2ab9a2dc53736b361b72d900cdf9f78f9406fbbb") + a2 := common.HexToAddress("0x2ab9a2dc53736b361b72d900cdf9f78f9406fbbc") + 
lp.MergeFilter([]common.Hash{EmitterABI.Events["Log1"].ID}, a1) + assert.Equal(t, []common.Address{a1}, lp.filterAddresses()) + assert.Equal(t, [][]common.Hash{{EmitterABI.Events["Log1"].ID}}, lp.filterTopics()) + + // Should de-dupe topics in same position + lp.MergeFilter([]common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, a2) + assert.Equal(t, []common.Address{a1, a2}, lp.filterAddresses()) + assert.Equal(t, [][]common.Hash{{EmitterABI.Events["Log1"].ID}, {EmitterABI.Events["Log2"].ID}}, lp.filterTopics()) + + // Should de-dupe addresses + lp.MergeFilter([]common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, a2) + assert.Equal(t, []common.Address{a1, a2}, lp.filterAddresses()) + assert.Equal(t, [][]common.Hash{{EmitterABI.Events["Log1"].ID}, {EmitterABI.Events["Log2"].ID}}, lp.filterTopics()) +} diff --git a/core/chains/evm/logpoller/models.go b/core/chains/evm/logpoller/models.go new file mode 100644 index 00000000000..e0dfe0c1f07 --- /dev/null +++ b/core/chains/evm/logpoller/models.go @@ -0,0 +1,34 @@ +package logpoller + +import ( + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/lib/pq" + + "github.com/smartcontractkit/chainlink/core/utils" +) + +// LogPollerBlock represents an unfinalized block +// used for reorg detection when polling. +type LogPollerBlock struct { + EvmChainId *utils.Big + BlockHash common.Hash + // Note geth uses int64 internally https://github.com/ethereum/go-ethereum/blob/f66f1a16b3c480d3a43ac7e8a09ab3e362e96ae4/eth/filters/api.go#L340 + BlockNumber int64 + CreatedAt time.Time +} + +// Log represents an EVM log. 
+type Log struct { + EvmChainId *utils.Big + LogIndex int64 + BlockHash common.Hash + BlockNumber int64 + Topics pq.ByteaArray + EventSig []byte + Address common.Address + TxHash common.Hash + Data []byte + CreatedAt time.Time +} diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go new file mode 100644 index 00000000000..0d6814e8863 --- /dev/null +++ b/core/chains/evm/logpoller/orm.go @@ -0,0 +1,131 @@ +package logpoller + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/smartcontractkit/sqlx" + + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services/pg" + "github.com/smartcontractkit/chainlink/core/utils" +) + +type ORM struct { + chainID *big.Int + q pg.Q +} + +// NewORM creates an ORM scoped to chainID. +func NewORM(chainID *big.Int, db *sqlx.DB, lggr logger.Logger, cfg pg.LogConfig) *ORM { + namedLogger := lggr.Named("ORM") + q := pg.NewQ(db, namedLogger, cfg) + return &ORM{ + chainID: chainID, + q: q, + } +} + +// InsertBlock is idempotent to support replays. +func (o *ORM) InsertBlock(h common.Hash, n int64, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + _, err := q.Exec(`INSERT INTO log_poller_blocks (evm_chain_id, block_hash, block_number, created_at) + VALUES ($1, $2, $3, NOW()) ON CONFLICT DO NOTHING`, utils.NewBig(o.chainID), h[:], n) + return err +} + +func (o *ORM) SelectBlockByHash(h common.Hash, qopts ...pg.QOpt) (*LogPollerBlock, error) { + q := o.q.WithOpts(qopts...) + var b LogPollerBlock + if err := q.Get(&b, `SELECT * FROM log_poller_blocks WHERE block_hash = $1 AND evm_chain_id = $2`, h, utils.NewBig(o.chainID)); err != nil { + return nil, err + } + return &b, nil +} + +func (o *ORM) SelectBlockByNumber(n int64, qopts ...pg.QOpt) (*LogPollerBlock, error) { + q := o.q.WithOpts(qopts...) 
+ var b LogPollerBlock + if err := q.Get(&b, `SELECT * FROM log_poller_blocks WHERE block_number = $1 AND evm_chain_id = $2`, n, utils.NewBig(o.chainID)); err != nil { + return nil, err + } + return &b, nil +} + +func (o *ORM) SelectLatestBlock(qopts ...pg.QOpt) (*LogPollerBlock, error) { + q := o.q.WithOpts(qopts...) + var b LogPollerBlock + if err := q.Get(&b, `SELECT * FROM log_poller_blocks WHERE evm_chain_id = $1 ORDER BY block_number DESC LIMIT 1`, utils.NewBig(o.chainID)); err != nil { + return nil, err + } + return &b, nil +} + +func (o *ORM) SelectLatestLogEventSigWithConfs(eventSig common.Hash, address common.Address, confs int, qopts ...pg.QOpt) (*Log, error) { + q := o.q.WithOpts(qopts...) + var l Log + if err := q.Get(&l, `SELECT * FROM logs + WHERE evm_chain_id = $1 + AND event_sig = $2 + AND address = $3 + AND (block_number + $4) <= (SELECT COALESCE(block_number, 0) FROM log_poller_blocks WHERE evm_chain_id = $1 ORDER BY block_number DESC LIMIT 1) + ORDER BY (block_number, log_index) DESC LIMIT 1`, utils.NewBig(o.chainID), eventSig, address, confs); err != nil { + return nil, err + } + return &l, nil +} + +func (o *ORM) DeleteRangeBlocks(start, end int64, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + _, err := q.Exec(`DELETE FROM log_poller_blocks WHERE block_number >= $1 AND block_number <= $2 AND evm_chain_id = $3`, start, end, utils.NewBig(o.chainID)) + return err +} + +func (o *ORM) DeleteLogs(start, end int64, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + _, err := q.Exec(`DELETE FROM logs WHERE block_number >= $1 AND block_number <= $2 AND evm_chain_id = $3`, start, end, utils.NewBig(o.chainID)) + return err +} + +// InsertLogs is idempotent to support replays. 
+func (o *ORM) InsertLogs(logs []Log, qopts ...pg.QOpt) error { + for _, log := range logs { + if o.chainID.Cmp(log.EvmChainId.ToInt()) != 0 { + return errors.Errorf("invalid chainID in log got %v want %v", log.EvmChainId.ToInt(), o.chainID) + } + } + q := o.q.WithOpts(qopts...) + _, err := q.NamedExec(`INSERT INTO logs +(evm_chain_id, log_index, block_hash, block_number, address, event_sig, topics, tx_hash, data, created_at) VALUES +(:evm_chain_id, :log_index, :block_hash, :block_number, :address, :event_sig, :topics, :tx_hash, :data, NOW()) ON CONFLICT DO NOTHING`, logs) + return err +} + +func (o *ORM) selectLogsByBlockRange(start, end int64) ([]Log, error) { + var logs []Log + err := o.q.Select(&logs, ` + SELECT * FROM logs + WHERE block_number >= $1 AND block_number <= $2 AND evm_chain_id = $3 + ORDER BY (block_number, log_index, created_at)`, start, end, utils.NewBig(o.chainID)) + if err != nil { + return nil, err + } + return logs, nil +} + +// SelectLogsByBlockRangeFilter finds the latest logs by block. +// Assumes that logs inserted later for a given block are "more" canonical. +func (o *ORM) SelectLogsByBlockRangeFilter(start, end int64, address common.Address, eventSig []byte, qopts ...pg.QOpt) ([]Log, error) { + var logs []Log + q := o.q.WithOpts(qopts...) 
+ err := q.Select(&logs, ` + SELECT * FROM logs + WHERE logs.block_number >= $1 AND logs.block_number <= $2 AND logs.evm_chain_id = $3 + AND address = $4 AND event_sig = $5 + ORDER BY (logs.block_number, logs.log_index)`, start, end, utils.NewBig(o.chainID), address, eventSig) + if err != nil { + return nil, err + } + return logs, nil +} diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go new file mode 100644 index 00000000000..6a7b008df8b --- /dev/null +++ b/core/chains/evm/logpoller/orm_test.go @@ -0,0 +1,111 @@ +package logpoller + +import ( + "database/sql" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/utils" +) + +func TestORM(t *testing.T) { + db := pgtest.NewSqlxDB(t) + lggr := logger.TestLogger(t) + require.NoError(t, utils.JustError(db.Exec(`SET CONSTRAINTS log_poller_blocks_evm_chain_id_fkey DEFERRED`))) + require.NoError(t, utils.JustError(db.Exec(`SET CONSTRAINTS logs_evm_chain_id_fkey DEFERRED`))) + o1 := NewORM(big.NewInt(137), db, lggr, pgtest.NewPGCfg(true)) + o2 := NewORM(big.NewInt(138), db, lggr, pgtest.NewPGCfg(true)) + + // Insert and read back a block. 
+ require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 10)) + b, err := o1.SelectBlockByHash(common.HexToHash("0x1234")) + require.NoError(t, err) + assert.Equal(t, b.BlockNumber, int64(10)) + assert.Equal(t, b.BlockHash.Bytes(), common.HexToHash("0x1234").Bytes()) + assert.Equal(t, b.EvmChainId.String(), "137") + + // Insert blocks from a different chain + require.NoError(t, o2.InsertBlock(common.HexToHash("0x1234"), 11)) + require.NoError(t, o2.InsertBlock(common.HexToHash("0x1235"), 12)) + b2, err := o2.SelectBlockByHash(common.HexToHash("0x1234")) + require.NoError(t, err) + assert.Equal(t, b2.BlockNumber, int64(11)) + assert.Equal(t, b2.BlockHash.Bytes(), common.HexToHash("0x1234").Bytes()) + assert.Equal(t, b2.EvmChainId.String(), "138") + + latest, err := o1.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(10), latest.BlockNumber) + + latest, err = o2.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(12), latest.BlockNumber) + + // Delete a block + require.NoError(t, o1.DeleteRangeBlocks(10, 10)) + _, err = o1.SelectBlockByHash(common.HexToHash("0x1234")) + require.Error(t, err) + assert.True(t, errors.Is(err, sql.ErrNoRows)) + + // Delete block from another chain. + require.NoError(t, o2.DeleteRangeBlocks(11, 11)) + _, err = o2.SelectBlockByHash(common.HexToHash("0x1234")) + require.Error(t, err) + assert.True(t, errors.Is(err, sql.ErrNoRows)) + + // Should be able to insert and read back a log. 
+ topic := common.HexToHash("0x1599") + require.NoError(t, o1.InsertLogs([]Log{ + { + EvmChainId: utils.NewBigI(137), + LogIndex: 1, + BlockHash: common.HexToHash("0x1234"), + BlockNumber: int64(10), + EventSig: topic[:], + Topics: [][]byte{topic[:]}, + Address: common.HexToAddress("0x1234"), + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello"), + }, + })) + logs, err := o1.selectLogsByBlockRange(10, 10) + require.NoError(t, err) + require.Equal(t, 1, len(logs)) + assert.Equal(t, []byte("hello"), logs[0].Data) + + logs, err = o1.SelectLogsByBlockRangeFilter(10, 10, common.HexToAddress("0x1234"), topic[:]) + require.NoError(t, err) + require.Equal(t, 1, len(logs)) + + // With no blocks, should be an error + _, err = o1.SelectLatestLogEventSigWithConfs(topic, common.HexToAddress("0x1234"), 0) + require.Error(t, err) + assert.True(t, errors.Is(err, sql.ErrNoRows)) + // With block 10, only 0 confs should work + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 10)) + log, err := o1.SelectLatestLogEventSigWithConfs(topic, common.HexToAddress("0x1234"), 0) + require.NoError(t, err) + assert.Equal(t, int64(10), log.BlockNumber) + _, err = o1.SelectLatestLogEventSigWithConfs(topic, common.HexToAddress("0x1234"), 1) + require.Error(t, err) + assert.True(t, errors.Is(err, sql.ErrNoRows)) + // With block 12, anything <=2 should work + require.NoError(t, o1.DeleteRangeBlocks(10, 10)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 12)) + _, err = o1.SelectLatestLogEventSigWithConfs(topic, common.HexToAddress("0x1234"), 0) + require.NoError(t, err) + _, err = o1.SelectLatestLogEventSigWithConfs(topic, common.HexToAddress("0x1234"), 1) + require.NoError(t, err) + _, err = o1.SelectLatestLogEventSigWithConfs(topic, common.HexToAddress("0x1234"), 2) + require.NoError(t, err) + _, err = o1.SelectLatestLogEventSigWithConfs(topic, common.HexToAddress("0x1234"), 3) + require.Error(t, err) + assert.True(t, errors.Is(err, sql.ErrNoRows)) +} diff 
--git a/core/chains/evm/mocks/balance_monitor.go b/core/chains/evm/mocks/balance_monitor.go index afefda289b6..920add9d63e 100644 --- a/core/chains/evm/mocks/balance_monitor.go +++ b/core/chains/evm/mocks/balance_monitor.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/chains/evm/mocks/chain.go b/core/chains/evm/mocks/chain.go index 50909d9dd38..b56f8913a1e 100644 --- a/core/chains/evm/mocks/chain.go +++ b/core/chains/evm/mocks/chain.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks @@ -14,6 +14,8 @@ import ( logger "github.com/smartcontractkit/chainlink/core/logger" + logpoller "github.com/smartcontractkit/chainlink/core/chains/evm/logpoller" + mock "github.com/stretchr/testify/mock" monitor "github.com/smartcontractkit/chainlink/core/chains/evm/monitor" @@ -168,6 +170,22 @@ func (_m *Chain) LogBroadcaster() log.Broadcaster { return r0 } +// LogPoller provides a mock function with given fields: +func (_m *Chain) LogPoller() *logpoller.LogPoller { + ret := _m.Called() + + var r0 *logpoller.LogPoller + if rf, ok := ret.Get(0).(func() *logpoller.LogPoller); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*logpoller.LogPoller) + } + } + + return r0 +} + // Logger provides a mock function with given fields: func (_m *Chain) Logger() logger.Logger { ret := _m.Called() diff --git a/core/chains/evm/mocks/chain_set.go b/core/chains/evm/mocks/chain_set.go index 338d102441a..3ba78dbaf3a 100644 --- a/core/chains/evm/mocks/chain_set.go +++ b/core/chains/evm/mocks/chain_set.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/chains/evm/mocks/client.go b/core/chains/evm/mocks/client.go index 4732e63d26d..2d83d86488f 100644 --- a/core/chains/evm/mocks/client.go +++ b/core/chains/evm/mocks/client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks @@ -78,6 +78,29 @@ func (_m *Client) BatchCallContextAll(ctx context.Context, b []rpc.BatchElem) er return r0 } +// BlockByHash provides a mock function with given fields: ctx, hash +func (_m *Client) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + ret := _m.Called(ctx, hash) + + var r0 *types.Block + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Block); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // BlockByNumber provides a mock function with given fields: ctx, number func (_m *Client) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { ret := _m.Called(ctx, number) diff --git a/core/chains/evm/mocks/node.go b/core/chains/evm/mocks/node.go index 5fe845eef54..3cb70c8e469 100644 --- a/core/chains/evm/mocks/node.go +++ b/core/chains/evm/mocks/node.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks @@ -63,6 +63,29 @@ func (_m *Node) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { return r0 } +// BlockByHash provides a mock function with given fields: ctx, hash +func (_m *Node) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + ret := _m.Called(ctx, hash) + + var r0 *types.Block + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Block); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // BlockByNumber provides a mock function with given fields: ctx, number func (_m *Node) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { ret := _m.Called(ctx, number) diff --git a/core/chains/evm/mocks/orm.go b/core/chains/evm/mocks/orm.go deleted file mode 100644 index f95ae9c2630..00000000000 --- a/core/chains/evm/mocks/orm.go +++ /dev/null @@ -1,367 +0,0 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. 
- -package mocks - -import ( - big "math/big" - - pg "github.com/smartcontractkit/chainlink/core/services/pg" - mock "github.com/stretchr/testify/mock" - - types "github.com/smartcontractkit/chainlink/core/chains/evm/types" - - utils "github.com/smartcontractkit/chainlink/core/utils" -) - -// ORM is an autogenerated mock type for the ORM type -type ORM struct { - mock.Mock -} - -// Chain provides a mock function with given fields: id -func (_m *ORM) Chain(id utils.Big) (types.Chain, error) { - ret := _m.Called(id) - - var r0 types.Chain - if rf, ok := ret.Get(0).(func(utils.Big) types.Chain); ok { - r0 = rf(id) - } else { - r0 = ret.Get(0).(types.Chain) - } - - var r1 error - if rf, ok := ret.Get(1).(func(utils.Big) error); ok { - r1 = rf(id) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Chains provides a mock function with given fields: offset, limit -func (_m *ORM) Chains(offset int, limit int) ([]types.Chain, int, error) { - ret := _m.Called(offset, limit) - - var r0 []types.Chain - if rf, ok := ret.Get(0).(func(int, int) []types.Chain); ok { - r0 = rf(offset, limit) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]types.Chain) - } - } - - var r1 int - if rf, ok := ret.Get(1).(func(int, int) int); ok { - r1 = rf(offset, limit) - } else { - r1 = ret.Get(1).(int) - } - - var r2 error - if rf, ok := ret.Get(2).(func(int, int) error); ok { - r2 = rf(offset, limit) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// Clear provides a mock function with given fields: chainID, key -func (_m *ORM) Clear(chainID *big.Int, key string) error { - ret := _m.Called(chainID, key) - - var r0 error - if rf, ok := ret.Get(0).(func(*big.Int, string) error); ok { - r0 = rf(chainID, key) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// CreateChain provides a mock function with given fields: id, config -func (_m *ORM) CreateChain(id utils.Big, config types.ChainCfg) (types.Chain, error) { - ret := _m.Called(id, config) - - var r0 
types.Chain - if rf, ok := ret.Get(0).(func(utils.Big, types.ChainCfg) types.Chain); ok { - r0 = rf(id, config) - } else { - r0 = ret.Get(0).(types.Chain) - } - - var r1 error - if rf, ok := ret.Get(1).(func(utils.Big, types.ChainCfg) error); ok { - r1 = rf(id, config) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateNode provides a mock function with given fields: data -func (_m *ORM) CreateNode(data types.NewNode) (types.Node, error) { - ret := _m.Called(data) - - var r0 types.Node - if rf, ok := ret.Get(0).(func(types.NewNode) types.Node); ok { - r0 = rf(data) - } else { - r0 = ret.Get(0).(types.Node) - } - - var r1 error - if rf, ok := ret.Get(1).(func(types.NewNode) error); ok { - r1 = rf(data) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteChain provides a mock function with given fields: id -func (_m *ORM) DeleteChain(id utils.Big) error { - ret := _m.Called(id) - - var r0 error - if rf, ok := ret.Get(0).(func(utils.Big) error); ok { - r0 = rf(id) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeleteNode provides a mock function with given fields: id -func (_m *ORM) DeleteNode(id int64) error { - ret := _m.Called(id) - - var r0 error - if rf, ok := ret.Get(0).(func(int64) error); ok { - r0 = rf(id) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// EnabledChainsWithNodes provides a mock function with given fields: -func (_m *ORM) EnabledChainsWithNodes() ([]types.Chain, error) { - ret := _m.Called() - - var r0 []types.Chain - if rf, ok := ret.Get(0).(func() []types.Chain); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]types.Chain) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetChainsByIDs provides a mock function with given fields: ids -func (_m *ORM) GetChainsByIDs(ids []utils.Big) ([]types.Chain, error) { - ret := _m.Called(ids) - - var r0 []types.Chain - if rf, ok := 
ret.Get(0).(func([]utils.Big) []types.Chain); ok { - r0 = rf(ids) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]types.Chain) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func([]utils.Big) error); ok { - r1 = rf(ids) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetNodesByChainIDs provides a mock function with given fields: chainIDs, qopts -func (_m *ORM) GetNodesByChainIDs(chainIDs []utils.Big, qopts ...pg.QOpt) ([]types.Node, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, chainIDs) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 []types.Node - if rf, ok := ret.Get(0).(func([]utils.Big, ...pg.QOpt) []types.Node); ok { - r0 = rf(chainIDs, qopts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]types.Node) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func([]utils.Big, ...pg.QOpt) error); ok { - r1 = rf(chainIDs, qopts...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Node provides a mock function with given fields: id, qopts -func (_m *ORM) Node(id int32, qopts ...pg.QOpt) (types.Node, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, id) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 types.Node - if rf, ok := ret.Get(0).(func(int32, ...pg.QOpt) types.Node); ok { - r0 = rf(id, qopts...) - } else { - r0 = ret.Get(0).(types.Node) - } - - var r1 error - if rf, ok := ret.Get(1).(func(int32, ...pg.QOpt) error); ok { - r1 = rf(id, qopts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Nodes provides a mock function with given fields: offset, limit, qopts -func (_m *ORM) Nodes(offset int, limit int, qopts ...pg.QOpt) ([]types.Node, int, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, offset, limit) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 []types.Node - if rf, ok := ret.Get(0).(func(int, int, ...pg.QOpt) []types.Node); ok { - r0 = rf(offset, limit, qopts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]types.Node) - } - } - - var r1 int - if rf, ok := ret.Get(1).(func(int, int, ...pg.QOpt) int); ok { - r1 = rf(offset, limit, qopts...) - } else { - r1 = ret.Get(1).(int) - } - - var r2 error - if rf, ok := ret.Get(2).(func(int, int, ...pg.QOpt) error); ok { - r2 = rf(offset, limit, qopts...) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// NodesForChain provides a mock function with given fields: chainID, offset, limit, qopts -func (_m *ORM) NodesForChain(chainID utils.Big, offset int, limit int, qopts ...pg.QOpt) ([]types.Node, int, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, chainID, offset, limit) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 []types.Node - if rf, ok := ret.Get(0).(func(utils.Big, int, int, ...pg.QOpt) []types.Node); ok { - r0 = rf(chainID, offset, limit, qopts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]types.Node) - } - } - - var r1 int - if rf, ok := ret.Get(1).(func(utils.Big, int, int, ...pg.QOpt) int); ok { - r1 = rf(chainID, offset, limit, qopts...) - } else { - r1 = ret.Get(1).(int) - } - - var r2 error - if rf, ok := ret.Get(2).(func(utils.Big, int, int, ...pg.QOpt) error); ok { - r2 = rf(chainID, offset, limit, qopts...) 
- } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// StoreString provides a mock function with given fields: chainID, key, val -func (_m *ORM) StoreString(chainID *big.Int, key string, val string) error { - ret := _m.Called(chainID, key, val) - - var r0 error - if rf, ok := ret.Get(0).(func(*big.Int, string, string) error); ok { - r0 = rf(chainID, key, val) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdateChain provides a mock function with given fields: id, enabled, config -func (_m *ORM) UpdateChain(id utils.Big, enabled bool, config types.ChainCfg) (types.Chain, error) { - ret := _m.Called(id, enabled, config) - - var r0 types.Chain - if rf, ok := ret.Get(0).(func(utils.Big, bool, types.ChainCfg) types.Chain); ok { - r0 = rf(id, enabled, config) - } else { - r0 = ret.Get(0).(types.Chain) - } - - var r1 error - if rf, ok := ret.Get(1).(func(utils.Big, bool, types.ChainCfg) error); ok { - r1 = rf(id, enabled, config) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/core/chains/evm/mocks/send_only_node.go b/core/chains/evm/mocks/send_only_node.go index 3cc551e6dfb..6e2131bf7ef 100644 --- a/core/chains/evm/mocks/send_only_node.go +++ b/core/chains/evm/mocks/send_only_node.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/chains/evm/mocks/subscription.go b/core/chains/evm/mocks/subscription.go index 161a0112527..e04aa430d4b 100644 --- a/core/chains/evm/mocks/subscription.go +++ b/core/chains/evm/mocks/subscription.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/chains/evm/monitor/balance.go b/core/chains/evm/monitor/balance.go index c504f5a445b..f0ec7e3e888 100644 --- a/core/chains/evm/monitor/balance.go +++ b/core/chains/evm/monitor/balance.go @@ -164,7 +164,7 @@ func (w *worker) Work() { } func (w *worker) WorkCtx(ctx context.Context) { - keys, err := w.bm.ethKeyStore.SendingKeys() + keys, err := w.bm.ethKeyStore.SendingKeys(nil) if err != nil { w.bm.logger.Error("BalanceMonitor: error getting keys", err) } diff --git a/core/chains/evm/orm.go b/core/chains/evm/orm.go index b795e084961..140de60946a 100644 --- a/core/chains/evm/orm.go +++ b/core/chains/evm/orm.go @@ -1,215 +1,17 @@ package evm import ( - "database/sql" - "math/big" - - "github.com/lib/pq" - "github.com/pkg/errors" - "github.com/smartcontractkit/sqlx" + "github.com/smartcontractkit/chainlink/core/chains" "github.com/smartcontractkit/chainlink/core/chains/evm/types" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/pg" "github.com/smartcontractkit/chainlink/core/utils" ) -type orm struct { - db *sqlx.DB - q pg.Q -} - -var _ types.ORM = (*orm)(nil) - // NewORM returns a new EVM ORM func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.LogConfig) types.ORM { - lggr = lggr.Named("EVMORM") - return &orm{db, pg.NewQ(db, lggr, cfg)} -} - -func (o *orm) Chain(id utils.Big) (chain types.Chain, err error) { - sql := `SELECT * FROM evm_chains WHERE id = $1` - err = o.db.Get(&chain, sql, id) - return chain, err -} - -func (o *orm) CreateChain(id utils.Big, config types.ChainCfg) (chain types.Chain, err error) { - sql := `INSERT INTO evm_chains (id, cfg, created_at, updated_at) VALUES ($1, $2, now(), now()) RETURNING *` - err = o.db.Get(&chain, sql, id, config) - return chain, err -} - -func (o *orm) UpdateChain(id utils.Big, enabled bool, config types.ChainCfg) (chain types.Chain, err error) { - sql := `UPDATE evm_chains SET enabled = $1, cfg = $2, updated_at = now() WHERE id = $3 
RETURNING *` - err = o.db.Get(&chain, sql, enabled, config, id) - return chain, err -} - -func (o *orm) DeleteChain(id utils.Big) error { - q := `DELETE FROM evm_chains WHERE id = $1` - result, err := o.db.Exec(q, id) - if err != nil { - return err - } - rowsAffected, err := result.RowsAffected() - if err != nil { - return err - } - if rowsAffected == 0 { - return sql.ErrNoRows - } - return nil -} - -func (o *orm) Chains(offset, limit int) (chains []types.Chain, count int, err error) { - if err = o.db.Get(&count, "SELECT COUNT(*) FROM evm_chains"); err != nil { - return - } - - sql := `SELECT * FROM evm_chains ORDER BY created_at, id LIMIT $1 OFFSET $2;` - if err = o.db.Select(&chains, sql, limit, offset); err != nil { - return - } - - return -} - -// GetChainsByIDs fetches allow nodes for the given chain ids. -func (o *orm) GetChainsByIDs(ids []utils.Big) (chains []types.Chain, err error) { - sql := `SELECT * FROM evm_chains WHERE id = ANY($1) ORDER BY created_at, id;` - - chainIDs := pq.Array(ids) - if err = o.db.Select(&chains, sql, chainIDs); err != nil { - return nil, err - } - - return chains, nil -} - -func (o *orm) CreateNode(data types.NewNode) (node types.Node, err error) { - sql := `INSERT INTO evm_nodes (name, evm_chain_id, ws_url, http_url, send_only, created_at, updated_at) - VALUES (:name, :evm_chain_id, :ws_url, :http_url, :send_only, now(), now()) - RETURNING *;` - stmt, err := o.db.PrepareNamed(sql) - if err != nil { - return node, err - } - err = stmt.Get(&node, data) - return node, err -} - -func (o *orm) DeleteNode(id int64) error { - q := `DELETE FROM evm_nodes WHERE id = $1` - result, err := o.db.Exec(q, id) - if err != nil { - return err - } - rowsAffected, err := result.RowsAffected() - if err != nil { - return err - } - if rowsAffected == 0 { - return sql.ErrNoRows - } - return nil -} - -func (o *orm) EnabledChainsWithNodes() (chains []types.Chain, err error) { - var nodes []types.Node - chainsSQL := `SELECT * FROM evm_chains WHERE enabled 
ORDER BY created_at, id;` - if err = o.db.Select(&chains, chainsSQL); err != nil { - return - } - nodesSQL := `SELECT * FROM evm_nodes ORDER BY created_at, id;` - if err = o.db.Select(&nodes, nodesSQL); err != nil { - return - } - nodemap := make(map[string][]types.Node) - for _, n := range nodes { - nodemap[n.EVMChainID.String()] = append(nodemap[n.EVMChainID.String()], n) - } - for i, c := range chains { - chains[i].Nodes = nodemap[c.ID.String()] - } - return chains, nil -} - -func (o *orm) Nodes(offset, limit int, qopts ...pg.QOpt) (nodes []types.Node, count int, err error) { - err = o.q.WithOpts(qopts...).Transaction(func(q pg.Queryer) error { - if err = o.db.Get(&count, "SELECT COUNT(*) FROM evm_nodes"); err != nil { - return errors.Wrap(err, "Nodes failed to fetch nodes count") - } - - sql := `SELECT * FROM evm_nodes ORDER BY created_at, id LIMIT $1 OFFSET $2;` - err = o.db.Select(&nodes, sql, limit, offset) - return errors.Wrap(err, "Nodes failed to fetch nodes") - }) - - return -} - -// GetNodesByChainIDs fetches allow nodes for the given chain ids. 
-func (o *orm) GetNodesByChainIDs(chainIDs []utils.Big, qopts ...pg.QOpt) (nodes []types.Node, err error) { - sql := `SELECT * FROM evm_nodes WHERE evm_chain_id = ANY($1) ORDER BY created_at, id;` - - cids := pq.Array(chainIDs) - if err = o.q.WithOpts(qopts...).Select(&nodes, sql, cids); err != nil { - return nil, err - } - - return nodes, nil -} - -func (o *orm) NodesForChain(chainID utils.Big, offset, limit int, qopts ...pg.QOpt) (nodes []types.Node, count int, err error) { - err = o.q.WithOpts(qopts...).Transaction(func(q pg.Queryer) error { - if err = q.Get(&count, "SELECT COUNT(*) FROM evm_nodes WHERE evm_chain_id = $1", chainID); err != nil { - return errors.Wrap(err, "NodesForChain failed to fetch nodes count") - } - - sql := `SELECT * FROM evm_nodes WHERE evm_chain_id = $1 ORDER BY created_at, id LIMIT $2 OFFSET $3;` - err = q.Select(&nodes, sql, chainID, limit, offset) - return errors.Wrap(err, "NodesForChain failed to fetch nodes") - }, pg.OptReadOnlyTx()) - - return -} - -func (o *orm) Node(id int32, qopts ...pg.QOpt) (node types.Node, err error) { - q := o.q.WithOpts(qopts...) 
- err = q.Get(&node, "SELECT * FROM evm_nodes WHERE id = $1;", id) - - return -} - -// StoreString saves a string value into the config for the given chain and key -func (o *orm) StoreString(chainID *big.Int, name, val string) error { - res, err := o.db.Exec(`UPDATE evm_chains SET cfg = cfg || jsonb_build_object($1::text, $2::text) WHERE id = $3`, name, val, utils.NewBig(chainID)) - if err != nil { - return errors.Wrapf(err, "failed to store chain config for chain ID %s", chainID.String()) - } - rowsAffected, err := res.RowsAffected() - if err != nil { - return err - } - if rowsAffected == 0 { - return errors.Wrapf(sql.ErrNoRows, "no chain found with ID %s", chainID.String()) - } - return nil -} - -// Clear deletes a config value for the given chain and key -func (o *orm) Clear(chainID *big.Int, name string) error { - res, err := o.db.Exec(`UPDATE evm_chains SET cfg = cfg - $1 WHERE id = $2`, name, utils.NewBig(chainID)) - if err != nil { - return errors.Wrapf(err, "failed to clear chain config for chain ID %s", chainID.String()) - } - rowsAffected, err := res.RowsAffected() - if err != nil { - return err - } - if rowsAffected == 0 { - return errors.Wrapf(sql.ErrNoRows, "no chain found with ID %s", chainID.String()) - } - return nil + q := pg.NewQ(db, lggr.Named("EVMORM"), cfg) + return chains.NewORM[utils.Big, types.ChainCfg, types.Node](q, "evm", "ws_url", "http_url", "send_only") } diff --git a/core/chains/evm/orm_test.go b/core/chains/evm/orm_test.go index 992196b6b74..03037b2aad2 100644 --- a/core/chains/evm/orm_test.go +++ b/core/chains/evm/orm_test.go @@ -38,7 +38,7 @@ func mustInsertChain(t *testing.T, orm types.ORM) types.Chain { func mustInsertNode(t *testing.T, orm types.ORM, chainID utils.Big) types.Node { t.Helper() - params := types.NewNode{ + params := types.Node{ Name: "Test node", EVMChainID: chainID, WSURL: null.StringFrom("ws://localhost:8546"), @@ -90,7 +90,7 @@ func Test_EVMORM_CreateNode(t *testing.T) { _, initialCount, err := orm.Nodes(0, 25) 
require.NoError(t, err) - params := types.NewNode{ + params := types.Node{ Name: "Test node", EVMChainID: chain.ID, WSURL: null.StringFrom("ws://localhost:8546"), diff --git a/core/chains/evm/txmgr/eth_broadcaster.go b/core/chains/evm/txmgr/eth_broadcaster.go index baa034d2124..7e678366d5a 100644 --- a/core/chains/evm/txmgr/eth_broadcaster.go +++ b/core/chains/evm/txmgr/eth_broadcaster.go @@ -17,10 +17,10 @@ import ( evmclient "github.com/smartcontractkit/chainlink/core/chains/evm/client" "github.com/smartcontractkit/chainlink/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/core/chains/evm/label" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/keystore/keys/ethkey" "github.com/smartcontractkit/chainlink/core/services/pg" - "github.com/smartcontractkit/chainlink/core/static" "github.com/smartcontractkit/chainlink/core/utils" ) @@ -272,7 +272,7 @@ func (eb *EthBroadcaster) processUnstartedEthTxs(ctx context.Context, fromAddres if err != nil { return errors.Wrap(err, "CountUnstartedTransactions failed") } - eb.logger.Warnw(fmt.Sprintf(`Transaction throttling; %d transactions in-flight and %d unstarted transactions pending (maximum number of in-flight transactions is %d per key). %s`, nUnconfirmed, nUnstarted, maxInFlightTransactions, static.EvmMaxInFlightTransactionsWarningLabel), "maxInFlightTransactions", maxInFlightTransactions, "nUnconfirmed", nUnconfirmed, "nUnstarted", nUnstarted) + eb.logger.Warnw(fmt.Sprintf(`Transaction throttling; %d transactions in-flight and %d unstarted transactions pending (maximum number of in-flight transactions is %d per key). 
%s`, nUnconfirmed, nUnstarted, maxInFlightTransactions, label.MaxInFlightTransactionsWarning), "maxInFlightTransactions", maxInFlightTransactions, "nUnconfirmed", nUnconfirmed, "nUnstarted", nUnstarted) time.Sleep(InFlightTransactionRecheckInterval) continue } @@ -375,47 +375,44 @@ func (eb *EthBroadcaster) handleInProgressEthTx(ctx context.Context, etx EthTx, return errors.Wrap(err, "building transmit checker") } + lgr := etx.GetLogger(eb.logger.With( + "gasPrice", attempt.GasPrice, + "gasTipCap", attempt.GasTipCap, + "gasFeeCap", attempt.GasFeeCap, + )) + // If the transmit check does not complete within the timeout, the transaction will be sent // anyway. checkCtx, cancel := context.WithTimeout(ctx, TransmitCheckTimeout) defer cancel() - err = checker.Check(checkCtx, eb.logger, etx, attempt) + err = checker.Check(checkCtx, lgr, etx, attempt) if errors.Is(err, context.Canceled) { - eb.logger.Errorw("Transmission checker timed out, sending anyway", - "ethTxId", etx.ID, - "meta", etx.Meta, - "checker", etx.TransmitChecker) + lgr.Warn("Transmission checker timed out, sending anyway") } else if err != nil { etx.Error = null.StringFrom(err.Error()) - eb.logger.Infow("Transmission checker failed, fatally erroring transaction.", - "ethTxId", etx.ID, - "meta", etx.Meta, - "checker", etx.TransmitChecker, - "err", err) - return eb.saveFatallyErroredTransaction(&etx) + lgr.Warnw("Transmission checker failed, fatally erroring transaction.", "err", err) + return eb.saveFatallyErroredTransaction(lgr, &etx) } cancel() - sendError := sendTransaction(ctx, eb.ethClient, attempt, etx, eb.logger) - + sendError := sendTransaction(ctx, eb.ethClient, attempt, etx, lgr) if sendError.IsTooExpensive() { - eb.logger.Criticalw("Transaction gas price was rejected by the eth node for being too high. Consider increasing your eth node's RPCTxFeeCap (it is suggested to run geth with no cap i.e. 
--rpc.gascap=0 --rpc.txfeecap=0)", + lgr.Criticalw(fmt.Sprintf("Sending transaction failed; %s", label.RPCTxFeeCapConfiguredIncorrectlyWarning), "ethTxID", etx.ID, "err", sendError, - "gasPrice", attempt.GasPrice, - "gasLimit", etx.GasLimit, "id", "RPCTxFeeCapExceeded", ) etx.Error = null.StringFrom(sendError.Error()) // Attempt is thrown away in this case; we don't need it since it never got accepted by a node - return eb.saveFatallyErroredTransaction(&etx) + return eb.saveFatallyErroredTransaction(lgr, &etx) } if sendError.Fatal() { - eb.logger.Criticalw("Fatal error sending transaction", "ethTxID", etx.ID, "error", sendError, "gasLimit", etx.GasLimit, "gasPrice", attempt.GasPrice) + lgr.Criticalw("Fatal error sending transaction", + "err", sendError) etx.Error = null.StringFrom(sendError.Error()) // Attempt is thrown away in this case; we don't need it since it never got accepted by a node - return eb.saveFatallyErroredTransaction(&etx) + return eb.saveFatallyErroredTransaction(lgr, &etx) } etx.BroadcastAt = &initialBroadcastAt @@ -463,12 +460,12 @@ func (eb *EthBroadcaster) handleInProgressEthTx(ctx context.Context, etx EthTx, } if sendError.IsTerminallyUnderpriced() { - return eb.tryAgainBumpingGas(ctx, sendError, etx, attempt, initialBroadcastAt) + return eb.tryAgainBumpingGas(ctx, lgr, sendError, etx, attempt, initialBroadcastAt) } // Optimism-specific cases if sendError.IsFeeTooLow() || sendError.IsFeeTooHigh() { - return eb.tryAgainWithNewEstimation(ctx, sendError, etx, attempt, initialBroadcastAt) + return eb.tryAgainWithNewEstimation(ctx, lgr, sendError, etx, attempt, initialBroadcastAt) } if sendError.IsTemporarilyUnderpriced() { @@ -476,17 +473,16 @@ func (eb *EthBroadcaster) handleInProgressEthTx(ctx context.Context, etx EthTx, // success (even though the transaction will never confirm) and hand // off to the ethConfirmer to bump gas periodically until we _can_ get // it in - eb.logger.Infow("Transaction temporarily underpriced", "ethTxID", etx.ID, 
"err", sendError.Error(), "gasPrice", attempt.GasPrice, "gasTipCap", attempt.GasTipCap, "gasFeeCap", attempt.GasFeeCap) + lgr.Infow("Transaction temporarily underpriced", "err", sendError.Error()) sendError = nil } if sendError.IsInsufficientEth() { - eb.logger.Errorw(fmt.Sprintf("Tx 0x%x with type 0x%d was rejected due to insufficient eth. "+ + lgr.Errorw(fmt.Sprintf("Tx 0x%x with type 0x%d was rejected due to insufficient eth. "+ "The eth node returned %s. "+ "ACTION REQUIRED: Chainlink wallet with address 0x%x is OUT OF FUNDS", attempt.Hash, attempt.TxType, sendError.Error(), etx.FromAddress, - ), "ethTxID", etx.ID, "err", sendError, "gasPrice", attempt.GasPrice, - "gasTipCap", attempt.GasTipCap, "gasFeeCap", attempt.GasFeeCap) + ), "err", sendError) // NOTE: This bails out of the entire cycle and essentially "blocks" on // any transaction that gets insufficient_eth. This is OK if a // transaction with a large VALUE blocks because this always comes last @@ -592,32 +588,30 @@ func saveAttempt(q pg.Q, etx *EthTx, attempt EthTxAttempt, NewAttemptState EthTx }) } -func (eb *EthBroadcaster) tryAgainBumpingGas(ctx context.Context, sendError *evmclient.SendError, etx EthTx, attempt EthTxAttempt, initialBroadcastAt time.Time) error { - eb.logger. - With( - "sendError", sendError, - "attemptGasFeeCap", attempt.GasFeeCap, - "attemptGasPrice", attempt.GasPrice, - "attemptGasTipCap", attempt.GasTipCap, - "maxGasPriceConfig", eb.config.EvmMaxGasPriceWei(), - ). - Errorf("attempt gas price %v wei was rejected by the eth node for being too low. "+ - "Eth node returned: '%s'. "+ - "Will bump and retry. ACTION REQUIRED: This is a configuration error. 
"+ - "Consider increasing ETH_GAS_PRICE_DEFAULT (current value: %s)", - attempt.GasPrice, sendError.Error(), eb.config.EvmGasPriceDefault().String()) +func (eb *EthBroadcaster) tryAgainBumpingGas(ctx context.Context, lgr logger.Logger, sendError *evmclient.SendError, etx EthTx, attempt EthTxAttempt, initialBroadcastAt time.Time) error { + lgr.With( + "sendError", sendError, + "attemptGasFeeCap", attempt.GasFeeCap, + "attemptGasPrice", attempt.GasPrice, + "attemptGasTipCap", attempt.GasTipCap, + "maxGasPriceConfig", eb.config.EvmMaxGasPriceWei(), + ).Errorf("attempt gas price %v wei was rejected by the eth node for being too low. "+ + "Eth node returned: '%s'. "+ + "Will bump and retry. ACTION REQUIRED: This is a configuration error. "+ + "Consider increasing ETH_GAS_PRICE_DEFAULT (current value: %s)", + attempt.GasPrice, sendError.Error(), eb.config.EvmGasPriceDefault().String()) switch attempt.TxType { case 0x0: - return eb.tryAgainBumpingLegacyGas(ctx, sendError, etx, attempt, initialBroadcastAt) + return eb.tryAgainBumpingLegacyGas(ctx, lgr, etx, attempt, initialBroadcastAt) case 0x2: - return eb.tryAgainBumpingDynamicFeeGas(ctx, sendError, etx, attempt, initialBroadcastAt) + return eb.tryAgainBumpingDynamicFeeGas(ctx, lgr, etx, attempt, initialBroadcastAt) default: return errors.Errorf("invariant violation: Attempt %v had unrecognised transaction type %v"+ "This is a bug! 
Please report to https://github.com/smartcontractkit/chainlink/issues", attempt.ID, attempt.TxType) } } -func (eb *EthBroadcaster) tryAgainBumpingLegacyGas(ctx context.Context, sendError *evmclient.SendError, etx EthTx, attempt EthTxAttempt, initialBroadcastAt time.Time) error { +func (eb *EthBroadcaster) tryAgainBumpingLegacyGas(ctx context.Context, lgr logger.Logger, etx EthTx, attempt EthTxAttempt, initialBroadcastAt time.Time) error { bumpedGasPrice, bumpedGasLimit, err := eb.estimator.BumpLegacyGas(attempt.GasPrice.ToInt(), etx.GasLimit) if err != nil { return errors.Wrap(err, "tryAgainBumpingLegacyGas failed") @@ -625,10 +619,10 @@ func (eb *EthBroadcaster) tryAgainBumpingLegacyGas(ctx context.Context, sendErro if bumpedGasPrice.Cmp(attempt.GasPrice.ToInt()) == 0 || bumpedGasPrice.Cmp(eb.config.EvmMaxGasPriceWei()) >= 0 { return errors.Errorf("Hit gas price bump ceiling, will not bump further. This is a terminal error") } - return eb.tryAgainWithNewLegacyGas(ctx, etx, attempt, initialBroadcastAt, bumpedGasPrice, bumpedGasLimit) + return eb.tryAgainWithNewLegacyGas(ctx, lgr, etx, attempt, initialBroadcastAt, bumpedGasPrice, bumpedGasLimit) } -func (eb *EthBroadcaster) tryAgainBumpingDynamicFeeGas(ctx context.Context, sendError *evmclient.SendError, etx EthTx, attempt EthTxAttempt, initialBroadcastAt time.Time) error { +func (eb *EthBroadcaster) tryAgainBumpingDynamicFeeGas(ctx context.Context, lgr logger.Logger, etx EthTx, attempt EthTxAttempt, initialBroadcastAt time.Time) error { bumpedFee, bumpedGasLimit, err := eb.estimator.BumpDynamicFee(attempt.DynamicFee(), etx.GasLimit) if err != nil { return errors.Wrap(err, "tryAgainBumpingDynamicFeeGas failed") @@ -636,10 +630,10 @@ func (eb *EthBroadcaster) tryAgainBumpingDynamicFeeGas(ctx context.Context, send if bumpedFee.TipCap.Cmp(attempt.GasTipCap.ToInt()) == 0 || bumpedFee.FeeCap.Cmp(attempt.GasFeeCap.ToInt()) == 0 || bumpedFee.TipCap.Cmp(eb.config.EvmMaxGasPriceWei()) >= 0 || 
bumpedFee.TipCap.Cmp(eb.config.EvmMaxGasPriceWei()) >= 0 { return errors.Errorf("Hit gas price bump ceiling, will not bump further. This is a terminal error") } - return eb.tryAgainWithNewDynamicFeeGas(ctx, etx, attempt, initialBroadcastAt, bumpedFee, bumpedGasLimit) + return eb.tryAgainWithNewDynamicFeeGas(ctx, lgr, etx, attempt, initialBroadcastAt, bumpedFee, bumpedGasLimit) } -func (eb *EthBroadcaster) tryAgainWithNewEstimation(ctx context.Context, sendError *evmclient.SendError, etx EthTx, attempt EthTxAttempt, initialBroadcastAt time.Time) error { +func (eb *EthBroadcaster) tryAgainWithNewEstimation(ctx context.Context, lgr logger.Logger, sendError *evmclient.SendError, etx EthTx, attempt EthTxAttempt, initialBroadcastAt time.Time) error { if attempt.TxType == 0x2 { return errors.Errorf("AssumptionViolation: re-estimation is not supported for EIP-1559 transactions. Eth node returned error: %v. This is a bug.", sendError.Error()) } @@ -647,12 +641,12 @@ func (eb *EthBroadcaster) tryAgainWithNewEstimation(ctx context.Context, sendErr if err != nil { return errors.Wrap(err, "tryAgainWithNewEstimation failed to estimate gas") } - eb.logger.Debugw("Optimism rejected transaction due to incorrect fee, re-estimated and will try again", + lgr.Warnw("Optimism rejected transaction due to incorrect fee, re-estimated and will try again", "etxID", etx.ID, "err", err, "newGasPrice", gasPrice, "newGasLimit", gasLimit) - return eb.tryAgainWithNewLegacyGas(ctx, etx, attempt, initialBroadcastAt, gasPrice, gasLimit) + return eb.tryAgainWithNewLegacyGas(ctx, lgr, etx, attempt, initialBroadcastAt, gasPrice, gasLimit) } -func (eb *EthBroadcaster) tryAgainWithNewLegacyGas(ctx context.Context, etx EthTx, attempt EthTxAttempt, initialBroadcastAt time.Time, newGasPrice *big.Int, newGasLimit uint64) error { +func (eb *EthBroadcaster) tryAgainWithNewLegacyGas(ctx context.Context, lgr logger.Logger, etx EthTx, attempt EthTxAttempt, initialBroadcastAt time.Time, newGasPrice *big.Int, 
newGasLimit uint64) error { replacementAttempt, err := eb.NewLegacyAttempt(etx, newGasPrice, newGasLimit) if err != nil { return errors.Wrap(err, "tryAgainWithNewLegacyGas failed") @@ -661,11 +655,11 @@ func (eb *EthBroadcaster) tryAgainWithNewLegacyGas(ctx context.Context, etx EthT if err = saveReplacementInProgressAttempt(eb.q, attempt, &replacementAttempt); err != nil { return errors.Wrap(err, "tryAgainWithNewLegacyGas failed") } - eb.logger.Debugw("Bumped legacy gas on initial send", "oldGasPrice", attempt.GasPrice, "newGasPrice", newGasPrice) + lgr.Debugw("Bumped legacy gas on initial send", "oldGasPrice", attempt.GasPrice, "newGasPrice", newGasPrice) return eb.handleInProgressEthTx(ctx, etx, replacementAttempt, initialBroadcastAt) } -func (eb *EthBroadcaster) tryAgainWithNewDynamicFeeGas(ctx context.Context, etx EthTx, attempt EthTxAttempt, initialBroadcastAt time.Time, newDynamicFee gas.DynamicFee, newGasLimit uint64) error { +func (eb *EthBroadcaster) tryAgainWithNewDynamicFeeGas(ctx context.Context, lgr logger.Logger, etx EthTx, attempt EthTxAttempt, initialBroadcastAt time.Time, newDynamicFee gas.DynamicFee, newGasLimit uint64) error { replacementAttempt, err := eb.NewDynamicFeeAttempt(etx, newDynamicFee, newGasLimit) if err != nil { return errors.Wrap(err, "tryAgainWithNewDynamicFeeGas failed") @@ -674,11 +668,11 @@ func (eb *EthBroadcaster) tryAgainWithNewDynamicFeeGas(ctx context.Context, etx if err = saveReplacementInProgressAttempt(eb.q, attempt, &replacementAttempt); err != nil { return errors.Wrap(err, "tryAgainWithNewDynamicFeeGas failed") } - eb.logger.Debugw("Bumped dynamic fee gas on initial send", "oldFee", attempt.DynamicFee(), "newFee", newDynamicFee) + lgr.Debugw("Bumped dynamic fee gas on initial send", "oldFee", attempt.DynamicFee(), "newFee", newDynamicFee) return eb.handleInProgressEthTx(ctx, etx, replacementAttempt, initialBroadcastAt) } -func (eb *EthBroadcaster) saveFatallyErroredTransaction(etx *EthTx) error { +func (eb 
*EthBroadcaster) saveFatallyErroredTransaction(lgr logger.Logger, etx *EthTx) error { if etx.State != EthTxInProgress { return errors.Errorf("can only transition to fatal_error from in_progress, transaction is currently %s", etx.State) } @@ -700,7 +694,7 @@ func (eb *EthBroadcaster) saveFatallyErroredTransaction(etx *EthTx) error { if etx.PipelineTaskRunID.Valid && eb.resumeCallback != nil { err := eb.resumeCallback(etx.PipelineTaskRunID.UUID, nil, errors.Errorf("fatal error while sending transaction: %s", etx.Error.String)) if errors.Is(err, sql.ErrNoRows) { - eb.logger.Debugw("callback missing or already resumed", "etxID", etx.ID) + lgr.Debugw("callback missing or already resumed", "etxID", etx.ID) } else if err != nil { return errors.Wrap(err, "failed to resume pipeline") } diff --git a/core/chains/evm/txmgr/eth_broadcaster_test.go b/core/chains/evm/txmgr/eth_broadcaster_test.go index 717e0c0ae0e..aca790e4839 100644 --- a/core/chains/evm/txmgr/eth_broadcaster_test.go +++ b/core/chains/evm/txmgr/eth_broadcaster_test.go @@ -530,7 +530,7 @@ func TestEthBroadcaster_TransmitChecking(t *testing.T) { func TestEthBroadcaster_ProcessUnstartedEthTxs_OptimisticLockingOnEthTx(t *testing.T) { // non-transactional DB needed because we deliberately test for FK violation - cfg, db := heavyweight.FullTestDB(t, "eth_broadcaster_optimistic_locking", true, true) + cfg, db := heavyweight.FullTestDB(t, "eth_broadcaster_optimistic_locking") borm := cltest.NewTxmORM(t, db, cfg) evmcfg := evmtest.NewChainScopedConfig(t, cfg) ethClient := cltest.NewEthClientMockWithDefaultChain(t) @@ -1651,7 +1651,7 @@ func TestEthBroadcaster_Trigger(t *testing.T) { func TestEthBroadcaster_EthTxInsertEventCausesTriggerToFire(t *testing.T) { // NOTE: Testing triggers requires committing transactions and does not work with transactional tests - cfg, db := heavyweight.FullTestDB(t, "eth_tx_triggers", true, true) + cfg, db := heavyweight.FullTestDB(t, "eth_tx_triggers") borm := cltest.NewTxmORM(t, db, cfg) 
evmcfg := evmtest.NewChainScopedConfig(t, cfg) diff --git a/core/chains/evm/txmgr/eth_confirmer.go b/core/chains/evm/txmgr/eth_confirmer.go index 6b4e06dab56..2f55e40e283 100644 --- a/core/chains/evm/txmgr/eth_confirmer.go +++ b/core/chains/evm/txmgr/eth_confirmer.go @@ -25,12 +25,12 @@ import ( evmclient "github.com/smartcontractkit/chainlink/core/chains/evm/client" "github.com/smartcontractkit/chainlink/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/core/chains/evm/label" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/null" "github.com/smartcontractkit/chainlink/core/services/keystore/keys/ethkey" "github.com/smartcontractkit/chainlink/core/services/pg" - "github.com/smartcontractkit/chainlink/core/static" "github.com/smartcontractkit/chainlink/core/utils" ) @@ -98,7 +98,7 @@ type EthConfirmer struct { keyStates []ethkey.State - mb *utils.Mailbox + mb *utils.Mailbox[*evmtypes.Head] ctx context.Context ctxCancel context.CancelFunc wg sync.WaitGroup @@ -128,7 +128,7 @@ func NewEthConfirmer(db *sqlx.DB, ethClient evmclient.Client, config Config, key estimator, resumeCallback, keyStates, - utils.NewMailbox(1), + utils.NewMailbox[*evmtypes.Head](1), context, cancel, sync.WaitGroup{}, @@ -175,8 +175,7 @@ func (ec *EthConfirmer) runLoop() { if !exists { break } - h := evmtypes.AsHead(head) - if err := ec.ProcessHead(ec.ctx, h); err != nil { + if err := ec.ProcessHead(ec.ctx, head); err != nil { ec.lggr.Errorw("Error processing head", "err", err) continue } @@ -483,10 +482,10 @@ func (ec *EthConfirmer) batchFetchReceipts(ctx context.Context, attempts []EthTx return nil, errors.Errorf("expected result to be a %T, got %T", (*evmtypes.Receipt)(nil), result) } - l := lggr.With( + l := logger.Sugared(lggr.With( "txHash", attempt.Hash.Hex(), "ethTxAttemptID", attempt.ID, "ethTxID", attempt.EthTxID, "err", err, "nonce", attempt.EthTx.Nonce, - ) 
+ )) if err != nil { l.Error("FetchReceipt failed") @@ -496,7 +495,7 @@ func (ec *EthConfirmer) batchFetchReceipts(ctx context.Context, attempts []EthTx if receipt == nil { // NOTE: This should never happen, but it seems safer to check // regardless to avoid a potential panic - l.Error("AssumptionViolation: got nil receipt") + l.AssumptionViolation("got nil receipt") continue } @@ -505,7 +504,7 @@ func (ec *EthConfirmer) batchFetchReceipts(ctx context.Context, attempts []EthTx continue } - l = l.With("blockHash", receipt.BlockHash.Hex(), "status", receipt.Status, "transactionIndex", receipt.TransactionIndex) + l = logger.Sugared(l.With("blockHash", receipt.BlockHash.Hex(), "status", receipt.Status, "transactionIndex", receipt.TransactionIndex)) if receipt.IsUnmined() { l.Debug("Got receipt for transaction but it's still in the mempool and not included in a block yet") @@ -880,7 +879,7 @@ func FindEthTxsRequiringRebroadcast(ctx context.Context, q pg.Q, lggr logger.Log } else { lggr.Warnw("Expected eth_tx for gas bump to have at least one attempt", "etxID", etx.ID, "blockNum", blockNum, "address", address) } - lggr.Infow(fmt.Sprintf("Found %d transactions to re-sent that have still not been confirmed after at least %d blocks. The oldest of these has not still not been confirmed after %d blocks. These transactions will have their gas price bumped. %s", len(etxBumps), gasBumpThreshold, oldestBlocksBehind, static.EthNodeConnectivityProblemLabel), "blockNum", blockNum, "address", address, "gasBumpThreshold", gasBumpThreshold) + lggr.Infow(fmt.Sprintf("Found %d transactions to re-sent that have still not been confirmed after at least %d blocks. The oldest of these has not still not been confirmed after %d blocks. These transactions will have their gas price bumped. 
%s", len(etxBumps), gasBumpThreshold, oldestBlocksBehind, label.NodeConnectivityProblemWarning), "blockNum", blockNum, "address", address, "gasBumpThreshold", gasBumpThreshold) } seen := make(map[int64]struct{}) @@ -900,7 +899,7 @@ func FindEthTxsRequiringRebroadcast(ctx context.Context, q pg.Q, lggr logger.Log }) if maxInFlightTransactions > 0 && len(etxs) > int(maxInFlightTransactions) { - lggr.Warnf("%d transactions to rebroadcast which exceeds limit of %d. %s", len(etxs), maxInFlightTransactions, static.EvmMaxInFlightTransactionsWarningLabel) + lggr.Warnf("%d transactions to rebroadcast which exceeds limit of %d. %s", len(etxs), maxInFlightTransactions, label.MaxInFlightTransactionsWarning) etxs = etxs[:maxInFlightTransactions] } @@ -1124,7 +1123,7 @@ func (ec *EthConfirmer) handleInProgressAttempt(ctx context.Context, etx EthTx, // // Best thing we can do is to re-send the previous attempt at the old // price and discard this bumped version. - ec.lggr.Errorw("Bumped transaction gas price was rejected by the eth node for being too high. Consider increasing your eth node's RPCTxFeeCap (it is suggested to run geth with no cap i.e. 
--rpc.gascap=0 --rpc.txfeecap=0)", + ec.lggr.Errorw(fmt.Sprintf("Transaction gas bump failed; %s", label.RPCTxFeeCapConfiguredIncorrectlyWarning), "ethTxID", etx.ID, "err", sendError, "gasPrice", attempt.GasPrice, diff --git a/core/chains/evm/txmgr/eth_confirmer_test.go b/core/chains/evm/txmgr/eth_confirmer_test.go index 706511d8898..f5d30ccfec6 100644 --- a/core/chains/evm/txmgr/eth_confirmer_test.go +++ b/core/chains/evm/txmgr/eth_confirmer_test.go @@ -2236,7 +2236,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary_WhenOutOfEth(t *testing.T) { _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore, 0) - keys, err := ethKeyStore.SendingKeys() + keys, err := ethKeyStore.SendingKeys(nil) require.NoError(t, err) keyStates, err := ethKeyStore.GetStatesForKeys(keys) require.NoError(t, err) diff --git a/core/chains/evm/txmgr/eth_resender.go b/core/chains/evm/txmgr/eth_resender.go index 0c13eec2a23..04ccf224e4b 100644 --- a/core/chains/evm/txmgr/eth_resender.go +++ b/core/chains/evm/txmgr/eth_resender.go @@ -11,9 +11,9 @@ import ( "github.com/smartcontractkit/sqlx" evmclient "github.com/smartcontractkit/chainlink/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/core/chains/evm/label" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/null" - "github.com/smartcontractkit/chainlink/core/static" "github.com/smartcontractkit/chainlink/core/utils" ) @@ -111,7 +111,7 @@ func (er *EthResender) resendUnconfirmed() error { return nil } - er.logger.Infow(fmt.Sprintf("Re-sending %d unconfirmed transactions that were last sent over %s ago. These transactions are taking longer than usual to be mined. %s", len(attempts), ageThreshold, static.EthNodeConnectivityProblemLabel), "n", len(attempts)) + er.logger.Infow(fmt.Sprintf("Re-sending %d unconfirmed transactions that were last sent over %s ago. These transactions are taking longer than usual to be mined. 
%s", len(attempts), ageThreshold, label.NodeConnectivityProblemWarning), "n", len(attempts)) batchSize := int(er.config.EvmRPCDefaultBatchSize()) reqs, err := batchSendTransactions(er.ctx, er.db, attempts, batchSize, er.logger, er.ethClient) diff --git a/core/chains/evm/txmgr/mocks/config.go b/core/chains/evm/txmgr/mocks/config.go index efc0b3674c3..a518f7d3c0e 100644 --- a/core/chains/evm/txmgr/mocks/config.go +++ b/core/chains/evm/txmgr/mocks/config.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks @@ -6,7 +6,7 @@ import ( big "math/big" common "github.com/ethereum/go-ethereum/common" - chains "github.com/smartcontractkit/chainlink/core/chains" + config "github.com/smartcontractkit/chainlink/core/config" mock "github.com/stretchr/testify/mock" @@ -89,14 +89,14 @@ func (_m *Config) BlockHistoryEstimatorTransactionPercentile() uint16 { } // ChainType provides a mock function with given fields: -func (_m *Config) ChainType() chains.ChainType { +func (_m *Config) ChainType() config.ChainType { ret := _m.Called() - var r0 chains.ChainType - if rf, ok := ret.Get(0).(func() chains.ChainType); ok { + var r0 config.ChainType + if rf, ok := ret.Get(0).(func() config.ChainType); ok { r0 = rf() } else { - r0 = ret.Get(0).(chains.ChainType) + r0 = ret.Get(0).(config.ChainType) } return r0 diff --git a/core/chains/evm/txmgr/mocks/orm.go b/core/chains/evm/txmgr/mocks/orm.go index 4fd09b33c76..5fdf4996bc9 100644 --- a/core/chains/evm/txmgr/mocks/orm.go +++ b/core/chains/evm/txmgr/mocks/orm.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/chains/evm/txmgr/mocks/reaper_config.go b/core/chains/evm/txmgr/mocks/reaper_config.go index ee0411270c3..b50b36652a7 100644 --- a/core/chains/evm/txmgr/mocks/reaper_config.go +++ b/core/chains/evm/txmgr/mocks/reaper_config.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/chains/evm/txmgr/mocks/tx_manager.go b/core/chains/evm/txmgr/mocks/tx_manager.go index f6a3a6c403a..b6869eed4fb 100644 --- a/core/chains/evm/txmgr/mocks/tx_manager.go +++ b/core/chains/evm/txmgr/mocks/tx_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/chains/evm/txmgr/mocks/tx_strategy.go b/core/chains/evm/txmgr/mocks/tx_strategy.go index 1fb518c447e..667ab036577 100644 --- a/core/chains/evm/txmgr/mocks/tx_strategy.go +++ b/core/chains/evm/txmgr/mocks/tx_strategy.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/chains/evm/txmgr/models.go b/core/chains/evm/txmgr/models.go index c3fd166688f..010b1aa0cf1 100644 --- a/core/chains/evm/txmgr/models.go +++ b/core/chains/evm/txmgr/models.go @@ -5,6 +5,7 @@ import ( "database/sql/driver" "encoding/json" "fmt" + "math/big" "time" "github.com/ethereum/go-ethereum/common" @@ -16,21 +17,31 @@ import ( "github.com/smartcontractkit/chainlink/core/assets" "github.com/smartcontractkit/chainlink/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/core/logger" cnull "github.com/smartcontractkit/chainlink/core/null" "github.com/smartcontractkit/chainlink/core/services/pg/datatypes" "github.com/smartcontractkit/chainlink/core/utils" ) +// EthTxMeta contains fields of the transaction metadata type EthTxMeta struct { - JobID int32 - RequestID common.Hash - RequestTxHash common.Hash + JobID int32 `json:"JobID"` + + // VRF-only fields + RequestID common.Hash `json:"RequestID"` + RequestTxHash common.Hash `json:"RequestTxHash"` + // Batch variants of the above + RequestIDs []common.Hash `json:"RequestIDs"` + RequestTxHashes []common.Hash `json:"RequestTxHashes"` // Used for the VRFv2 - max link this tx will bill // should it get bumped - MaxLink string + MaxLink *string `json:"MaxLink,omitempty"` // Used for the VRFv2 - the subscription ID of the // requester of the VRF. - SubID uint64 `json:"SubId"` + SubID *uint64 `json:"SubId,omitempty"` + + // Used for keepers + UpkeepID *int64 `json:"UpkeepID,omitempty"` } // TransmitCheckerSpec defines the check that should be performed before a transaction is submitted @@ -42,6 +53,10 @@ type TransmitCheckerSpec struct { // VRFCoordinatorAddress is the address of the VRF coordinator that should be used to perform // VRF transmit checks. This should be set iff CheckerType is TransmitCheckerTypeVRFV2. VRFCoordinatorAddress common.Address + + // VRFRequestBlockNumber is the block number in which the provided VRF request has been made. 
+ // This should be set iff CheckerType is TransmitCheckerTypeVRFV2. + VRFRequestBlockNumber *big.Int } type EthTxState string @@ -188,6 +203,43 @@ func (e EthTx) GetMeta() (*EthTxMeta, error) { return &m, errors.Wrap(json.Unmarshal(*e.Meta, &m), "unmarshalling meta") } +// GetLogger returns a new logger with metadata fields. +func (e EthTx) GetLogger(lgr logger.Logger) logger.Logger { + lgr = lgr.With( + "ethTxID", e.ID, + "checker", e.TransmitChecker, + "gasLimit", e.GasLimit, + ) + + meta, err := e.GetMeta() + if err != nil { + lgr.Errorw("failed to get meta of the transaction", "err", err) + return lgr + } + + if meta != nil { + lgr = lgr.With( + "jobID", meta.JobID, + "requestID", meta.RequestID, + "requestTxHash", meta.RequestTxHash, + ) + + if meta.UpkeepID != nil { + lgr = lgr.With("upkeepID", *meta.UpkeepID) + } + + if meta.SubID != nil { + lgr = lgr.With("subID", *meta.SubID) + } + + if meta.MaxLink != nil { + lgr = lgr.With("maxLink", *meta.MaxLink) + } + } + + return lgr +} + // GetChecker returns an EthTx's transmit checker spec in struct form, unmarshalling it from JSON // first. 
func (e EthTx) GetChecker() (TransmitCheckerSpec, error) { diff --git a/core/chains/evm/txmgr/transmitchecker.go b/core/chains/evm/txmgr/transmitchecker.go index f4c64a34cdc..6cdf7fd317d 100644 --- a/core/chains/evm/txmgr/transmitchecker.go +++ b/core/chains/evm/txmgr/transmitchecker.go @@ -6,6 +6,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common/hexutil" + gethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/pkg/errors" evmclient "github.com/smartcontractkit/chainlink/core/chains/evm/client" @@ -13,6 +14,7 @@ import ( v2 "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/vrf_coordinator_v2" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/utils" + bigmath "github.com/smartcontractkit/chainlink/core/utils/big_math" ) var ( @@ -48,7 +50,14 @@ func (c *CheckerFactory) BuildChecker(spec TransmitCheckerSpec) (TransmitChecker return nil, errors.Wrapf(err, "failed to create VRF V2 coordinator at address %v", spec.VRFCoordinatorAddress) } - return &VRFV2Checker{coord.GetCommitment}, nil + if spec.VRFRequestBlockNumber == nil { + return nil, errors.New("VRFRequestBlockNumber parameter must be non-nil") + } + return &VRFV2Checker{ + GetCommitment: coord.GetCommitment, + HeaderByNumber: c.Client.HeaderByNumber, + RequestBlockNumber: spec.VRFRequestBlockNumber, + }, nil case "": return NoChecker, nil default: @@ -132,7 +141,7 @@ func (v *VRFV1Checker) Check( if err != nil { l.Errorw("Failed to parse transaction meta. Attempting to transmit anyway.", "err", err, - "ethTxId", tx.ID, + "ethTxID", tx.ID, "meta", tx.Meta) return nil } @@ -140,7 +149,7 @@ func (v *VRFV1Checker) Check( if meta == nil { l.Errorw("Expected a non-nil meta for a VRF transaction. 
Attempting to transmit anyway.", "err", err, - "ethTxId", tx.ID, + "ethTxID", tx.ID, "meta", tx.Meta) return nil } @@ -148,7 +157,7 @@ func (v *VRFV1Checker) Check( if len(meta.RequestID.Bytes()) != 32 { l.Errorw("Unexpected request ID. Attempting to transmit anyway.", "err", err, - "ethTxId", tx.ID, + "ethTxID", tx.ID, "meta", tx.Meta) return nil } @@ -159,7 +168,7 @@ func (v *VRFV1Checker) Check( if err != nil { l.Errorw("Unable to check if already fulfilled. Attempting to transmit anyway.", "err", err, - "ethTxId", tx.ID, + "ethTxID", tx.ID, "meta", tx.Meta, "reqID", reqID) return nil @@ -167,7 +176,7 @@ func (v *VRFV1Checker) Check( // Request already fulfilled l.Infow("Request already fulfilled", "err", err, - "ethTxId", tx.ID, + "ethTxID", tx.ID, "meta", tx.Meta, "reqID", reqID) return errors.New("request already fulfilled") @@ -184,6 +193,13 @@ type VRFV2Checker struct { // GetCommitment checks whether a VRF V2 request has been fulfilled on the VRFCoordinatorV2 // Solidity contract. GetCommitment func(opts *bind.CallOpts, requestID *big.Int) ([32]byte, error) + + // HeaderByNumber fetches the header given the number. If nil is provided, + // the latest header is fetched. + HeaderByNumber func(ctx context.Context, n *big.Int) (*gethtypes.Header, error) + + // RequestBlockNumber is the block number of the VRFV2 request. + RequestBlockNumber *big.Int } // Check satisfies the TransmitChecker interface. @@ -197,7 +213,7 @@ func (v *VRFV2Checker) Check( if err != nil { l.Errorw("Failed to parse transaction meta. Attempting to transmit anyway.", "err", err, - "ethTxId", tx.ID, + "ethTxID", tx.ID, "meta", tx.Meta) return nil } @@ -205,30 +221,60 @@ func (v *VRFV2Checker) Check( if meta == nil { l.Errorw("Expected a non-nil meta for a VRF transaction. Attempting to transmit anyway.", "err", err, - "ethTxId", tx.ID, + "ethTxID", tx.ID, "meta", tx.Meta) return nil } + h, err := v.HeaderByNumber(ctx, nil) + if err != nil { + l.Errorw("Failed to fetch latest header. 
Attempting to transmit anyway.", + "err", err, + "ethTxID", tx.ID, + "meta", tx.Meta, + ) + return nil + } + + // If the request block number is not provided, transmit anyway just to be safe. + // Worst we can do is revert due to the request already being fulfilled. + if v.RequestBlockNumber == nil { + l.Errorw("Was provided with a nil request block number. Attempting to transmit anyway.", + "ethTxID", tx.ID, + "meta", tx.Meta, + ) + return nil + } + vrfRequestID := meta.RequestID.Big() - callback, err := v.GetCommitment(&bind.CallOpts{Context: ctx}, vrfRequestID) + + // Subtract 5 since the newest block likely isn't indexed yet and will cause "header not found" + // errors. + latest := new(big.Int).Sub(h.Number, big.NewInt(5)) + blockNumber := bigmath.Max(latest, v.RequestBlockNumber) + callback, err := v.GetCommitment(&bind.CallOpts{ + Context: ctx, + BlockNumber: blockNumber, + }, vrfRequestID) if err != nil { l.Errorw("Failed to check request fulfillment status, error calling GetCommitment. Attempting to transmit anyway.", "err", err, - "ethTxId", tx.ID, + "ethTxID", tx.ID, "meta", tx.Meta, - "vrfRequestId", vrfRequestID) + "vrfRequestId", vrfRequestID, + "blockNumber", h.Number, + ) return nil } else if utils.IsEmpty(callback[:]) { // If seedAndBlockNumber is zero then the response has been fulfilled and we should skip it. 
l.Infow("Request already fulfilled.", - "ethTxId", tx.ID, + "ethTxID", tx.ID, "meta", tx.Meta, "vrfRequestId", vrfRequestID) return errors.New("request already fulfilled") } else { l.Debugw("Request not yet fulfilled", - "ethTxId", tx.ID, + "ethTxID", tx.ID, "meta", tx.Meta, "vrfRequestId", vrfRequestID) return nil diff --git a/core/chains/evm/txmgr/transmitchecker_test.go b/core/chains/evm/txmgr/transmitchecker_test.go index cfc0397e337..f02599c6d8b 100644 --- a/core/chains/evm/txmgr/transmitchecker_test.go +++ b/core/chains/evm/txmgr/transmitchecker_test.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/pkg/errors" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -25,7 +26,7 @@ import ( ) func TestFactory(t *testing.T) { - client, _ := cltest.NewEthMocksWithDefaultChain(t) + client := cltest.NewEthMocksWithDefaultChain(t) factory := &txmgr.CheckerFactory{Client: client} t.Run("no checker", func(t *testing.T) { @@ -47,9 +48,18 @@ func TestFactory(t *testing.T) { c, err := factory.BuildChecker(txmgr.TransmitCheckerSpec{ CheckerType: txmgr.TransmitCheckerTypeVRFV2, VRFCoordinatorAddress: testutils.NewAddress(), + VRFRequestBlockNumber: big.NewInt(1), }) require.NoError(t, err) require.IsType(t, &txmgr.VRFV2Checker{}, c) + + // request block number not provided should error out. 
+ c, err = factory.BuildChecker(txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeVRFV2, + VRFCoordinatorAddress: testutils.NewAddress(), + }) + require.Error(t, err) + require.Nil(t, c) }) t.Run("simulate checker", func(t *testing.T) { @@ -141,12 +151,14 @@ func TestTransmitCheckers(t *testing.T) { }) t.Run("VRF V1", func(t *testing.T) { + testDefaultSubID := uint64(2) + testDefaultMaxLink := "1000000000000000000" newTx := func(t *testing.T, vrfReqID [32]byte) (txmgr.EthTx, txmgr.EthTxAttempt) { meta := txmgr.EthTxMeta{ RequestID: common.BytesToHash(vrfReqID[:]), - MaxLink: "1000000000000000000", // 1 LINK - SubID: 2, + MaxLink: &testDefaultMaxLink, // 1 LINK + SubID: &testDefaultSubID, } b, err := json.Marshal(meta) @@ -209,12 +221,14 @@ func TestTransmitCheckers(t *testing.T) { }) t.Run("VRF V2", func(t *testing.T) { + testDefaultSubID := uint64(2) + testDefaultMaxLink := "1000000000000000000" newTx := func(t *testing.T, vrfReqID *big.Int) (txmgr.EthTx, txmgr.EthTxAttempt) { meta := txmgr.EthTxMeta{ RequestID: common.BytesToHash(vrfReqID.Bytes()), - MaxLink: "1000000000000000000", // 1 LINK - SubID: 2, + MaxLink: &testDefaultMaxLink, // 1 LINK + SubID: &testDefaultSubID, } b, err := json.Marshal(meta) @@ -239,18 +253,26 @@ func TestTransmitCheckers(t *testing.T) { } } - checker := txmgr.VRFV2Checker{GetCommitment: func(_ *bind.CallOpts, requestID *big.Int) ([32]byte, error) { - if requestID.String() == "1" { - // Request 1 is already fulfilled - return [32]byte{}, nil - } else if requestID.String() == "2" { - // Request 2 errors - return [32]byte{}, errors.New("error getting commitment") - } else { - // All other requests are unfulfilled - return [32]byte{1}, nil - } - }} + checker := txmgr.VRFV2Checker{ + GetCommitment: func(_ *bind.CallOpts, requestID *big.Int) ([32]byte, error) { + if requestID.String() == "1" { + // Request 1 is already fulfilled + return [32]byte{}, nil + } else if requestID.String() == "2" { + // Request 2 errors + return 
[32]byte{}, errors.New("error getting commitment") + } else { + // All other requests are unfulfilled + return [32]byte{1}, nil + } + }, + HeaderByNumber: func(ctx context.Context, n *big.Int) (*types.Header, error) { + return &types.Header{ + Number: big.NewInt(1), + }, nil + }, + RequestBlockNumber: big.NewInt(1), + } t.Run("already fulfilled", func(t *testing.T) { tx, attempt := newTx(t, big.NewInt(1)) @@ -267,5 +289,24 @@ func TestTransmitCheckers(t *testing.T) { tx, attempt := newTx(t, big.NewInt(2)) require.NoError(t, checker.Check(ctx, log, tx, attempt)) }) + + t.Run("can't get header", func(t *testing.T) { + checker.HeaderByNumber = func(ctx context.Context, n *big.Int) (*types.Header, error) { + return nil, errors.New("can't get head") + } + tx, attempt := newTx(t, big.NewInt(3)) + require.NoError(t, checker.Check(ctx, log, tx, attempt)) + }) + + t.Run("nil request block number", func(t *testing.T) { + checker.HeaderByNumber = func(ctx context.Context, n *big.Int) (*types.Header, error) { + return &types.Header{ + Number: big.NewInt(1), + }, nil + } + checker.RequestBlockNumber = nil + tx, attempt := newTx(t, big.NewInt(4)) + require.NoError(t, checker.Check(ctx, log, tx, attempt)) + }) }) } diff --git a/core/chains/evm/txmgr/txmgr.go b/core/chains/evm/txmgr/txmgr.go index 8e937cb21dc..da5c102b338 100644 --- a/core/chains/evm/txmgr/txmgr.go +++ b/core/chains/evm/txmgr/txmgr.go @@ -17,17 +17,17 @@ import ( "github.com/smartcontractkit/sqlx" "github.com/smartcontractkit/chainlink/core/assets" - "github.com/smartcontractkit/chainlink/core/chains" evmclient "github.com/smartcontractkit/chainlink/core/chains/evm/client" "github.com/smartcontractkit/chainlink/core/chains/evm/gas" httypes "github.com/smartcontractkit/chainlink/core/chains/evm/headtracker/types" + "github.com/smartcontractkit/chainlink/core/chains/evm/label" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/core/config" 
"github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/null" "github.com/smartcontractkit/chainlink/core/services" "github.com/smartcontractkit/chainlink/core/services/keystore/keys/ethkey" "github.com/smartcontractkit/chainlink/core/services/pg" - "github.com/smartcontractkit/chainlink/core/static" "github.com/smartcontractkit/chainlink/core/utils" ) @@ -412,8 +412,8 @@ func (c *ChainKeyStore) SignTx(address common.Address, tx *gethTypes.Transaction return hash, rlp.Bytes(), nil } -func signedTxHash(signedTx *gethTypes.Transaction, chainType chains.ChainType) (hash common.Hash, err error) { - if chainType == chains.ExChain { +func signedTxHash(signedTx *gethTypes.Transaction, chainType config.ChainType) (hash common.Hash, err error) { + if chainType == config.ChainExChain { hash, err = exchainutils.LegacyHash(signedTx) if err != nil { return hash, errors.Wrap(err, "error getting signed tx hash from exchain") @@ -530,7 +530,7 @@ func CheckEthTxQueueCapacity(q pg.Queryer, fromAddress common.Address, maxQueued } if count >= maxQueuedTransactions { - err = errors.Errorf("cannot create transaction; too many unstarted transactions in the queue (%v/%v). %s", count, maxQueuedTransactions, static.EvmMaxQueuedTransactionsLabel) + err = errors.Errorf("cannot create transaction; too many unstarted transactions in the queue (%v/%v). 
%s", count, maxQueuedTransactions, label.MaxQueuedTransactionsWarning) } return } diff --git a/core/chains/evm/txmgr/txmgr_test.go b/core/chains/evm/txmgr/txmgr_test.go index 980c2e13b4a..44bd59b8a59 100644 --- a/core/chains/evm/txmgr/txmgr_test.go +++ b/core/chains/evm/txmgr/txmgr_test.go @@ -16,9 +16,9 @@ import ( "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink/core/assets" - "github.com/smartcontractkit/chainlink/core/chains" "github.com/smartcontractkit/chainlink/core/chains/evm/txmgr" txmmocks "github.com/smartcontractkit/chainlink/core/chains/evm/txmgr/mocks" + "github.com/smartcontractkit/chainlink/core/config" "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/internal/testutils" "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" @@ -127,13 +127,12 @@ func TestTxm_CheckEthTxQueueCapacity(t *testing.T) { t.Run("with equal or more unstarted eth_txes than limit returns error", func(t *testing.T) { err := txmgr.CheckEthTxQueueCapacity(db, fromAddress, maxUnconfirmedTransactions, cltest.FixtureChainID) require.Error(t, err) - require.EqualError(t, err, fmt.Sprintf("cannot create transaction; too many unstarted transactions in the queue (2/%d). WARNING: Hitting ETH_MAX_QUEUED_TRANSACTIONS is a sanity limit and should never happen under normal operation. This error is very unlikely to be a problem with Chainlink, and instead more likely to be caused by a problem with your eth node's connectivity. Check your eth node: it may not be broadcasting transactions to the network, or it might be overloaded and evicting Chainlink's transactions from its mempool. 
Increasing ETH_MAX_QUEUED_TRANSACTIONS is almost certainly not the correct action to take here unless you ABSOLUTELY know what you are doing, and will probably make things worse", maxUnconfirmedTransactions)) + require.Contains(t, err.Error(), fmt.Sprintf("cannot create transaction; too many unstarted transactions in the queue (2/%d). WARNING: Hitting ETH_MAX_QUEUED_TRANSACTIONS", maxUnconfirmedTransactions)) cltest.MustInsertUnstartedEthTx(t, borm, fromAddress) err = txmgr.CheckEthTxQueueCapacity(db, fromAddress, maxUnconfirmedTransactions, cltest.FixtureChainID) require.Error(t, err) - - require.EqualError(t, err, fmt.Sprintf("cannot create transaction; too many unstarted transactions in the queue (3/%d). WARNING: Hitting ETH_MAX_QUEUED_TRANSACTIONS is a sanity limit and should never happen under normal operation. This error is very unlikely to be a problem with Chainlink, and instead more likely to be caused by a problem with your eth node's connectivity. Check your eth node: it may not be broadcasting transactions to the network, or it might be overloaded and evicting Chainlink's transactions from its mempool. Increasing ETH_MAX_QUEUED_TRANSACTIONS is almost certainly not the correct action to take here unless you ABSOLUTELY know what you are doing, and will probably make things worse", maxUnconfirmedTransactions)) + require.Contains(t, err.Error(), fmt.Sprintf("cannot create transaction; too many unstarted transactions in the queue (3/%d). WARNING: Hitting ETH_MAX_QUEUED_TRANSACTIONS", maxUnconfirmedTransactions)) }) t.Run("with different chain ID ignores txes", func(t *testing.T) { @@ -264,7 +263,8 @@ func TestTxm_CreateEthTransaction(t *testing.T) { Meta: nil, Strategy: txmgr.SendEveryStrategy{}, }) - assert.EqualError(t, err, "Txm#CreateEthTransaction: cannot create transaction; too many unstarted transactions in the queue (1/1). WARNING: Hitting ETH_MAX_QUEUED_TRANSACTIONS is a sanity limit and should never happen under normal operation. 
This error is very unlikely to be a problem with Chainlink, and instead more likely to be caused by a problem with your eth node's connectivity. Check your eth node: it may not be broadcasting transactions to the network, or it might be overloaded and evicting Chainlink's transactions from its mempool. Increasing ETH_MAX_QUEUED_TRANSACTIONS is almost certainly not the correct action to take here unless you ABSOLUTELY know what you are doing, and will probably make things worse") + require.Error(t, err) + assert.Contains(t, err.Error(), "Txm#CreateEthTransaction: cannot create transaction; too many unstarted transactions in the queue (1/1). WARNING: Hitting ETH_MAX_QUEUED_TRANSACTIONS") }) t.Run("doesn't insert eth_tx if a matching tx already exists for that pipeline_task_run_id", func(t *testing.T) { @@ -348,7 +348,8 @@ func TestTxm_CreateEthTransaction(t *testing.T) { t.Run("meta and vrf checker", func(t *testing.T) { pgtest.MustExec(t, db, `DELETE FROM eth_txes`) - + testDefaultSubID := uint64(2) + testDefaultMaxLink := "1000000000000000000" jobID := int32(25) requestID := gethcommon.HexToHash("abcd") requestTxHash := gethcommon.HexToHash("dcba") @@ -356,8 +357,8 @@ func TestTxm_CreateEthTransaction(t *testing.T) { JobID: jobID, RequestID: requestID, RequestTxHash: requestTxHash, - MaxLink: "1000000000000000000", // 1e18 - SubID: 2, + MaxLink: &testDefaultMaxLink, // 1e18 + SubID: &testDefaultSubID, } config.On("EvmMaxQueuedTransactions").Return(uint64(1)).Once() checker := txmgr.TransmitCheckerSpec{ @@ -599,7 +600,7 @@ func TestTxm_SignTx(t *testing.T) { chainID := big.NewInt(1) cfg := new(txmmocks.Config) cfg.Test(t) - cfg.On("ChainType").Return(chains.ChainType("")) + cfg.On("ChainType").Return(config.ChainType("")) kst := new(ksmocks.Eth) kst.Test(t) kst.On("SignTx", to, tx, chainID).Return(tx, nil).Once() @@ -613,7 +614,7 @@ func TestTxm_SignTx(t *testing.T) { chainID := big.NewInt(1) cfg := new(txmmocks.Config) cfg.Test(t) - 
cfg.On("ChainType").Return(chains.ExChain) + cfg.On("ChainType").Return(config.ChainExChain) kst := new(ksmocks.Eth) kst.Test(t) kst.On("SignTx", to, tx, chainID).Return(tx, nil).Once() diff --git a/core/chains/evm/types/types.go b/core/chains/evm/types/types.go index 91c25d5c090..2a4a3f25afc 100644 --- a/core/chains/evm/types/types.go +++ b/core/chains/evm/types/types.go @@ -13,6 +13,7 @@ import ( "gopkg.in/guregu/null.v4" "github.com/smartcontractkit/chainlink/core/assets" + "github.com/smartcontractkit/chainlink/core/chains" "github.com/smartcontractkit/chainlink/core/services/pg" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/smartcontractkit/chainlink/core/utils" @@ -27,26 +28,29 @@ type NewNode struct { } type ChainConfigORM interface { - StoreString(chainID *big.Int, key, val string) error - Clear(chainID *big.Int, key string) error + StoreString(chainID utils.Big, key, val string) error + Clear(chainID utils.Big, key string) error } -//go:generate mockery --name ORM --output ./../mocks/ --case=underscore type ORM interface { - EnabledChainsWithNodes() ([]Chain, error) - Chain(id utils.Big) (chain Chain, err error) - CreateChain(id utils.Big, config ChainCfg) (Chain, error) - UpdateChain(id utils.Big, enabled bool, config ChainCfg) (Chain, error) - DeleteChain(id utils.Big) error - Chains(offset, limit int) ([]Chain, int, error) - CreateNode(data NewNode) (Node, error) - DeleteNode(id int64) error + Chain(id utils.Big, qopts ...pg.QOpt) (chain Chain, err error) + Chains(offset, limit int, qopts ...pg.QOpt) ([]Chain, int, error) + CreateChain(id utils.Big, config ChainCfg, qopts ...pg.QOpt) (Chain, error) + UpdateChain(id utils.Big, enabled bool, config ChainCfg, qopts ...pg.QOpt) (Chain, error) + DeleteChain(id utils.Big, qopts ...pg.QOpt) error GetChainsByIDs(ids []utils.Big) (chains []Chain, err error) + EnabledChains(...pg.QOpt) ([]Chain, error) + + CreateNode(data Node, qopts ...pg.QOpt) (Node, error) + DeleteNode(id int32, qopts 
...pg.QOpt) error GetNodesByChainIDs(chainIDs []utils.Big, qopts ...pg.QOpt) (nodes []Node, err error) Node(id int32, qopts ...pg.QOpt) (Node, error) Nodes(offset, limit int, qopts ...pg.QOpt) ([]Node, int, error) NodesForChain(chainID utils.Big, offset, limit int, qopts ...pg.QOpt) ([]Node, int, error) + ChainConfigORM + + SetupNodes([]Node, []utils.Big) error } type ChainCfg struct { @@ -71,8 +75,10 @@ type ChainCfg struct { EvmHeadTrackerMaxBufferSize null.Int EvmHeadTrackerSamplingInterval *models.Duration EvmLogBackfillBatchSize null.Int + EvmLogPollInterval *models.Duration EvmMaxGasPriceWei *utils.Big EvmNonceAutoSync null.Bool + EvmUseForwarders null.Bool EvmRPCDefaultBatchSize null.Int FlagsContractAddress null.String GasEstimatorMode null.String @@ -98,14 +104,7 @@ func (c ChainCfg) Value() (driver.Value, error) { return json.Marshal(c) } -type Chain struct { - ID utils.Big - Nodes []Node - Cfg ChainCfg - CreatedAt time.Time - UpdatedAt time.Time - Enabled bool -} +type Chain = chains.Chain[utils.Big, ChainCfg] type Node struct { ID int32 diff --git a/core/chains/orm.go b/core/chains/orm.go new file mode 100644 index 00000000000..f0a077a90fc --- /dev/null +++ b/core/chains/orm.go @@ -0,0 +1,345 @@ +package chains + +import ( + "database/sql" + "fmt" + "strings" + "time" + + "github.com/lib/pq" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink/core/services/pg" +) + +// ORM manages chains and nodes. 
+// +// I: Chain ID +// +// C: Chain Config +// +// N: Node including these default fields: +// ID int32 +// Name string +// CreatedAt time.Time +// UpdatedAt time.Time +type ORM[I, C, N any] interface { + Chain(I, ...pg.QOpt) (Chain[I, C], error) + Chains(offset, limit int, qopts ...pg.QOpt) ([]Chain[I, C], int, error) + CreateChain(id I, config C, qopts ...pg.QOpt) (Chain[I, C], error) + UpdateChain(id I, enabled bool, config C, qopts ...pg.QOpt) (Chain[I, C], error) + DeleteChain(id I, qopts ...pg.QOpt) error + GetChainsByIDs(ids []I) (chains []Chain[I, C], err error) + EnabledChains(...pg.QOpt) ([]Chain[I, C], error) + + CreateNode(N, ...pg.QOpt) (N, error) + DeleteNode(int32, ...pg.QOpt) error + GetNodesByChainIDs(chainIDs []I, qopts ...pg.QOpt) (nodes []N, err error) + Node(int32, ...pg.QOpt) (N, error) + NodeNamed(string, ...pg.QOpt) (N, error) + Nodes(offset, limit int, qopts ...pg.QOpt) (nodes []N, count int, err error) + NodesForChain(chainID I, offset, limit int, qopts ...pg.QOpt) (nodes []N, count int, err error) + + StoreString(chainID I, key, val string) error + Clear(chainID I, key string) error + + // SetupNodes is a shim to help with configuring multiple nodes via ENV. + // All existing nodes are dropped, and any missing chains are automatically created. + // Then all nodes are inserted, and conflicts are ignored. + SetupNodes(nodes []N, chainIDs []I) error +} + +type orm[I, C, N any] struct { + *chainsORM[I, C] + *nodesORM[I, N] +} + +// NewORM returns an ORM backed by q, for the tables _chains and _nodes with column _chain_id. +// Additional Node fields should be included in nodeCols. 
+func NewORM[I, C, N any](q pg.Q, prefix string, nodeCols ...string) ORM[I, C, N] { + return orm[I, C, N]{ + newChainsORM[I, C](q, prefix), + newNodesORM[I, N](q, prefix, nodeCols...), + } +} + +func (o orm[I, C, N]) SetupNodes(nodes []N, ids []I) error { + return o.chainsORM.q.Transaction(func(q pg.Queryer) error { + tx := pg.WithQueryer(q) + if err := o.truncateNodes(tx); err != nil { + return err + } + + if err := o.ensureChains(ids, tx); err != nil { + return err + } + + return o.ensureNodes(nodes, tx) + }) +} + +// Chain is a generic DB chain for any configuration C and chain id I. +// +// C normally implements sql.Scanner and driver.Valuer, but that is not enforced here. +// +// A Chain type alias can be used for convenience: +// type Chain = chains.Chain[string, pkg.ChainCfg] +type Chain[I, C any] struct { + ID I + Cfg C + CreatedAt time.Time + UpdatedAt time.Time + Enabled bool +} + +// chainsORM is a generic ORM for chains. +type chainsORM[I, C any] struct { + q pg.Q + prefix string +} + +// newChainsORM returns an chainsORM backed by q, for the table _chains. +func newChainsORM[I, C any](q pg.Q, prefix string) *chainsORM[I, C] { + return &chainsORM[I, C]{q: q, prefix: prefix} +} + +func (o *chainsORM[I, C]) Chain(id I, qopts ...pg.QOpt) (dbchain Chain[I, C], err error) { + q := o.q.WithOpts(qopts...) + chainSQL := fmt.Sprintf(`SELECT * FROM %s_chains WHERE id = $1;`, o.prefix) + err = q.Get(&dbchain, chainSQL, id) + return +} + +func (o *chainsORM[I, C]) GetChainsByIDs(ids []I) (chains []Chain[I, C], err error) { + sql := fmt.Sprintf(`SELECT * FROM %s_chains WHERE id = ANY($1) ORDER BY created_at, id;`, o.prefix) + + chainIDs := pq.Array(ids) + if err = o.q.Select(&chains, sql, chainIDs); err != nil { + return nil, err + } + + return chains, nil +} + +func (o *chainsORM[I, C]) CreateChain(id I, config C, qopts ...pg.QOpt) (chain Chain[I, C], err error) { + q := o.q.WithOpts(qopts...) 
+ sql := fmt.Sprintf(`INSERT INTO %s_chains (id, cfg, created_at, updated_at) VALUES ($1, $2, now(), now()) RETURNING *`, o.prefix) + err = q.Get(&chain, sql, id, config) + return +} + +func (o *chainsORM[I, C]) ensureChains(ids []I, qopts ...pg.QOpt) (err error) { + named := make([]struct{ ID I }, len(ids)) + for i, id := range ids { + named[i].ID = id + } + q := o.q.WithOpts(qopts...) + sql := fmt.Sprintf("INSERT INTO %s_chains (id, created_at, updated_at) VALUES (:id, NOW(), NOW()) ON CONFLICT DO NOTHING;", o.prefix) + + if _, err := q.NamedExec(sql, named); err != nil { + return errors.Wrapf(err, "failed to insert chains %v", ids) + } + + return nil +} + +func (o *chainsORM[I, C]) UpdateChain(id I, enabled bool, config C, qopts ...pg.QOpt) (chain Chain[I, C], err error) { + q := o.q.WithOpts(qopts...) + sql := fmt.Sprintf(`UPDATE %s_chains SET enabled = $1, cfg = $2, updated_at = now() WHERE id = $3 RETURNING *`, o.prefix) + err = q.Get(&chain, sql, enabled, config, id) + return +} + +// StoreString saves a string value into the config for the given chain and key +func (o *chainsORM[I, C]) StoreString(chainID I, name, val string) error { + s := fmt.Sprintf(`UPDATE %s_chains SET cfg = cfg || jsonb_build_object($1::text, $2::text) WHERE id = $3`, o.prefix) + res, err := o.q.Exec(s, name, val, chainID) + if err != nil { + return errors.Wrapf(err, "failed to store chain config for chain ID %v", chainID) + } + rowsAffected, err := res.RowsAffected() + if err != nil { + return err + } + if rowsAffected == 0 { + return errors.Wrapf(sql.ErrNoRows, "no chain found with ID %v", chainID) + } + return nil +} + +// Clear deletes a config value for the given chain and key +func (o *chainsORM[I, C]) Clear(chainID I, name string) error { + s := fmt.Sprintf(`UPDATE %s_chains SET cfg = cfg - $1 WHERE id = $2`, o.prefix) + res, err := o.q.Exec(s, name, chainID) + if err != nil { + return errors.Wrapf(err, "failed to clear chain config for chain ID %v", chainID) + } + 
rowsAffected, err := res.RowsAffected() + if err != nil { + return err + } + if rowsAffected == 0 { + return errors.Wrapf(sql.ErrNoRows, "no chain found with ID %v", chainID) + } + return nil +} + +func (o *chainsORM[I, C]) DeleteChain(id I, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + query := fmt.Sprintf(`DELETE FROM %s_chains WHERE id = $1`, o.prefix) + result, err := q.Exec(query, id) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected == 0 { + return sql.ErrNoRows + } + return nil +} + +func (o *chainsORM[I, C]) Chains(offset, limit int, qopts ...pg.QOpt) (chains []Chain[I, C], count int, err error) { + err = o.q.WithOpts(qopts...).Transaction(func(q pg.Queryer) error { + if err = q.Get(&count, fmt.Sprintf("SELECT COUNT(*) FROM %s_chains", o.prefix)); err != nil { + return errors.Wrap(err, "failed to fetch chains count") + } + + sql := fmt.Sprintf(`SELECT * FROM %s_chains ORDER BY created_at, id LIMIT $1 OFFSET $2;`, o.prefix) + err = q.Select(&chains, sql, pg.Limit(limit), offset) + return errors.Wrap(err, "failed to fetch chains") + }, pg.OptReadOnlyTx()) + + return +} + +func (o *chainsORM[I, C]) EnabledChains(qopts ...pg.QOpt) (chains []Chain[I, C], err error) { + q := o.q.WithOpts(qopts...) + chainsSQL := fmt.Sprintf(`SELECT * FROM %s_chains WHERE enabled ORDER BY created_at, id;`, o.prefix) + if err = q.Select(&chains, chainsSQL); err != nil { + return + } + return +} + +// nodesORM is a generic ORM for nodes. 
+type nodesORM[I, N any] struct { + q pg.Q + prefix string + createNodeQ string + ensureNodeQ string +} + +func newNodesORM[I, N any](q pg.Q, prefix string, nodeCols ...string) *nodesORM[I, N] { + // pre-compute query for CreateNode + var withColon []string + for _, c := range nodeCols { + withColon = append(withColon, ":"+c) + } + query := fmt.Sprintf(`INSERT INTO %s_nodes (name, %s_chain_id, %s, created_at, updated_at) + VALUES (:name, :%s_chain_id, %s, now(), now())`, + prefix, prefix, strings.Join(nodeCols, ", "), prefix, strings.Join(withColon, ", ")) + + return &nodesORM[I, N]{q: q, prefix: prefix, + createNodeQ: query + ` RETURNING *;`, + ensureNodeQ: query + ` ON CONFLICT DO NOTHING;`, + } +} + +func (o *nodesORM[I, N]) ensureNodes(nodes []N, qopts ...pg.QOpt) (err error) { + q := o.q.WithOpts(qopts...) + _, err = q.NamedExec(o.ensureNodeQ, nodes) + err = errors.Wrap(err, "failed to insert nodes") + return +} + +func (o *nodesORM[I, N]) CreateNode(data N, qopts ...pg.QOpt) (node N, err error) { + q := o.q.WithOpts(qopts...) + stmt, err := q.PrepareNamed(o.createNodeQ) + if err != nil { + return node, err + } + err = stmt.Get(&node, data) + return node, err +} + +func (o *nodesORM[I, N]) DeleteNode(id int32, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + query := fmt.Sprintf(`DELETE FROM %s_nodes WHERE id = $1`, o.prefix) + result, err := q.Exec(query, id) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected == 0 { + return sql.ErrNoRows + } + return nil +} + +func (o *nodesORM[I, N]) truncateNodes(qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + _, err := q.Exec(fmt.Sprintf(`TRUNCATE %s_nodes;`, o.prefix)) + if err != nil { + return errors.Wrapf(err, "failed to truncate %s_nodes table", o.prefix) + } + return nil +} + +func (o *nodesORM[I, N]) Node(id int32, qopts ...pg.QOpt) (node N, err error) { + q := o.q.WithOpts(qopts...) 
+ err = q.Get(&node, fmt.Sprintf("SELECT * FROM %s_nodes WHERE id = $1;", o.prefix), id) + + return +} + +func (o *nodesORM[I, N]) NodeNamed(name string, qopts ...pg.QOpt) (node N, err error) { + q := o.q.WithOpts(qopts...) + err = q.Get(&node, fmt.Sprintf("SELECT * FROM %s_nodes WHERE name = $1;", o.prefix), name) + + return +} + +func (o *nodesORM[I, N]) Nodes(offset, limit int, qopts ...pg.QOpt) (nodes []N, count int, err error) { + err = o.q.WithOpts(qopts...).Transaction(func(q pg.Queryer) error { + if err = q.Get(&count, fmt.Sprintf("SELECT COUNT(*) FROM %s_nodes", o.prefix)); err != nil { + return errors.Wrap(err, "failed to fetch nodes count") + } + + sql := fmt.Sprintf(`SELECT * FROM %s_nodes ORDER BY created_at, id LIMIT $1 OFFSET $2;`, o.prefix) + err = q.Select(&nodes, sql, pg.Limit(limit), offset) + return errors.Wrap(err, "failed to fetch nodes") + }, pg.OptReadOnlyTx()) + + return +} + +func (o *nodesORM[I, N]) NodesForChain(chainID I, offset, limit int, qopts ...pg.QOpt) (nodes []N, count int, err error) { + err = o.q.WithOpts(qopts...).Transaction(func(q pg.Queryer) error { + if err = q.Get(&count, fmt.Sprintf("SELECT COUNT(*) FROM %s_nodes WHERE %s_chain_id = $1", o.prefix, o.prefix), chainID); err != nil { + return errors.Wrap(err, "failed to fetch nodes count") + } + + sql := fmt.Sprintf(`SELECT * FROM %s_nodes WHERE %s_chain_id = $1 ORDER BY created_at, id LIMIT $2 OFFSET $3;`, o.prefix, o.prefix) + err = q.Select(&nodes, sql, chainID, pg.Limit(limit), offset) + return errors.Wrap(err, "failed to fetch nodes") + }, pg.OptReadOnlyTx()) + + return +} + +func (o *nodesORM[I, N]) GetNodesByChainIDs(chainIDs []I, qopts ...pg.QOpt) (nodes []N, err error) { + sql := fmt.Sprintf(`SELECT * FROM %s_nodes WHERE %s_chain_id = ANY($1) ORDER BY created_at, id;`, o.prefix, o.prefix) + + cids := pq.Array(chainIDs) + if err = o.q.WithOpts(qopts...).Select(&nodes, sql, cids); err != nil { + return nil, err + } + + return nodes, nil +} diff --git 
a/core/chains/orm_test.go b/core/chains/orm_test.go new file mode 100644 index 00000000000..07e3be67c26 --- /dev/null +++ b/core/chains/orm_test.go @@ -0,0 +1,29 @@ +package chains_test + +import ( + "time" + + "gopkg.in/guregu/null.v4" + + "github.com/smartcontractkit/chainlink/core/chains" + "github.com/smartcontractkit/chainlink/core/services/pg" +) + +func ExampleNewORM() { + type Config struct { + Foo null.String + } + type Node = struct { + ID int32 + Name string + ExampleChainID string + URL string + Bar null.Int + CreatedAt time.Time + UpdatedAt time.Time + } + var q pg.Q + _ = chains.NewORM[string, Config, Node](q, "example", "url", "bar") + + // Output: +} diff --git a/core/chains/solana/chain.go b/core/chains/solana/chain.go new file mode 100644 index 00000000000..05bed93b72e --- /dev/null +++ b/core/chains/solana/chain.go @@ -0,0 +1,207 @@ +package solana + +import ( + "context" + "math" + "math/rand" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana" + solanaclient "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/config" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + "github.com/smartcontractkit/sqlx" + + "github.com/smartcontractkit/chainlink/core/chains/solana/monitor" + "github.com/smartcontractkit/chainlink/core/chains/solana/soltxm" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services" + "github.com/smartcontractkit/chainlink/core/services/keystore" + "github.com/smartcontractkit/chainlink/core/services/pg" + "github.com/smartcontractkit/chainlink/core/utils" +) + +// DefaultRequestTimeout is the default Solana client timeout. 
+const DefaultRequestTimeout = 30 * time.Second + +//go:generate mockery --name TxManager --srcpkg github.com/smartcontractkit/chainlink-solana/pkg/solana --output ./mocks/ --case=underscore +//go:generate mockery --name Reader --srcpkg github.com/smartcontractkit/chainlink-solana/pkg/solana/client --output ./mocks/ --case=underscore +//go:generate mockery --name Chain --srcpkg github.com/smartcontractkit/chainlink-solana/pkg/solana --output ./mocks/ --case=underscore +var _ solana.Chain = (*chain)(nil) + +type chain struct { + utils.StartStopOnce + id string + cfg config.Config + txm *soltxm.Txm + balanceMonitor services.ServiceCtx + orm ORM + lggr logger.Logger + + // tracking node chain id for verification + clientCache map[string]cachedClient // map URL -> {client, chainId} [mainnet/testnet/devnet/localnet] + clientLock sync.RWMutex +} + +type cachedClient struct { + id string + rw solanaclient.ReaderWriter +} + +// NewChain returns a new chain backed by node. +func NewChain(db *sqlx.DB, ks keystore.Solana, logCfg pg.LogConfig, eb pg.EventBroadcaster, dbchain Chain, orm ORM, lggr logger.Logger) (*chain, error) { + cfg := config.NewConfig(dbchain.Cfg, lggr) + lggr = lggr.With("chainID", dbchain.ID, "chainSet", "solana") + var ch = chain{ + id: dbchain.ID, + cfg: cfg, + orm: orm, + lggr: lggr.Named("Chain"), + clientCache: map[string]cachedClient{}, + } + tc := func() (solanaclient.ReaderWriter, error) { + return ch.getClient() + } + ch.txm = soltxm.NewTxm(tc, cfg, lggr) + ch.balanceMonitor = monitor.NewBalanceMonitor(ch.id, cfg, lggr, ks, ch.Reader) + return &ch, nil +} + +func (c *chain) ID() string { + return c.id +} + +func (c *chain) Config() config.Config { + return c.cfg +} + +func (c *chain) UpdateConfig(cfg db.ChainCfg) { + c.cfg.Update(cfg) +} + +func (c *chain) TxManager() solana.TxManager { + return c.txm +} + +func (c *chain) Reader() (solanaclient.Reader, error) { + return c.getClient() +} + +// getClient returns a client, randomly selecting one 
from available and valid nodes +func (c *chain) getClient() (solanaclient.ReaderWriter, error) { + var node db.Node + var client solanaclient.ReaderWriter + nodes, cnt, err := c.orm.NodesForChain(c.id, 0, math.MaxInt) + if err != nil { + return nil, errors.Wrap(err, "failed to get nodes") + } + if cnt == 0 { + return nil, errors.New("no nodes available") + } + rand.Seed(time.Now().Unix()) // seed randomness otherwise it will return the same each time + // #nosec + index := rand.Perm(len(nodes)) // list of node indexes to try + for _, i := range index { + node = nodes[i] + // create client and check + client, err = c.verifiedClient(node) + // if error, try another node + if err != nil { + c.lggr.Warnw("failed to create client", "name", node.Name, "solana-url", node.SolanaURL, "error", err.Error()) + continue + } + // if all checks passed, mark found and break loop + break + } + // if no valid node found, exit with error + if client == nil { + return nil, errors.New("no valid nodes available") + } + c.lggr.Debugw("Created client", "name", node.Name, "solana-url", node.SolanaURL) + return client, nil +} + +// verifiedClient returns a client for node or an error if the chain id does not match.
+func (c *chain) verifiedClient(node db.Node) (solanaclient.ReaderWriter, error) { + url := node.SolanaURL + var err error + + // check if cached client exists + c.clientLock.RLock() + client, exists := c.clientCache[url] + c.clientLock.RUnlock() + + if !exists { + // create client + client.rw, err = solanaclient.NewClient(url, c.cfg, DefaultRequestTimeout, c.lggr.Named("Client-"+node.Name)) + if err != nil { + return nil, errors.Wrap(err, "failed to create client") + } + + client.id, err = client.rw.ChainID() + if err != nil { + return nil, errors.Wrap(err, "failed to fetch ChainID in checkClient") + } + } + + // check chainID matches expected chainID + expectedID := strings.ToLower(c.id) + if client.id != expectedID { + return nil, errors.Errorf("client returned mismatched chain id (expected: %s, got: %s): %s", expectedID, client.id, url) + } + + // save client if doesn't exist and checks have passed + // if checks failed, client is not saved and can retry when a new client is requested + if !exists { + c.clientLock.Lock() + // recheck when writing to prevent parallel writes (discard duplicate if exists) + if cached, exists := c.clientCache[url]; !exists { + c.clientCache[url] = client + } else { + client = cached + } + c.clientLock.Unlock() + } + + return client.rw, nil +} + +func (c *chain) Start(ctx context.Context) error { + return c.StartOnce("Chain", func() error { + c.lggr.Debug("Starting") + c.lggr.Debug("Starting txm") + c.lggr.Debug("Starting balance monitor") + return multierr.Combine( + c.txm.Start(ctx), + c.balanceMonitor.Start(ctx)) + }) +} + +func (c *chain) Close() error { + return c.StopOnce("Chain", func() error { + c.lggr.Debug("Stopping") + c.lggr.Debug("Stopping txm") + c.lggr.Debug("Stopping balance monitor") + return multierr.Combine(c.txm.Close(), + c.balanceMonitor.Close()) + }) +} + +func (c *chain) Ready() error { + return multierr.Combine( + c.StartStopOnce.Ready(), + c.txm.Ready(), + ) +} + +func (c *chain) Healthy() error { + return 
multierr.Combine( + c.StartStopOnce.Healthy(), + c.txm.Healthy(), + ) +} diff --git a/core/chains/solana/chain_set.go b/core/chains/solana/chain_set.go new file mode 100644 index 00000000000..5afd9064677 --- /dev/null +++ b/core/chains/solana/chain_set.go @@ -0,0 +1,297 @@ +// TODO: Improve code reuse (mostly c/p of core/chains/terra/chain_set.go) +package solana + +import ( + "context" + "database/sql" + "fmt" + "sync" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + "github.com/smartcontractkit/sqlx" + + coreconfig "github.com/smartcontractkit/chainlink/core/config" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services/keystore" + "github.com/smartcontractkit/chainlink/core/services/pg" + "github.com/smartcontractkit/chainlink/core/utils" +) + +var ( + // ErrChainIDEmpty is returned when chain is required but was empty. + ErrChainIDEmpty = errors.New("chain id empty") + // ErrChainIDInvalid is returned when a chain id does not match any configured chains. + ErrChainIDInvalid = errors.New("chain id does not match any local chains") +) + +// ChainSetOpts holds options for configuring a ChainSet. 
+type ChainSetOpts struct { + Config coreconfig.GeneralConfig + Logger logger.Logger + DB *sqlx.DB + KeyStore keystore.Solana + EventBroadcaster pg.EventBroadcaster + ORM ORM +} + +func (o ChainSetOpts) validate() (err error) { + required := func(s string) error { + return errors.Errorf("%s is required", s) + } + if o.Config == nil { + err = multierr.Append(err, required("Config")) + } + if o.Logger == nil { + err = multierr.Append(err, required("Logger")) + } + if o.DB == nil { + err = multierr.Append(err, required("DB")) + } + if o.KeyStore == nil { + err = multierr.Append(err, required("KeyStore")) + } + if o.EventBroadcaster == nil { + err = multierr.Append(err, required("EventBroadcaster")) + } + if o.ORM == nil { + err = multierr.Append(err, required("ORM")) + } + return +} + +func (o ChainSetOpts) newChain(dbchain Chain) (*chain, error) { + if !dbchain.Enabled { + return nil, errors.Errorf("cannot create new chain with ID %s, the chain is disabled", dbchain.ID) + } + return NewChain(o.DB, o.KeyStore, o.Config, o.EventBroadcaster, dbchain, o.ORM, o.Logger) +} + +// ChainSet extends solana.ChainSet with mutability and exposes the underlying ORM. +type ChainSet interface { + solana.ChainSet + + Add(context.Context, string, db.ChainCfg) (Chain, error) + Remove(string) error + Configure(ctx context.Context, id string, enabled bool, config db.ChainCfg) (Chain, error) + + ORM() ORM +} + +//go:generate mockery --name ChainSet --srcpkg github.com/smartcontractkit/chainlink-solana/pkg/solana --output ./mocks/ --case=underscore +var _ ChainSet = (*chainSet)(nil) + +type chainSet struct { + utils.StartStopOnce + opts ChainSetOpts + chainsMu sync.RWMutex + chains map[string]*chain + lggr logger.Logger +} + +// NewChainSet returns a new chain set for opts.
+func NewChainSet(opts ChainSetOpts) (*chainSet, error) { + if err := opts.validate(); err != nil { + return nil, err + } + dbchains, err := opts.ORM.EnabledChains() + if err != nil { + return nil, errors.Wrap(err, "error loading chains") + } + cs := chainSet{ + opts: opts, + chains: make(map[string]*chain), + lggr: opts.Logger.Named("ChainSet"), + } + for _, dbc := range dbchains { + var err2 error + cs.chains[dbc.ID], err2 = opts.newChain(dbc) + if err2 != nil { + err = multierr.Combine(err, err2) + continue + } + } + return &cs, err +} + +func (c *chainSet) ORM() ORM { + return c.opts.ORM +} + +func (c *chainSet) Chain(ctx context.Context, id string) (solana.Chain, error) { + if id == "" { + return nil, ErrChainIDEmpty + } + if err := c.StartStopOnce.Ready(); err != nil { + return nil, err + } + c.chainsMu.RLock() + ch := c.chains[id] + c.chainsMu.RUnlock() + if ch != nil { + // Already known/started + return ch, nil + } + + // Unknown/unstarted + c.chainsMu.Lock() + defer c.chainsMu.Unlock() + + // Double check now that we have the lock, so we don't start an orphan. + if err := c.StartStopOnce.Ready(); err != nil { + return nil, err + } + + ch = c.chains[id] + if ch != nil { + // Someone else beat us to it + return ch, nil + } + + // Do we have nodes/config? 
+ opts := c.opts + dbchain, err := opts.ORM.Chain(id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, ErrChainIDInvalid + } + return nil, err + } + + err = c.initializeChain(ctx, &dbchain) + if err != nil { + return nil, err + } + return c.chains[id], nil +} + +func (c *chainSet) Add(ctx context.Context, id string, config db.ChainCfg) (Chain, error) { + c.chainsMu.Lock() + defer c.chainsMu.Unlock() + + if _, exists := c.chains[id]; exists { + return Chain{}, errors.Errorf("chain already exists with id %s", id) + } + + dbchain, err := c.opts.ORM.CreateChain(id, config) + if err != nil { + return Chain{}, err + } + return dbchain, c.initializeChain(ctx, &dbchain) +} + +// Requires a lock on chainsMu +func (c *chainSet) initializeChain(ctx context.Context, dbchain *Chain) error { + // Start it + cid := dbchain.ID + chain, err := c.opts.newChain(*dbchain) + if err != nil { + return errors.Wrapf(err, "initializeChain: failed to instantiate chain %s", dbchain.ID) + } + if err = chain.Start(ctx); err != nil { + return errors.Wrapf(err, "initializeChain: failed to start chain %s", dbchain.ID) + } + c.chains[cid] = chain + return nil +} + +func (c *chainSet) Remove(id string) error { + c.chainsMu.Lock() + defer c.chainsMu.Unlock() + + if err := c.opts.ORM.DeleteChain(id); err != nil { + return err + } + + chain, exists := c.chains[id] + if !exists { + // If a chain was removed from the DB that wasn't loaded into the memory set we're done. 
+ return nil + } + delete(c.chains, id) + return chain.Close() +} + +func (c *chainSet) Configure(ctx context.Context, id string, enabled bool, config db.ChainCfg) (Chain, error) { + c.chainsMu.Lock() + defer c.chainsMu.Unlock() + + // Update configuration stored in the database + dbchain, err := c.opts.ORM.UpdateChain(id, enabled, config) + if err != nil { + return Chain{}, err + } + + chain, exists := c.chains[id] + + switch { + case exists && !enabled: + // Chain was toggled to disabled + delete(c.chains, id) + return Chain{}, chain.Close() + case !exists && enabled: + // Chain was toggled to enabled + return dbchain, c.initializeChain(ctx, &dbchain) + case exists: + // Exists in memory, no toggling: Update in-memory chain + chain.UpdateConfig(config) + } + + return dbchain, nil +} + +func (c *chainSet) Start(ctx context.Context) error { + //TODO if disabled, warn and return? + return c.StartOnce("ChainSet", func() error { + c.lggr.Debug("Starting") + + c.chainsMu.Lock() + defer c.chainsMu.Unlock() + var started int + for _, ch := range c.chains { + if err := ch.Start(ctx); err != nil { + c.lggr.Errorw(fmt.Sprintf("Chain with ID %s failed to start. You will need to fix this issue and restart the Chainlink node before any services that use this chain will work properly. 
Got error: %v", ch.ID(), err), "err", err) + continue + } + started++ + } + c.lggr.Info(fmt.Sprintf("Started %d/%d chains", started, len(c.chains))) + return nil + }) +} + +func (c *chainSet) Close() error { + return c.StopOnce("ChainSet", func() (err error) { + c.lggr.Debug("Stopping") + + c.chainsMu.Lock() + defer c.chainsMu.Unlock() + for _, c := range c.chains { + err = multierr.Combine(err, c.Close()) + } + return + }) +} + +func (c *chainSet) Ready() (err error) { + err = c.StartStopOnce.Ready() + c.chainsMu.RLock() + defer c.chainsMu.RUnlock() + for _, c := range c.chains { + err = multierr.Combine(err, c.Ready()) + } + return +} + +func (c *chainSet) Healthy() (err error) { + err = c.StartStopOnce.Healthy() + c.chainsMu.RLock() + defer c.chainsMu.RUnlock() + for _, c := range c.chains { + err = multierr.Combine(err, c.Healthy()) + } + return +} diff --git a/core/chains/solana/chain_test.go b/core/chains/solana/chain_test.go new file mode 100644 index 00000000000..96cd9fb2eea --- /dev/null +++ b/core/chains/solana/chain_test.go @@ -0,0 +1,250 @@ +package solana + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + + "github.com/pkg/errors" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/config" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services/pg" +) + +const TestSolanaGenesisHashTemplate = `{"jsonrpc":"2.0","result":"%s","id":1}` + +func TestSolanaChain_GetClient(t *testing.T) { + checkOnce := map[string]struct{}{} + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + out := fmt.Sprintf(TestSolanaGenesisHashTemplate, client.MainnetGenesisHash) // mainnet genesis hash + + if 
!strings.Contains(r.URL.Path, "/mismatch") { + // devnet genesis hash + out = fmt.Sprintf(TestSolanaGenesisHashTemplate, client.DevnetGenesisHash) + + // clients with correct chainID should request chainID only once + if _, exists := checkOnce[r.URL.Path]; exists { + assert.NoError(t, errors.Errorf("rpc has been called once already for successful client '%s'", r.URL.Path)) + } + checkOnce[r.URL.Path] = struct{}{} + } + + _, err := w.Write([]byte(out)) + require.NoError(t, err) + })) + defer mockServer.Close() + + solORM := &mockORM{} + lggr := logger.TestLogger(t) + testChain := chain{ + id: "devnet", + orm: solORM, + cfg: config.NewConfig(db.ChainCfg{}, lggr), + lggr: logger.TestLogger(t), + clientCache: map[string]cachedClient{}, + } + + // random nodes (happy path, all valid) + solORM.nodesForChain = []db.Node{ + db.Node{ + SolanaChainID: "devnet", + SolanaURL: mockServer.URL + "/1", + }, + db.Node{ + SolanaChainID: "devnet", + SolanaURL: mockServer.URL + "/2", + }, + } + _, err := testChain.getClient() + assert.NoError(t, err) + + // random nodes (happy path, 1 valid + multiple invalid) + solORM.nodesForChain = []db.Node{ + db.Node{ + SolanaChainID: "devnet", + SolanaURL: mockServer.URL + "/1", + }, + db.Node{ + SolanaChainID: "devnet", + SolanaURL: mockServer.URL + "/mismatch/1", + }, + db.Node{ + SolanaChainID: "devnet", + SolanaURL: mockServer.URL + "/mismatch/2", + }, + db.Node{ + SolanaChainID: "devnet", + SolanaURL: mockServer.URL + "/mismatch/3", + }, + db.Node{ + SolanaChainID: "devnet", + SolanaURL: mockServer.URL + "/mismatch/4", + }, + } + _, err = testChain.getClient() + assert.NoError(t, err) + + // empty nodes response + solORM.nodesForChain = nil + _, err = testChain.getClient() + assert.Error(t, err) + + // no valid nodes to select from + solORM.nodesForChain = []db.Node{ + db.Node{ + SolanaChainID: "devnet", + SolanaURL: mockServer.URL + "/mismatch/1", + }, + db.Node{ + SolanaChainID: "devnet", + SolanaURL: mockServer.URL + "/mismatch/2", + }, 
 } + _, err = testChain.getClient() + assert.Error(t, err) +} + +func TestSolanaChain_VerifiedClient(t *testing.T) { + called := false + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + out := `{ "jsonrpc": "2.0", "result": 1234, "id": 1 }` // getSlot response + + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + + // handle getGenesisHash request + if strings.Contains(string(body), "getGenesisHash") { + // should only be called once, chainID will be cached in chain + if called { + assert.NoError(t, errors.New("rpc has been called once already")) + } + // devnet genesis hash + out = fmt.Sprintf(TestSolanaGenesisHashTemplate, client.DevnetGenesisHash) + } + _, err = w.Write([]byte(out)) + require.NoError(t, err) + called = true + })) + defer mockServer.Close() + + lggr := logger.TestLogger(t) + testChain := chain{ + cfg: config.NewConfig(db.ChainCfg{}, lggr), + lggr: logger.TestLogger(t), + clientCache: map[string]cachedClient{}, + } + node := db.Node{SolanaURL: mockServer.URL} + + // happy path + testChain.id = "devnet" + _, err := testChain.verifiedClient(node) + assert.NoError(t, err) + + // retrieve cached client and retrieve slot height + c, err := testChain.verifiedClient(node) + assert.NoError(t, err) + slot, err := c.SlotHeight() + assert.NoError(t, err) + assert.Equal(t, uint64(1234), slot) + + // expect error from id mismatch (even if using a cached client) + testChain.id = "incorrect" + _, err = testChain.verifiedClient(node) + assert.Error(t, err) +} + +func TestSolanaChain_VerifiedClient_ParallelClients(t *testing.T) { + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + out := fmt.Sprintf(TestSolanaGenesisHashTemplate, client.DevnetGenesisHash) + _, err := w.Write([]byte(out)) + require.NoError(t, err) + })) + defer mockServer.Close() + + lggr := logger.TestLogger(t) + testChain := chain{ + id: "devnet", + cfg: config.NewConfig(db.ChainCfg{}, 
lggr), + lggr: logger.TestLogger(t), + clientCache: map[string]cachedClient{}, + } + node := db.Node{SolanaURL: mockServer.URL} + + var wg sync.WaitGroup + wg.Add(2) + + var client0 client.ReaderWriter + var client1 client.ReaderWriter + var err0 error + var err1 error + + // call verifiedClient in parallel + go func() { + client0, err0 = testChain.verifiedClient(node) + assert.NoError(t, err0) + wg.Done() + }() + go func() { + client1, err1 = testChain.verifiedClient(node) + assert.NoError(t, err1) + wg.Done() + }() + + wg.Wait() + // check if pointers are all the same + assert.Equal(t, testChain.clientCache[mockServer.URL].rw, client0) + assert.Equal(t, testChain.clientCache[mockServer.URL].rw, client1) +} + +var _ ORM = &mockORM{} + +type mockORM struct { + nodesForChain []db.Node +} + +func (m *mockORM) NodesForChain(chainID string, offset, limit int, qopts ...pg.QOpt) (nodes []db.Node, count int, err error) { + return m.nodesForChain, len(m.nodesForChain), nil +} + +func (m *mockORM) Chain(s string, opt ...pg.QOpt) (Chain, error) { panic("unimplemented") } + +func (m *mockORM) Chains(offset, limit int, qopts ...pg.QOpt) ([]Chain, int, error) { + panic("unimplemented") +} + +func (m *mockORM) CreateChain(id string, config db.ChainCfg, qopts ...pg.QOpt) (Chain, error) { + panic("unimplemented") +} + +func (m *mockORM) UpdateChain(id string, enabled bool, config db.ChainCfg, qopts ...pg.QOpt) (Chain, error) { + panic("unimplemented") +} + +func (m *mockORM) DeleteChain(id string, qopts ...pg.QOpt) error { panic("unimplemented") } + +func (m *mockORM) EnabledChains(opt ...pg.QOpt) ([]Chain, error) { panic("unimplemented") } + +func (m *mockORM) CreateNode(node db.Node, opt ...pg.QOpt) (db.Node, error) { + panic("unimplemented") +} + +func (m *mockORM) DeleteNode(i int32, opt ...pg.QOpt) error { panic("unimplemented") } + +func (m *mockORM) Node(i int32, opt ...pg.QOpt) (db.Node, error) { panic("unimplemented") } + +func (m *mockORM) NodeNamed(s string, opt 
...pg.QOpt) (db.Node, error) { panic("unimplemented") } + +func (m *mockORM) Nodes(offset, limit int, qopts ...pg.QOpt) (nodes []db.Node, count int, err error) { + panic("unimplemented") +} + +func (m *mockORM) SetupNodes([]db.Node, []string) error { panic("unimplemented") } diff --git a/core/chains/solana/legacy.go b/core/chains/solana/legacy.go new file mode 100644 index 00000000000..51fa04182af --- /dev/null +++ b/core/chains/solana/legacy.go @@ -0,0 +1,53 @@ +package solana + +import ( + "encoding/json" + "sort" + + "github.com/pkg/errors" + solanadb "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + "github.com/smartcontractkit/sqlx" + + "github.com/smartcontractkit/chainlink/core/logger" +) + +type SetupConfig interface { + SolanaNodes() string + LogSQL() bool +} + +// SetupNodes is a hack/shim method to allow node operators to specify multiple nodes via ENV. +// See: https://app.shortcut.com/chainlinklabs/epic/33587/overhaul-config?cf_workflow=500000005&ct_workflow=all +func SetupNodes(db *sqlx.DB, cfg SetupConfig, lggr logger.Logger) (err error) { + str := cfg.SolanaNodes() + if str == "" { + return nil + } + + var nodes []solanadb.Node + if err = json.Unmarshal([]byte(str), &nodes); err != nil { + return errors.Wrapf(err, "invalid SOLANA_NODES json: %q", str) + } + // Sorting gives a consistent insert ordering + sort.Slice(nodes, func(i, j int) bool { + return nodes[i].Name < nodes[j].Name + }) + + lggr.Info("SOLANA_NODES was set; clobbering solana_nodes table") + + orm := NewORM(db, lggr, cfg) + return orm.SetupNodes(nodes, uniqueIDs(nodes)) +} + +func uniqueIDs(ns []solanadb.Node) (ids []string) { + m := map[string]struct{}{} + for _, n := range ns { + id := n.SolanaChainID + if _, ok := m[id]; ok { + continue + } + ids = append(ids, id) + m[id] = struct{}{} + } + return +} diff --git a/core/chains/solana/legacy_test.go b/core/chains/solana/legacy_test.go new file mode 100644 index 00000000000..729f7f3cae0 --- /dev/null +++ 
b/core/chains/solana/legacy_test.go @@ -0,0 +1,79 @@ +package solana_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + solanadb "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + + "github.com/smartcontractkit/chainlink/core/chains/solana" + "github.com/smartcontractkit/chainlink/core/internal/cltest" + "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/core/logger" +) + +func TestSetupNodes(t *testing.T) { + db := pgtest.NewSqlxDB(t) + + // Insert existing node which will be erased + pgtest.MustExec(t, db, `INSERT INTO solana_chains (id, created_at, updated_at) VALUES ('test-setup',NOW(),NOW())`) + pgtest.MustExec(t, db, `INSERT INTO solana_nodes (name, solana_chain_id, solana_url, created_at, updated_at) VALUES ('foo','test-setup','ws://example.com',NOW(),NOW())`) + + s := ` +[ + { + "name": "mainnet-one", + "terraChainId": "mainnet", + "solanaURL": "ws://test1.invalid" + }, + { + "name": "mainnet-two", + "terraChainId": "mainnet", + "solanaURL": "https://test2.invalid" + }, + { + "name": "testnet-one", + "terraChainId": "testnet", + "solanaURL": "http://test3.invalid" + }, + { + "name": "testnet-two", + "terraChainId": "testnet", + "solanaURL": "http://test4.invalid" + } +] + ` + + cfg := config{ + solanaNodes: s, + } + + err := solana.SetupNodes(db, cfg, logger.TestLogger(t)) + require.NoError(t, err) + + cltest.AssertCount(t, db, "solana_nodes", 4) + + var nodes []solanadb.Node + err = db.Select(&nodes, `SELECT * FROM solana_nodes ORDER BY name ASC`) + require.NoError(t, err) + + require.Len(t, nodes, 4) + + assert.Equal(t, "mainnet-one", nodes[0].Name) + assert.Equal(t, "mainnet-two", nodes[1].Name) + assert.Equal(t, "testnet-one", nodes[2].Name) + assert.Equal(t, "testnet-two", nodes[3].Name) + +} + +type config struct { + solanaNodes string +} + +func (c config) SolanaNodes() string { + return c.solanaNodes +} + +func (c 
config) LogSQL() bool { return false } diff --git a/core/chains/solana/mocks/chain.go b/core/chains/solana/mocks/chain.go new file mode 100644 index 00000000000..ebf0ab50494 --- /dev/null +++ b/core/chains/solana/mocks/chain.go @@ -0,0 +1,144 @@ +// Code generated by mockery v2.10.1. DO NOT EDIT. + +package mocks + +import ( + client "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" + config "github.com/smartcontractkit/chainlink-solana/pkg/solana/config" + + context "context" + + mock "github.com/stretchr/testify/mock" + + solana "github.com/smartcontractkit/chainlink-solana/pkg/solana" +) + +// Chain is an autogenerated mock type for the Chain type +type Chain struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *Chain) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Config provides a mock function with given fields: +func (_m *Chain) Config() config.Config { + ret := _m.Called() + + var r0 config.Config + if rf, ok := ret.Get(0).(func() config.Config); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.Config) + } + } + + return r0 +} + +// Healthy provides a mock function with given fields: +func (_m *Chain) Healthy() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ID provides a mock function with given fields: +func (_m *Chain) ID() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Reader provides a mock function with given fields: +func (_m *Chain) Reader() (client.Reader, error) { + ret := _m.Called() + + var r0 client.Reader + if rf, ok := ret.Get(0).(func() client.Reader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil 
{ + r0 = ret.Get(0).(client.Reader) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Ready provides a mock function with given fields: +func (_m *Chain) Ready() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *Chain) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// TxManager provides a mock function with given fields: +func (_m *Chain) TxManager() solana.TxManager { + ret := _m.Called() + + var r0 solana.TxManager + if rf, ok := ret.Get(0).(func() solana.TxManager); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(solana.TxManager) + } + } + + return r0 +} diff --git a/core/chains/solana/mocks/chain_set.go b/core/chains/solana/mocks/chain_set.go new file mode 100644 index 00000000000..6861915e32a --- /dev/null +++ b/core/chains/solana/mocks/chain_set.go @@ -0,0 +1,94 @@ +// Code generated by mockery v2.10.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + solana "github.com/smartcontractkit/chainlink-solana/pkg/solana" + mock "github.com/stretchr/testify/mock" +) + +// ChainSet is an autogenerated mock type for the ChainSet type +type ChainSet struct { + mock.Mock +} + +// Chain provides a mock function with given fields: ctx, id +func (_m *ChainSet) Chain(ctx context.Context, id string) (solana.Chain, error) { + ret := _m.Called(ctx, id) + + var r0 solana.Chain + if rf, ok := ret.Get(0).(func(context.Context, string) solana.Chain); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(solana.Chain) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Close provides a mock function with given fields: +func (_m *ChainSet) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Healthy provides a mock function with given fields: +func (_m *ChainSet) Healthy() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Ready provides a mock function with given fields: +func (_m *ChainSet) Ready() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *ChainSet) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/core/chains/solana/mocks/reader.go b/core/chains/solana/mocks/reader.go new file mode 100644 index 00000000000..9f7cce69204 --- /dev/null +++ 
b/core/chains/solana/mocks/reader.go @@ -0,0 +1,147 @@ +// Code generated by mockery v2.10.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + rpc "github.com/gagliardetto/solana-go/rpc" + mock "github.com/stretchr/testify/mock" + + solana "github.com/gagliardetto/solana-go" +) + +// Reader is an autogenerated mock type for the Reader type +type Reader struct { + mock.Mock +} + +// Balance provides a mock function with given fields: addr +func (_m *Reader) Balance(addr solana.PublicKey) (uint64, error) { + ret := _m.Called(addr) + + var r0 uint64 + if rf, ok := ret.Get(0).(func(solana.PublicKey) uint64); ok { + r0 = rf(addr) + } else { + r0 = ret.Get(0).(uint64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(solana.PublicKey) error); ok { + r1 = rf(addr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ChainID provides a mock function with given fields: +func (_m *Reader) ChainID() (string, error) { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountInfoWithOpts provides a mock function with given fields: ctx, addr, opts +func (_m *Reader) GetAccountInfoWithOpts(ctx context.Context, addr solana.PublicKey, opts *rpc.GetAccountInfoOpts) (*rpc.GetAccountInfoResult, error) { + ret := _m.Called(ctx, addr, opts) + + var r0 *rpc.GetAccountInfoResult + if rf, ok := ret.Get(0).(func(context.Context, solana.PublicKey, *rpc.GetAccountInfoOpts) *rpc.GetAccountInfoResult); ok { + r0 = rf(ctx, addr, opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.GetAccountInfoResult) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, solana.PublicKey, *rpc.GetAccountInfoOpts) error); ok { + r1 = rf(ctx, addr, opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
GetFeeForMessage provides a mock function with given fields: msg +func (_m *Reader) GetFeeForMessage(msg string) (uint64, error) { + ret := _m.Called(msg) + + var r0 uint64 + if rf, ok := ret.Get(0).(func(string) uint64); ok { + r0 = rf(msg) + } else { + r0 = ret.Get(0).(uint64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(msg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestBlockhash provides a mock function with given fields: +func (_m *Reader) LatestBlockhash() (*rpc.GetLatestBlockhashResult, error) { + ret := _m.Called() + + var r0 *rpc.GetLatestBlockhashResult + if rf, ok := ret.Get(0).(func() *rpc.GetLatestBlockhashResult); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.GetLatestBlockhashResult) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SlotHeight provides a mock function with given fields: +func (_m *Reader) SlotHeight() (uint64, error) { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/core/chains/solana/mocks/tx_manager.go b/core/chains/solana/mocks/tx_manager.go new file mode 100644 index 00000000000..9be980952bc --- /dev/null +++ b/core/chains/solana/mocks/tx_manager.go @@ -0,0 +1,28 @@ +// Code generated by mockery v2.10.1. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + solana "github.com/gagliardetto/solana-go" +) + +// TxManager is an autogenerated mock type for the TxManager type +type TxManager struct { + mock.Mock +} + +// Enqueue provides a mock function with given fields: accountID, msg +func (_m *TxManager) Enqueue(accountID string, msg *solana.Transaction) error { + ret := _m.Called(accountID, msg) + + var r0 error + if rf, ok := ret.Get(0).(func(string, *solana.Transaction) error); ok { + r0 = rf(accountID, msg) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/core/chains/solana/monitor/balance.go b/core/chains/solana/monitor/balance.go new file mode 100644 index 00000000000..e9d21f4cac0 --- /dev/null +++ b/core/chains/solana/monitor/balance.go @@ -0,0 +1,138 @@ +// TODO: Improve code reuse (mostly c/p of core/chains/terra/monitor/balance.go) +package monitor + +import ( + "context" + "time" + + "github.com/gagliardetto/solana-go" + + solanaClient "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" + + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services" + "github.com/smartcontractkit/chainlink/core/services/keystore/keys/solkey" + "github.com/smartcontractkit/chainlink/core/utils" +) + +// Config defines the monitor configuration. +type Config interface { + BalancePollPeriod() time.Duration +} + +// Keystore provides the keys to be monitored. +type Keystore interface { + GetAll() ([]solkey.Key, error) +} + +// NewBalanceMonitor returns a balance monitoring services.Service which reports the luna balance of all ks keys to prometheus. 
+func NewBalanceMonitor(chainID string, cfg Config, lggr logger.Logger, ks Keystore, newReader func() (solanaClient.Reader, error)) services.ServiceCtx { + return newBalanceMonitor(chainID, cfg, lggr, ks, newReader) +} + +func newBalanceMonitor(chainID string, cfg Config, lggr logger.Logger, ks Keystore, newReader func() (solanaClient.Reader, error)) *balanceMonitor { + b := balanceMonitor{ + chainID: chainID, + cfg: cfg, + lggr: lggr.Named("BalanceMonitor"), + ks: ks, + newReader: newReader, + stop: make(chan struct{}), + done: make(chan struct{}), + } + b.updateFn = b.updateProm + return &b +} + +type balanceMonitor struct { + utils.StartStopOnce + chainID string + cfg Config + lggr logger.Logger + ks Keystore + newReader func() (solanaClient.Reader, error) + updateFn func(acc solana.PublicKey, lamports uint64) // overridable for testing + + reader solanaClient.Reader + + stop, done chan struct{} +} + +func (b *balanceMonitor) Start(context.Context) error { + return b.StartOnce("SolanaBalanceMonitor", func() error { + go b.monitor() + return nil + }) +} + +func (b *balanceMonitor) Close() error { + return b.StopOnce("SolanaBalanceMonitor", func() error { + close(b.stop) + <-b.done + return nil + }) +} + +func (b *balanceMonitor) monitor() { + defer close(b.done) + + tick := time.After(utils.WithJitter(b.cfg.BalancePollPeriod())) + for { + select { + case <-b.stop: + return + case <-tick: + b.updateBalances() + tick = time.After(utils.WithJitter(b.cfg.BalancePollPeriod())) + } + } +} + +// getReader returns the cached solanaClient.Reader, or creates a new one if nil. 
+func (b *balanceMonitor) getReader() (solanaClient.Reader, error) { + if b.reader == nil { + var err error + b.reader, err = b.newReader() + if err != nil { + return nil, err + } + } + return b.reader, nil +} + +func (b *balanceMonitor) updateBalances() { + keys, err := b.ks.GetAll() + if err != nil { + b.lggr.Errorw("Failed to get keys", "err", err) + return + } + if len(keys) == 0 { + return + } + reader, err := b.getReader() + if err != nil { + b.lggr.Errorw("Failed to get client", "err", err) + return + } + var gotSomeBals bool + for _, k := range keys { + // Check for shutdown signal, since Balance blocks and may be slow. + select { + case <-b.stop: + return + default: + } + acc := k.PublicKey() + lamports, err := reader.Balance(acc) + if err != nil { + b.lggr.Errorw("Failed to get balance", "account", acc.String(), "err", err) + continue + } + gotSomeBals = true + b.updateFn(acc, lamports) + } + if !gotSomeBals { + // Try a new client next time. + b.reader = nil + } +} diff --git a/core/chains/solana/monitor/balance_test.go b/core/chains/solana/monitor/balance_test.go new file mode 100644 index 00000000000..5f130aae5c4 --- /dev/null +++ b/core/chains/solana/monitor/balance_test.go @@ -0,0 +1,89 @@ +package monitor + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/gagliardetto/solana-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + solanaRelay "github.com/smartcontractkit/chainlink-solana/pkg/solana" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/client/mocks" + + "github.com/smartcontractkit/chainlink/core/internal/testutils" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services/keystore/keys/solkey" +) + +func TestBalanceMonitor(t *testing.T) { + const chainID = "Chainlinktest-42" + ks := keystore{} + for i := 0; i < 3; i++ { + k, err := solkey.New() + assert.NoError(t, err) + ks = append(ks, k) + } + + bals := []uint64{0, 1, 
1_000_000_000} + expBals := []string{ + "0.000000000", + "0.000000001", + "1.000000000", + } + + client := new(mocks.ReaderWriter) + type update struct{ acc, bal string } + var exp []update + for i := range bals { + acc := ks[i].PublicKey() + client.On("Balance", acc).Return(bals[i], nil) + exp = append(exp, update{acc.String(), expBals[i]}) + } + cfg := &config{balancePollPeriod: time.Second} + b := newBalanceMonitor(chainID, cfg, logger.TestLogger(t), ks, nil) + var got []update + done := make(chan struct{}) + b.updateFn = func(acc solana.PublicKey, lamports uint64) { + select { + case <-done: + return + default: + } + v := solanaRelay.LamportsToSol(lamports) // convert from lamports to SOL + got = append(got, update{acc.String(), fmt.Sprintf("%.9f", v)}) + if len(got) == len(exp) { + close(done) + } + } + b.reader = client + + require.NoError(t, b.Start(context.Background())) + t.Cleanup(func() { + assert.NoError(t, b.Close()) + client.AssertExpectations(t) + }) + select { + case <-time.After(testutils.WaitTimeout(t)): + t.Fatal("timed out waiting for balance monitor") + case <-done: + } + + assert.EqualValues(t, exp, got) +} + +type config struct { + balancePollPeriod time.Duration +} + +func (c *config) BalancePollPeriod() time.Duration { + return c.balancePollPeriod +} + +type keystore []solkey.Key + +func (k keystore) GetAll() ([]solkey.Key, error) { + return k, nil +} diff --git a/core/chains/solana/monitor/prom.go b/core/chains/solana/monitor/prom.go new file mode 100644 index 00000000000..ebc476816a7 --- /dev/null +++ b/core/chains/solana/monitor/prom.go @@ -0,0 +1,18 @@ +package monitor + +import ( + "github.com/gagliardetto/solana-go" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + solanaRelay "github.com/smartcontractkit/chainlink-solana/pkg/solana" +) + +var promSolanaBalance = promauto.NewGaugeVec( + prometheus.GaugeOpts{Name: "solana_balance", Help: "Solana account balances"}, + 
[]string{"account", "chainID", "chainSet", "denomination"}, +) + +func (b *balanceMonitor) updateProm(acc solana.PublicKey, lamports uint64) { + v := solanaRelay.LamportsToSol(lamports) // convert from lamports to SOL + promSolanaBalance.WithLabelValues(acc.String(), b.chainID, "solana", "SOL").Set(v) +} diff --git a/core/chains/solana/monitor/prom_test.go b/core/chains/solana/monitor/prom_test.go new file mode 100644 index 00000000000..d1646e36874 --- /dev/null +++ b/core/chains/solana/monitor/prom_test.go @@ -0,0 +1,21 @@ +package monitor + +import ( + "testing" + + "github.com/gagliardetto/solana-go" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/assert" +) + +func TestPromSolBalance(t *testing.T) { + key := solana.PublicKey{} + balance := uint64(1_000_000_000) + + monitor := balanceMonitor{chainID: "test-chain"} + monitor.updateProm(key, balance) + + // happy path test + promBalance := testutil.ToFloat64(promSolanaBalance.WithLabelValues(key.String(), monitor.chainID, "solana", "SOL")) + assert.Equal(t, float64(balance)/float64(solana.LAMPORTS_PER_SOL), promBalance) +} diff --git a/core/chains/solana/orm.go b/core/chains/solana/orm.go new file mode 100644 index 00000000000..a76f3631662 --- /dev/null +++ b/core/chains/solana/orm.go @@ -0,0 +1,38 @@ +package solana + +import ( + "github.com/smartcontractkit/sqlx" + + soldb "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + + "github.com/smartcontractkit/chainlink/core/chains" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services/pg" +) + +type Chain = chains.Chain[string, soldb.ChainCfg] + +// ORM manages solana chains and nodes. 
+type ORM interface { + Chain(string, ...pg.QOpt) (Chain, error) + Chains(offset, limit int, qopts ...pg.QOpt) ([]Chain, int, error) + CreateChain(id string, config soldb.ChainCfg, qopts ...pg.QOpt) (Chain, error) + UpdateChain(id string, enabled bool, config soldb.ChainCfg, qopts ...pg.QOpt) (Chain, error) + DeleteChain(id string, qopts ...pg.QOpt) error + EnabledChains(...pg.QOpt) ([]Chain, error) + + CreateNode(soldb.Node, ...pg.QOpt) (soldb.Node, error) + DeleteNode(int32, ...pg.QOpt) error + Node(int32, ...pg.QOpt) (soldb.Node, error) + NodeNamed(string, ...pg.QOpt) (soldb.Node, error) + Nodes(offset, limit int, qopts ...pg.QOpt) (nodes []soldb.Node, count int, err error) + NodesForChain(chainID string, offset, limit int, qopts ...pg.QOpt) (nodes []soldb.Node, count int, err error) + + SetupNodes([]soldb.Node, []string) error +} + +// NewORM returns an ORM backed by db. +func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.LogConfig) ORM { + q := pg.NewQ(db, lggr.Named("ORM"), cfg) + return chains.NewORM[string, soldb.ChainCfg, soldb.Node](q, "solana", "solana_url") +} diff --git a/core/chains/solana/orm_test.go b/core/chains/solana/orm_test.go new file mode 100644 index 00000000000..b21821cd025 --- /dev/null +++ b/core/chains/solana/orm_test.go @@ -0,0 +1,114 @@ +// TODO: Improve code reuse (mostly c/p of core/chains/terra/orm_test.go) +package solana_test + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + "github.com/smartcontractkit/sqlx" + + "github.com/smartcontractkit/chainlink/core/chains/solana" + "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/core/logger" +) + +func setupORM(t *testing.T) (*sqlx.DB, solana.ORM) { + t.Helper() + + db := pgtest.NewSqlxDB(t) + orm := solana.NewORM(db, logger.TestLogger(t), pgtest.NewPGCfg(true)) + + return db, orm +} 
+ +func Test_ORM(t *testing.T) { + _, orm := setupORM(t) + + dbcs, err := orm.EnabledChains() + require.NoError(t, err) + require.Empty(t, dbcs) + + chainIDA := fmt.Sprintf("Chainlinktest-%d", rand.Int31n(999999)) + _, err = orm.CreateChain(chainIDA, db.ChainCfg{}) + require.NoError(t, err) + chainIDB := fmt.Sprintf("Chainlinktest-%d", rand.Int31n(999999)) + _, err = orm.CreateChain(chainIDB, db.ChainCfg{}) + require.NoError(t, err) + + dbcs, err = orm.EnabledChains() + require.NoError(t, err) + require.Len(t, dbcs, 2) + + newNode := db.Node{ + Name: "first", + SolanaChainID: chainIDA, + SolanaURL: "http://tender.mint.test/columbus-5", + } + gotNode, err := orm.CreateNode(newNode) + require.NoError(t, err) + assertEqual(t, newNode, gotNode) + + gotNode, err = orm.Node(gotNode.ID) + require.NoError(t, err) + assertEqual(t, newNode, gotNode) + + newNode2 := db.Node{ + Name: "second", + SolanaChainID: chainIDB, + SolanaURL: "http://tender.mint.test/bombay-12", + } + gotNode2, err := orm.CreateNode(newNode2) + require.NoError(t, err) + assertEqual(t, newNode2, gotNode2) + + gotNodes, count, err := orm.Nodes(0, 3) + require.NoError(t, err) + require.Equal(t, 2, count) + if assert.Len(t, gotNodes, 2) { + assertEqual(t, newNode, gotNodes[0]) + assertEqual(t, newNode2, gotNodes[1]) + } + + gotNodes, count, err = orm.NodesForChain(newNode2.SolanaChainID, 0, 3) + require.NoError(t, err) + require.Equal(t, 1, count) + if assert.Len(t, gotNodes, 1) { + assertEqual(t, newNode2, gotNodes[0]) + } + + err = orm.DeleteNode(gotNode.ID) + require.NoError(t, err) + + gotNodes, count, err = orm.Nodes(0, 3) + require.NoError(t, err) + require.Equal(t, 1, count) + if assert.Len(t, gotNodes, 1) { + assertEqual(t, newNode2, gotNodes[0]) + } + + newNode3 := db.Node{ + Name: "third", + SolanaChainID: chainIDB, + SolanaURL: "http://tender.mint.test/bombay-12", + } + gotNode3, err := orm.CreateNode(newNode3) + require.NoError(t, err) + assertEqual(t, newNode3, gotNode3) + + gotNamed, err := 
orm.NodeNamed("third") + require.NoError(t, err) + assertEqual(t, newNode3, gotNamed) +} + +func assertEqual(t *testing.T, newNode db.Node, gotNode db.Node) { + t.Helper() + + assert.Equal(t, newNode.Name, gotNode.Name) + assert.Equal(t, newNode.SolanaChainID, gotNode.SolanaChainID) + assert.Equal(t, newNode.SolanaURL, gotNode.SolanaURL) +} diff --git a/core/chains/solana/soltxm/txm.go b/core/chains/solana/soltxm/txm.go new file mode 100644 index 00000000000..0711d744258 --- /dev/null +++ b/core/chains/solana/soltxm/txm.go @@ -0,0 +1,116 @@ +package soltxm + +import ( + "context" + + solanaGo "github.com/gagliardetto/solana-go" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana" + solanaClient "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/config" + + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services" + "github.com/smartcontractkit/chainlink/core/utils" +) + +const MaxQueueLen = 1000 + +var ( + _ services.ServiceCtx = (*Txm)(nil) + _ solana.TxManager = (*Txm)(nil) +) + +// Txm manages transactions for the solana blockchain. +// simple implementation with no persistently stored txs +type Txm struct { + starter utils.StartStopOnce + lggr logger.Logger + tc func() (solanaClient.ReaderWriter, error) + queue chan *solanaGo.Transaction + stop, done chan struct{} + cfg config.Config +} + +// NewTxm creates a txm. Uses simulation so should only be used to send txes to trusted contracts i.e. OCR. 
+func NewTxm(tc func() (solanaClient.ReaderWriter, error), cfg config.Config, lggr logger.Logger) *Txm { + lggr = lggr.Named("Txm") + return &Txm{ + starter: utils.StartStopOnce{}, + tc: tc, + lggr: lggr, + queue: make(chan *solanaGo.Transaction, MaxQueueLen), // queue can support 1000 pending txs + stop: make(chan struct{}), + done: make(chan struct{}), + cfg: cfg, + } +} + +// Start subscribes to queuing channel and processes them. +func (txm *Txm) Start(context.Context) error { + return txm.starter.StartOnce("solanatxm", func() error { + go txm.run() + return nil + }) +} + +func (txm *Txm) run() { + defer close(txm.done) + ctx, cancel := utils.ContextFromChan(txm.stop) + defer cancel() + for { + select { + case tx := <-txm.queue: + // TODO: this section could be better optimized for sending TXs quickly + // fetch client + client, err := txm.tc() + if err != nil { + txm.lggr.Errorw("failed to get client", "err", err) + continue + } + // process tx + sig, err := client.SendTx(ctx, tx) + if err != nil { + txm.lggr.Criticalw("failed to send transaction", "err", err) + continue + } + txm.lggr.Debugw("successfully sent transaction", "signature", sig.String()) + case <-txm.stop: + return + } + } +} + +// TODO: transaction confirmation +// use ConfirmPollPeriod() in config + +// Enqueue enqueue a msg destined for the solana chain. 
+func (txm *Txm) Enqueue(accountID string, msg *solanaGo.Transaction) error { + select { + case txm.queue <- msg: + default: + txm.lggr.Errorw("failed to enqeue tx", "queueLength", len(txm.queue), "tx", msg) + return errors.Errorf("failed to enqueue transaction for %s", accountID) + } + return nil +} + +// Close close service +func (txm *Txm) Close() error { + return txm.starter.StopOnce("solanatxm", func() error { + close(txm.stop) + <-txm.done + return nil + }) +} + +// Healthy service is healthy +func (txm *Txm) Healthy() error { + return nil +} + +// Ready service is ready +func (txm *Txm) Ready() error { + return nil +} diff --git a/core/chains/solana/soltxm/txm_test.go b/core/chains/solana/soltxm/txm_test.go new file mode 100644 index 00000000000..cbcef4511c4 --- /dev/null +++ b/core/chains/solana/soltxm/txm_test.go @@ -0,0 +1,93 @@ +//go:build integration + +package soltxm_test + +import ( + "context" + "testing" + "time" + + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/programs/system" + solanaClient "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/config" + "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + "github.com/smartcontractkit/chainlink/core/chains/solana/soltxm" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTxm_Integration(t *testing.T) { + url := solanaClient.SetupLocalSolNode(t) + privKey, err := solana.NewRandomPrivateKey() + require.NoError(t, err) + pubKey := privKey.PublicKey() + solanaClient.FundTestAccounts(t, []solana.PublicKey{pubKey}, url) + + // set up txm + lggr := logger.TestLogger(t) + cfg := config.NewConfig(db.ChainCfg{}, lggr) + client, err := solanaClient.NewClient(url, cfg, 2*time.Second, lggr) + require.NoError(t, err) + getClient := func() (solanaClient.ReaderWriter, error) { + return client, nil + } + txm := 
soltxm.NewTxm(getClient, cfg, lggr) + + // track initial balance + initBal, err := client.Balance(pubKey) + assert.NoError(t, err) + assert.NotEqual(t, uint64(0), initBal) // should be funded + + // start + require.NoError(t, txm.Start(context.Background())) + + // already started + assert.Error(t, txm.Start(context.Background())) + + // create receiver + privKeyReceiver, err := solana.NewRandomPrivateKey() + pubKeyReceiver := privKeyReceiver.PublicKey() + + // create transfer tx + hash, err := client.LatestBlockhash() + assert.NoError(t, err) + tx, err := solana.NewTransaction( + []solana.Instruction{ + system.NewTransferInstruction( + solana.LAMPORTS_PER_SOL, + pubKey, + pubKeyReceiver, + ).Build(), + }, + hash.Value.Blockhash, + solana.TransactionPayer(pubKey), + ) + assert.NoError(t, err) + + // sign tx + _, err = tx.Sign( + func(key solana.PublicKey) *solana.PrivateKey { + if privKey.PublicKey().Equals(key) { + return &privKey + } + return nil + }, + ) + assert.NoError(t, err) + + // enqueue tx + assert.NoError(t, txm.Enqueue("testTransmission", tx)) + time.Sleep(time.Second) // wait for tx + + // check balance changes + senderBal, err := client.Balance(pubKey) + assert.NoError(t, err) + assert.Greater(t, initBal, senderBal) + assert.Greater(t, initBal-senderBal, solana.LAMPORTS_PER_SOL) // balance change = sent + fees + + receiverBal, err := client.Balance(pubKeyReceiver) + assert.NoError(t, err) + assert.Equal(t, solana.LAMPORTS_PER_SOL, receiverBal) +} diff --git a/core/chains/terra/chain.go b/core/chains/terra/chain.go index 93bdb25dae0..ca90c375e17 100644 --- a/core/chains/terra/chain.go +++ b/core/chains/terra/chain.go @@ -50,7 +50,7 @@ type chain struct { } // NewChain returns a new chain backed by node. 
-func NewChain(db *sqlx.DB, ks keystore.Terra, logCfg pg.LogConfig, eb pg.EventBroadcaster, dbchain db.Chain, orm types.ORM, lggr logger.Logger) (*chain, error) { +func NewChain(db *sqlx.DB, ks keystore.Terra, logCfg pg.LogConfig, eb pg.EventBroadcaster, dbchain types.Chain, orm types.ORM, lggr logger.Logger) (*chain, error) { cfg := terra.NewConfig(dbchain.Cfg, lggr) lggr = lggr.With("terraChainID", dbchain.ID) var ch = chain{ diff --git a/core/chains/terra/chain_set.go b/core/chains/terra/chain_set.go index 487494f3403..9f12a9ce010 100644 --- a/core/chains/terra/chain_set.go +++ b/core/chains/terra/chain_set.go @@ -63,7 +63,7 @@ func (o ChainSetOpts) validate() (err error) { return } -func (o ChainSetOpts) newChain(dbchain db.Chain) (*chain, error) { +func (o ChainSetOpts) newChain(dbchain types.Chain) (*chain, error) { if !dbchain.Enabled { return nil, errors.Errorf("cannot create new chain with ID %s, the chain is disabled", dbchain.ID) } @@ -74,9 +74,9 @@ func (o ChainSetOpts) newChain(dbchain db.Chain) (*chain, error) { type ChainSet interface { terra.ChainSet - Add(context.Context, string, db.ChainCfg) (db.Chain, error) + Add(context.Context, string, db.ChainCfg) (types.Chain, error) Remove(string) error - Configure(ctx context.Context, id string, enabled bool, config db.ChainCfg) (db.Chain, error) + Configure(ctx context.Context, id string, enabled bool, config db.ChainCfg) (types.Chain, error) ORM() types.ORM } @@ -168,23 +168,23 @@ func (c *chainSet) Chain(ctx context.Context, id string) (terra.Chain, error) { return c.chains[id], nil } -func (c *chainSet) Add(ctx context.Context, id string, config db.ChainCfg) (db.Chain, error) { +func (c *chainSet) Add(ctx context.Context, id string, config db.ChainCfg) (types.Chain, error) { c.chainsMu.Lock() defer c.chainsMu.Unlock() if _, exists := c.chains[id]; exists { - return db.Chain{}, errors.Errorf("chain already exists with id %s", id) + return types.Chain{}, errors.Errorf("chain already exists with id %s", 
id) } dbchain, err := c.opts.ORM.CreateChain(id, config) if err != nil { - return db.Chain{}, err + return types.Chain{}, err } return dbchain, c.initializeChain(ctx, &dbchain) } // Requires a lock on chainsMu -func (c *chainSet) initializeChain(ctx context.Context, dbchain *db.Chain) error { +func (c *chainSet) initializeChain(ctx context.Context, dbchain *types.Chain) error { // Start it cid := dbchain.ID chain, err := c.opts.newChain(*dbchain) @@ -215,14 +215,14 @@ func (c *chainSet) Remove(id string) error { return chain.Close() } -func (c *chainSet) Configure(ctx context.Context, id string, enabled bool, config db.ChainCfg) (db.Chain, error) { +func (c *chainSet) Configure(ctx context.Context, id string, enabled bool, config db.ChainCfg) (types.Chain, error) { c.chainsMu.Lock() defer c.chainsMu.Unlock() // Update configuration stored in the database dbchain, err := c.opts.ORM.UpdateChain(id, enabled, config) if err != nil { - return db.Chain{}, err + return types.Chain{}, err } chain, exists := c.chains[id] @@ -231,7 +231,7 @@ func (c *chainSet) Configure(ctx context.Context, id string, enabled bool, confi case exists && !enabled: // Chain was toggled to disabled delete(c.chains, id) - return db.Chain{}, chain.Close() + return types.Chain{}, chain.Close() case !exists && enabled: // Chain was toggled to enabled return dbchain, c.initializeChain(ctx, &dbchain) diff --git a/core/chains/terra/legacy.go b/core/chains/terra/legacy.go new file mode 100644 index 00000000000..dc755ed30ef --- /dev/null +++ b/core/chains/terra/legacy.go @@ -0,0 +1,54 @@ +package terra + +import ( + "encoding/json" + "sort" + + "github.com/pkg/errors" + + terradb "github.com/smartcontractkit/chainlink-terra/pkg/terra/db" + "github.com/smartcontractkit/sqlx" + + "github.com/smartcontractkit/chainlink/core/logger" +) + +type SetupConfig interface { + TerraNodes() string + LogSQL() bool +} + +// SetupNodes is a hack/shim method to allow node operators to specify multiple nodes via ENV. 
+// See: https://app.shortcut.com/chainlinklabs/epic/33587/overhaul-config?cf_workflow=500000005&ct_workflow=all +func SetupNodes(db *sqlx.DB, cfg SetupConfig, lggr logger.Logger) (err error) { + str := cfg.TerraNodes() + if str == "" { + return nil + } + + var nodes []terradb.Node + if err = json.Unmarshal([]byte(str), &nodes); err != nil { + return errors.Wrapf(err, "invalid TERRA_NODES json: %q", str) + } + // Sorting gives a consistent insert ordering + sort.Slice(nodes, func(i, j int) bool { + return nodes[i].Name < nodes[j].Name + }) + + lggr.Info("TERRA_NODES was set; clobbering terra_nodes table") + + orm := NewORM(db, lggr, cfg) + return orm.SetupNodes(nodes, uniqueIDs(nodes)) +} + +func uniqueIDs(ns []terradb.Node) (ids []string) { + m := map[string]struct{}{} + for _, n := range ns { + id := n.TerraChainID + if _, ok := m[id]; ok { + continue + } + ids = append(ids, id) + m[id] = struct{}{} + } + return +} diff --git a/core/chains/terra/legacy_test.go b/core/chains/terra/legacy_test.go new file mode 100644 index 00000000000..247ba071922 --- /dev/null +++ b/core/chains/terra/legacy_test.go @@ -0,0 +1,79 @@ +package terra_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + terradb "github.com/smartcontractkit/chainlink-terra/pkg/terra/db" + + "github.com/smartcontractkit/chainlink/core/chains/terra" + "github.com/smartcontractkit/chainlink/core/internal/cltest" + "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/core/logger" +) + +func TestSetupNodes(t *testing.T) { + db := pgtest.NewSqlxDB(t) + + // Insert existing node which will be erased + pgtest.MustExec(t, db, `INSERT INTO terra_chains (id, created_at, updated_at) VALUES ('test-setup',NOW(),NOW())`) + pgtest.MustExec(t, db, `INSERT INTO terra_nodes (name, terra_chain_id, tendermint_url, created_at, updated_at) VALUES ('foo','test-setup','ws://example.com',NOW(),NOW())`) + + s := 
` +[ + { + "name": "bombay-one", + "terraChainId": "bombay", + "tendermintURL": "ws://test1.invalid" + }, + { + "name": "bombay-two", + "terraChainId": "bombay", + "tendermintURL": "https://test2.invalid" + }, + { + "name": "columbus-one", + "terraChainId": "columbus", + "tendermintURL": "http://test3.invalid" + }, + { + "name": "columbus-two", + "terraChainId": "columbus", + "tendermintURL": "http://test4.invalid" + } +] + ` + + cfg := config{ + terraNodes: s, + } + + err := terra.SetupNodes(db, cfg, logger.TestLogger(t)) + require.NoError(t, err) + + cltest.AssertCount(t, db, "terra_nodes", 4) + + var nodes []terradb.Node + err = db.Select(&nodes, `SELECT * FROM terra_nodes ORDER BY name ASC`) + require.NoError(t, err) + + require.Len(t, nodes, 4) + + assert.Equal(t, "bombay-one", nodes[0].Name) + assert.Equal(t, "bombay-two", nodes[1].Name) + assert.Equal(t, "columbus-one", nodes[2].Name) + assert.Equal(t, "columbus-two", nodes[3].Name) + +} + +type config struct { + terraNodes string +} + +func (c config) TerraNodes() string { + return c.terraNodes +} + +func (c config) LogSQL() bool { return false } diff --git a/core/chains/terra/mocks/chain.go b/core/chains/terra/mocks/chain.go index 56cf8ffd742..0d05703c4c2 100644 --- a/core/chains/terra/mocks/chain.go +++ b/core/chains/terra/mocks/chain.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/chains/terra/mocks/chain_set.go b/core/chains/terra/mocks/chain_set.go index 261444e8e6f..5fc11f8cf81 100644 --- a/core/chains/terra/mocks/chain_set.go +++ b/core/chains/terra/mocks/chain_set.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/chains/terra/mocks/reader.go b/core/chains/terra/mocks/reader.go index ff241b26319..24fb8fe8be7 100644 --- a/core/chains/terra/mocks/reader.go +++ b/core/chains/terra/mocks/reader.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/chains/terra/mocks/tx_manager.go b/core/chains/terra/mocks/tx_manager.go index a92fa73fadb..e206585df1a 100644 --- a/core/chains/terra/mocks/tx_manager.go +++ b/core/chains/terra/mocks/tx_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/chains/terra/orm.go b/core/chains/terra/orm.go index 3ed72494e14..8abf8f489ab 100644 --- a/core/chains/terra/orm.go +++ b/core/chains/terra/orm.go @@ -1,157 +1,18 @@ package terra import ( - "database/sql" - "github.com/smartcontractkit/sqlx" - "github.com/smartcontractkit/chainlink-terra/pkg/terra/db" + terradb "github.com/smartcontractkit/chainlink-terra/pkg/terra/db" + "github.com/smartcontractkit/chainlink/core/chains" "github.com/smartcontractkit/chainlink/core/chains/terra/types" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/pg" ) -type orm struct { - q pg.Q -} - -var _ types.ORM = (*orm)(nil) - // NewORM returns an ORM backed by db. func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.LogConfig) types.ORM { - return &orm{q: pg.NewQ(db, lggr.Named("ORM"), cfg)} -} - -func (o *orm) Chain(id string, qopts ...pg.QOpt) (dbchain db.Chain, err error) { - q := o.q.WithOpts(qopts...) - chainSQL := `SELECT * FROM terra_chains WHERE id = $1;` - err = q.Get(&dbchain, chainSQL, id) - return -} - -func (o *orm) CreateChain(id string, config db.ChainCfg, qopts ...pg.QOpt) (chain db.Chain, err error) { - q := o.q.WithOpts(qopts...) 
- sql := `INSERT INTO terra_chains (id, cfg, created_at, updated_at) VALUES ($1, $2, now(), now()) RETURNING *` - err = q.Get(&chain, sql, id, config) - return -} - -func (o *orm) UpdateChain(id string, enabled bool, config db.ChainCfg, qopts ...pg.QOpt) (chain db.Chain, err error) { - q := o.q.WithOpts(qopts...) - sql := `UPDATE terra_chains SET enabled = $1, cfg = $2, updated_at = now() WHERE id = $3 RETURNING *` - err = q.Get(&chain, sql, enabled, config, id) - return -} - -func (o *orm) DeleteChain(id string, qopts ...pg.QOpt) error { - q := o.q.WithOpts(qopts...) - query := `DELETE FROM terra_chains WHERE id = $1` - result, err := q.Exec(query, id) - if err != nil { - return err - } - rowsAffected, err := result.RowsAffected() - if err != nil { - return err - } - if rowsAffected == 0 { - return sql.ErrNoRows - } - return nil -} - -func (o *orm) Chains(offset, limit int, qopts ...pg.QOpt) (chains []db.Chain, count int, err error) { - q := o.q.WithOpts(qopts...) - if err = q.Get(&count, "SELECT COUNT(*) FROM terra_chains"); err != nil { - return - } - - sql := `SELECT * FROM terra_chains ORDER BY created_at, id LIMIT $1 OFFSET $2;` - if err = q.Select(&chains, sql, limit, offset); err != nil { - return - } - - return -} - -func (o *orm) EnabledChains(qopts ...pg.QOpt) (chains []db.Chain, err error) { - q := o.q.WithOpts(qopts...) - chainsSQL := `SELECT * FROM terra_chains WHERE enabled ORDER BY created_at, id;` - if err = q.Select(&chains, chainsSQL); err != nil { - return - } - return -} - -func (o *orm) CreateNode(data types.NewNode, qopts ...pg.QOpt) (node db.Node, err error) { - q := o.q.WithOpts(qopts...) 
- sql := `INSERT INTO terra_nodes (name, terra_chain_id, tendermint_url, created_at, updated_at) - VALUES (:name, :terra_chain_id, :tendermint_url, now(), now()) - RETURNING *;` - stmt, err := q.PrepareNamed(sql) - if err != nil { - return node, err - } - err = stmt.Get(&node, data) - return node, err -} - -func (o *orm) DeleteNode(id int32, qopts ...pg.QOpt) error { - q := o.q.WithOpts(qopts...) - query := `DELETE FROM terra_nodes WHERE id = $1` - result, err := q.Exec(query, id) - if err != nil { - return err - } - rowsAffected, err := result.RowsAffected() - if err != nil { - return err - } - if rowsAffected == 0 { - return sql.ErrNoRows - } - return nil -} - -func (o *orm) Node(id int32, qopts ...pg.QOpt) (node db.Node, err error) { - q := o.q.WithOpts(qopts...) - err = q.Get(&node, "SELECT * FROM terra_nodes WHERE id = $1;", id) - - return -} - -func (o *orm) NodeNamed(name string, qopts ...pg.QOpt) (node db.Node, err error) { - q := o.q.WithOpts(qopts...) - err = q.Get(&node, "SELECT * FROM terra_nodes WHERE name = $1;", name) - - return -} - -func (o *orm) Nodes(offset, limit int, qopts ...pg.QOpt) (nodes []db.Node, count int, err error) { - q := o.q.WithOpts(qopts...) - if err = q.Get(&count, "SELECT COUNT(*) FROM terra_nodes"); err != nil { - return - } - - sql := `SELECT * FROM terra_nodes ORDER BY created_at, id LIMIT $1 OFFSET $2;` - if err = q.Select(&nodes, sql, limit, offset); err != nil { - return - } - - return -} - -func (o *orm) NodesForChain(chainID string, offset, limit int, qopts ...pg.QOpt) (nodes []db.Node, count int, err error) { - q := o.q.WithOpts(qopts...) 
- if err = q.Get(&count, "SELECT COUNT(*) FROM terra_nodes WHERE terra_chain_id = $1", chainID); err != nil { - return - } - - sql := `SELECT * FROM terra_nodes WHERE terra_chain_id = $1 ORDER BY created_at, id LIMIT $2 OFFSET $3;` - if err = q.Select(&nodes, sql, chainID, limit, offset); err != nil { - return - } - - return + q := pg.NewQ(db, lggr.Named("ORM"), cfg) + return chains.NewORM[string, terradb.ChainCfg, terradb.Node](q, "terra", "tendermint_url") } diff --git a/core/chains/terra/orm_test.go b/core/chains/terra/orm_test.go index 14bab128c6d..0734bf7bb22 100644 --- a/core/chains/terra/orm_test.go +++ b/core/chains/terra/orm_test.go @@ -44,7 +44,7 @@ func Test_ORM(t *testing.T) { require.NoError(t, err) require.Len(t, dbcs, 2) - newNode := types.NewNode{ + newNode := db.Node{ Name: "first", TerraChainID: chainIDA, TendermintURL: "http://tender.mint.test/columbus-5", @@ -57,7 +57,7 @@ func Test_ORM(t *testing.T) { require.NoError(t, err) assertEqual(t, newNode, gotNode) - newNode2 := types.NewNode{ + newNode2 := db.Node{ Name: "second", TerraChainID: chainIDB, TendermintURL: "http://tender.mint.test/bombay-12", @@ -91,7 +91,7 @@ func Test_ORM(t *testing.T) { assertEqual(t, newNode2, gotNodes[0]) } - newNode3 := types.NewNode{ + newNode3 := db.Node{ Name: "third", TerraChainID: chainIDB, TendermintURL: "http://tender.mint.test/bombay-12", @@ -108,7 +108,7 @@ func Test_ORM(t *testing.T) { assert.NoError(t, orm.DeleteChain(chainIDB)) } -func assertEqual(t *testing.T, newNode types.NewNode, gotNode db.Node) { +func assertEqual(t *testing.T, newNode db.Node, gotNode db.Node) { t.Helper() assert.Equal(t, newNode.Name, gotNode.Name) diff --git a/core/chains/terra/terratxm/orm.go b/core/chains/terra/terratxm/orm.go index 7bbc45c09a7..bbbef28ee69 100644 --- a/core/chains/terra/terratxm/orm.go +++ b/core/chains/terra/terratxm/orm.go @@ -30,15 +30,28 @@ func NewORM(chainID string, db *sqlx.DB, lggr logger.Logger, cfg pg.LogConfig) * } // InsertMsg inserts a terra msg, 
assumed to be a serialized terra ExecuteContractMsg. -func (o *ORM) InsertMsg(contractID, typeURL string, msg []byte) (int64, error) { +func (o *ORM) InsertMsg(contractID, typeURL string, msg []byte, qopts ...pg.QOpt) (int64, error) { var tm terra.Msg - err := o.q.Get(&tm, `INSERT INTO terra_msgs (contract_id, type, raw, state, terra_chain_id, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) RETURNING *`, contractID, typeURL, msg, db.Unstarted, o.chainID) + q := o.q.WithOpts(qopts...) + err := q.Get(&tm, `INSERT INTO terra_msgs (contract_id, type, raw, state, terra_chain_id, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) RETURNING *`, contractID, typeURL, msg, db.Unstarted, o.chainID) if err != nil { return 0, err } return tm.ID, nil } +// UpdateMsgsContract updates messages for the given contract. +func (o *ORM) UpdateMsgsContract(contractID string, from, to db.State, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + _, err := q.Exec(`UPDATE terra_msgs SET state = $1, updated_at = NOW() + WHERE terra_chain_id = $2 AND contract_id = $3 AND state = $4`, to, o.chainID, contractID, from) + if err != nil { + return err + } + return nil +} + // GetMsgsState returns the oldest messages with a given state up to limit. 
func (o *ORM) GetMsgsState(state db.State, limit int64, qopts ...pg.QOpt) (terra.Msgs, error) { if limit < 1 { diff --git a/core/chains/terra/terratxm/orm_test.go b/core/chains/terra/terratxm/orm_test.go index 3b5d10da4dd..5bc92c69612 100644 --- a/core/chains/terra/terratxm/orm_test.go +++ b/core/chains/terra/terratxm/orm_test.go @@ -62,6 +62,8 @@ func TestORM(t *testing.T) { // Update txHash := "123" + err = o.UpdateMsgs([]int64{mid}, Started, &txHash) + require.NoError(t, err) err = o.UpdateMsgs([]int64{mid}, Broadcasted, &txHash) require.NoError(t, err) broadcasted, err := o.GetMsgsState(Broadcasted, 5) diff --git a/core/chains/terra/terratxm/txm.go b/core/chains/terra/terratxm/txm.go index f6f5e1aba15..53a432fe56f 100644 --- a/core/chains/terra/terratxm/txm.go +++ b/core/chains/terra/terratxm/txm.go @@ -9,6 +9,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/pkg/errors" "github.com/smartcontractkit/sqlx" + "golang.org/x/exp/slices" sdk "github.com/cosmos/cosmos-sdk/types" txtypes "github.com/cosmos/cosmos-sdk/types/tx" @@ -157,23 +158,61 @@ func unmarshalMsg(msgType string, raw []byte) (sdk.Msg, string, error) { return nil, "", errors.Errorf("unrecognized message type: %s", msgType) } +type msgValidator struct { + cutoff time.Time + expired, valid terra.Msgs +} + +func (e *msgValidator) add(msg terra.Msg) { + if msg.CreatedAt.Before(e.cutoff) { + e.expired = append(e.expired, msg) + } else { + e.valid = append(e.valid, msg) + } +} + +func (e *msgValidator) sortValid() { + slices.SortFunc(e.valid, func(a, b terra.Msg) bool { + ac, bc := a.CreatedAt, b.CreatedAt + if ac.Equal(bc) { + return a.ID < b.ID + } + return ac.Before(bc) + }) +} + func (txm *Txm) sendMsgBatch(ctx context.Context) { - var notExpired, expired terra.Msgs + msgs := msgValidator{cutoff: time.Now().Add(-txm.cfg.TxMsgTimeout())} err := txm.orm.q.Transaction(func(tx pg.Queryer) error { - unstarted, err := txm.orm.GetMsgsState(db.Unstarted, txm.cfg.MaxMsgsPerBatch(), pg.WithQueryer(tx)) 
+ // There may be leftover Started messages after a crash or failed send attempt. + started, err := txm.orm.GetMsgsState(db.Started, txm.cfg.MaxMsgsPerBatch(), pg.WithQueryer(tx)) if err != nil { txm.lggr.Errorw("unable to read unstarted msgs", "err", err) return err } - cutoff := time.Now().Add(-txm.cfg.TxMsgTimeout()) - for _, msg := range unstarted { - if msg.CreatedAt.Before(cutoff) { - expired = append(expired, msg) - } else { - notExpired = append(notExpired, msg) + if limit := txm.cfg.MaxMsgsPerBatch() - int64(len(started)); limit > 0 { + // Use the remaining batch budget for Unstarted + unstarted, err := txm.orm.GetMsgsState(db.Unstarted, limit, pg.WithQueryer(tx)) //nolint + if err != nil { + txm.lggr.Errorw("unable to read unstarted msgs", "err", err) + return err } + for _, msg := range unstarted { + msgs.add(msg) + } + // Update valid, Unstarted messages to Started + err = txm.orm.UpdateMsgs(msgs.valid.GetIDs(), db.Started, nil, pg.WithQueryer(tx)) + if err != nil { + // Assume transient db error retry + txm.lggr.Errorw("unable to mark unstarted txes as started", "err", err) + return err + } + } + for _, msg := range started { + msgs.add(msg) } - err = txm.orm.UpdateMsgs(expired.GetIDs(), db.Errored, nil, pg.WithQueryer(tx)) + // Update expired messages (Unstarted or Started) to Errored + err = txm.orm.UpdateMsgs(msgs.expired.GetIDs(), db.Errored, nil, pg.WithQueryer(tx)) if err != nil { // Assume transient db error retry txm.lggr.Errorw("unable to mark expired txes as errored", "err", err) @@ -184,12 +223,13 @@ func (txm *Txm) sendMsgBatch(ctx context.Context) { if err != nil { return } - if len(notExpired) == 0 { + if len(msgs.valid) == 0 { return } - txm.lggr.Debugw("building a batch", "not expired", notExpired, "marked expired", expired) + msgs.sortValid() + txm.lggr.Debugw("building a batch", "not expired", msgs.valid, "marked expired", msgs.expired) var msgsByFrom = make(map[string]terra.Msgs) - for _, m := range notExpired { + for _, m := range 
msgs.valid { msg, sender, err2 := unmarshalMsg(m.Type, m.Raw) if err2 != nil { // Should be impossible given the check in Enqueue @@ -293,6 +333,8 @@ func (txm *Txm) sendMsgBatchFromAddress(ctx context.Context, gasPrice sdk.DecCoi // We need to ensure that we either broadcast successfully and mark the tx as // broadcasted OR we do not broadcast successfully and we do not mark it as broadcasted. // We do this by first marking it broadcasted then rolling back if the broadcast api call fails. + // There is still a small chance of network failure or node/db crash after broadcasting but before committing the tx, + // in which case the msgs would be picked up again and re-broadcast, ensuring at-least once delivery. var resp *txtypes.BroadcastTxResponse err = txm.orm.q.Transaction(func(tx pg.Queryer) error { txHash := strings.ToUpper(hex.EncodeToString(tmhash.Sum(signedTx))) @@ -396,36 +438,55 @@ func (txm *Txm) confirmTx(ctx context.Context, tc terraclient.Reader, txHash str // Enqueue enqueue a msg destined for the terra chain. func (txm *Txm) Enqueue(contractID string, msg sdk.Msg) (int64, error) { + typeURL, raw, err := txm.marshalMsg(msg) + if err != nil { + return 0, err + } + + // We could consider simulating here too, but that would + // introduce another network call and essentially double + // the enqueue time. Enqueue is used in the context of OCRs Transmit + // and must be fast, so we do the minimum. 
+ + var id int64 + err = txm.orm.q.Transaction(func(tx pg.Queryer) (err error) { + // cancel any unstarted msgs (normally just one) + err = txm.orm.UpdateMsgsContract(contractID, db.Unstarted, db.Errored, pg.WithQueryer(tx)) + if err != nil { + return err + } + id, err = txm.orm.InsertMsg(contractID, typeURL, raw, pg.WithQueryer(tx)) + return err + }) + return id, err +} + +func (txm *Txm) marshalMsg(msg sdk.Msg) (string, []byte, error) { switch ms := msg.(type) { case *wasmtypes.MsgExecuteContract: _, err := sdk.AccAddressFromBech32(ms.Sender) if err != nil { txm.lggr.Errorw("failed to parse sender, skipping", "err", err, "sender", ms.Sender) - return 0, err + return "", nil, err } case *types.MsgSend: _, err := sdk.AccAddressFromBech32(ms.FromAddress) if err != nil { txm.lggr.Errorw("failed to parse sender, skipping", "err", err, "sender", ms.FromAddress) - return 0, err + return "", nil, err } default: - return 0, &terra.ErrMsgUnsupported{Msg: msg} + return "", nil, &terra.ErrMsgUnsupported{Msg: msg} } typeURL := sdk.MsgTypeURL(msg) raw, err := proto.Marshal(msg) if err != nil { txm.lggr.Errorw("failed to marshal msg, skipping", "err", err, "msg", msg) - return 0, err + return "", nil, err } - - // We could consider simulating here too, but that would - // introduce another network call and essentially double - // the enqueue time. Enqueue is used in the context of OCRs Transmit - // and must be fast, so we do the minimum of a db write. - return txm.orm.InsertMsg(contractID, typeURL, raw) + return typeURL, raw, nil } // GetMsgs returns any messages matching ids. 
diff --git a/core/chains/terra/terratxm/txm_internal_test.go b/core/chains/terra/terratxm/txm_internal_test.go index e0bc13dd2ab..a94206015bc 100644 --- a/core/chains/terra/terratxm/txm_internal_test.go +++ b/core/chains/terra/terratxm/txm_internal_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" "gopkg.in/guregu/null.v4" tmservicetypes "github.com/cosmos/cosmos-sdk/client/grpc/tmservice" @@ -22,12 +23,10 @@ import ( "github.com/smartcontractkit/chainlink-terra/pkg/terra" terraclient "github.com/smartcontractkit/chainlink-terra/pkg/terra/client" tcmocks "github.com/smartcontractkit/chainlink-terra/pkg/terra/client/mocks" - terradb "github.com/smartcontractkit/chainlink-terra/pkg/terra/db" "github.com/smartcontractkit/chainlink/core/internal/testutils" "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/core/internal/testutils/terratest" - "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/keystore" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/smartcontractkit/chainlink/core/utils" @@ -41,7 +40,7 @@ func generateExecuteMsg(t *testing.T, msg []byte, from, to cosmostypes.AccAddres func TestTxm(t *testing.T) { db := pgtest.NewSqlxDB(t) - lggr := logger.TestLogger(t) + lggr := testutils.LoggerAssertMaxLevel(t, zapcore.ErrorLevel) ks := keystore.New(db, utils.FastScryptParams, lggr, pgtest.NewPGCfg(true)) require.NoError(t, ks.Unlock("blah")) k1, err := ks.Terra().Create() @@ -54,11 +53,13 @@ func TestTxm(t *testing.T) { require.NoError(t, err) contract, err := cosmostypes.AccAddressFromBech32("terra1pp76d50yv2ldaahsdxdv8mmzqfjr2ax97gmue8") require.NoError(t, err) + contract2, err := cosmostypes.AccAddressFromBech32("terra1mx72uukvzqtzhc6gde7shrjqfu5srk22v7gmww") + require.NoError(t, err) logCfg := 
pgtest.NewPGCfg(true) chainID := fmt.Sprintf("Chainlinktest-%d", rand.Int31n(999999)) - terratest.MustInsertChain(t, db, &terradb.Chain{ID: chainID}) + terratest.MustInsertChain(t, db, &Chain{ID: chainID}) require.NoError(t, err) - cfg := terra.NewConfig(terradb.ChainCfg{ + cfg := terra.NewConfig(ChainCfg{ MaxMsgsPerBatch: null.IntFrom(2), }, lggr) gpe := terraclient.NewMustGasPriceEstimator([]terraclient.GasPricesEstimator{ @@ -90,13 +91,10 @@ func TestTxm(t *testing.T) { Header: tmtypes.Header{Height: 1}, }}, nil) tc.On("CreateAndSign", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]byte{0x01}, nil) - tc.On("Broadcast", mock.Anything, mock.Anything).Return(&txtypes.BroadcastTxResponse{ - TxResponse: &cosmostypes.TxResponse{TxHash: "0x123"}, - }, nil) - tc.On("Tx", mock.Anything).Return(&txtypes.GetTxResponse{ - Tx: &txtypes.Tx{}, - TxResponse: &cosmostypes.TxResponse{TxHash: "0x123"}, - }, nil) + + txResp := &cosmostypes.TxResponse{TxHash: "4BF5122F344554C53BDE2EBB8CD2B7E3D1600AD631C385A5D7CCE23C7785459A"} + tc.On("Broadcast", mock.Anything, mock.Anything).Return(&txtypes.BroadcastTxResponse{TxResponse: txResp}, nil) + tc.On("Tx", mock.Anything).Return(&txtypes.GetTxResponse{Tx: &txtypes.Tx{}, TxResponse: txResp}, nil) txm.sendMsgBatch(testutils.Context(t)) // Should be in completed state @@ -116,8 +114,65 @@ func TestTxm(t *testing.T) { require.NoError(t, err) id2, err := txm.Enqueue(contract.String(), generateExecuteMsg(t, []byte(`1`), sender2, contract)) require.NoError(t, err) + + tc.On("Account", mock.Anything).Return(uint64(0), uint64(0), nil).Once() + // Note this must be arg dependent, we don't know which order + // the procesing will happen in (map iteration by from address). 
+ tc.On("BatchSimulateUnsigned", terraclient.SimMsgs{ + { + ID: id2, + Msg: &wasmtypes.MsgExecuteContract{ + Sender: sender2.String(), + ExecuteMsg: []byte(`1`), + Contract: contract.String(), + }, + }, + }, mock.Anything).Return(&terraclient.BatchSimResults{ + Failed: nil, + Succeeded: terraclient.SimMsgs{ + { + ID: id2, + Msg: &wasmtypes.MsgExecuteContract{ + Sender: sender2.String(), + ExecuteMsg: []byte(`1`), + Contract: contract.String(), + }, + }, + }, + }, nil).Once() + tc.On("SimulateUnsigned", mock.Anything, mock.Anything).Return(&txtypes.SimulateResponse{GasInfo: &cosmostypes.GasInfo{ + GasUsed: 1_000_000, + }}, nil).Once() + tc.On("LatestBlock").Return(&tmservicetypes.GetLatestBlockResponse{Block: &tmtypes.Block{ + Header: tmtypes.Header{Height: 1}, + }}, nil).Once() + tc.On("CreateAndSign", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]byte{0x01}, nil).Once() + txResp := &cosmostypes.TxResponse{TxHash: "4BF5122F344554C53BDE2EBB8CD2B7E3D1600AD631C385A5D7CCE23C7785459A"} + tc.On("Broadcast", mock.Anything, mock.Anything).Return(&txtypes.BroadcastTxResponse{TxResponse: txResp}, nil).Once() + tc.On("Tx", mock.Anything).Return(&txtypes.GetTxResponse{Tx: &txtypes.Tx{}, TxResponse: txResp}, nil).Once() + txm.sendMsgBatch(testutils.Context(t)) + + // Should be in completed state + completed, err := txm.orm.GetMsgs(id1, id2) + require.NoError(t, err) + require.Equal(t, 2, len(completed)) + assert.Equal(t, Errored, completed[0].State) // cancelled + assert.Equal(t, Confirmed, completed[1].State) + tc.AssertExpectations(t) + }) + + t.Run("two msgs different contracts", func(t *testing.T) { + tc := new(tcmocks.ReaderWriter) + tcFn := func() (terraclient.ReaderWriter, error) { return tc, nil } + txm := NewTxm(db, tcFn, *gpe, chainID, cfg, ks.Terra(), lggr, pgtest.NewPGCfg(true), nil) + + id1, err := txm.Enqueue(contract.String(), generateExecuteMsg(t, []byte(`0`), sender1, contract)) 
+ require.NoError(t, err) + id2, err := txm.Enqueue(contract2.String(), generateExecuteMsg(t, []byte(`1`), sender2, contract2)) + require.NoError(t, err) ids := []int64{id1, id2} senders := []string{sender1.String(), sender2.String()} + contracts := []string{contract.String(), contract2.String()} for i := 0; i < 2; i++ { tc.On("Account", mock.Anything).Return(uint64(0), uint64(0), nil).Once() // Note this must be arg dependent, we don't know which order @@ -128,7 +183,7 @@ func TestTxm(t *testing.T) { Msg: &wasmtypes.MsgExecuteContract{ Sender: senders[i], ExecuteMsg: []byte(fmt.Sprintf(`%d`, i)), - Contract: contract.String(), + Contract: contracts[i], }, }, }, mock.Anything).Return(&terraclient.BatchSimResults{ @@ -139,7 +194,7 @@ func TestTxm(t *testing.T) { Msg: &wasmtypes.MsgExecuteContract{ Sender: senders[i], ExecuteMsg: []byte(fmt.Sprintf(`%d`, i)), - Contract: contract.String(), + Contract: contracts[i], }, }, }, @@ -151,22 +206,18 @@ func TestTxm(t *testing.T) { Header: tmtypes.Header{Height: 1}, }}, nil).Once() tc.On("CreateAndSign", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]byte{0x01}, nil).Once() - tc.On("Broadcast", mock.Anything, mock.Anything).Return(&txtypes.BroadcastTxResponse{ - TxResponse: &cosmostypes.TxResponse{TxHash: "0x123"}, - }, nil).Once() - tc.On("Tx", mock.Anything).Return(&txtypes.GetTxResponse{ - Tx: &txtypes.Tx{}, - TxResponse: &cosmostypes.TxResponse{TxHash: "0x123"}, - }, nil).Once() } + txResp := &cosmostypes.TxResponse{TxHash: "4BF5122F344554C53BDE2EBB8CD2B7E3D1600AD631C385A5D7CCE23C7785459A"} + tc.On("Broadcast", mock.Anything, mock.Anything).Return(&txtypes.BroadcastTxResponse{TxResponse: txResp}, nil).Twice() + tc.On("Tx", mock.Anything).Return(&txtypes.GetTxResponse{Tx: &txtypes.Tx{}, TxResponse: txResp}, nil).Twice() txm.sendMsgBatch(testutils.Context(t)) // Should be in completed state completed, err := txm.orm.GetMsgs(id1, id2) 
require.NoError(t, err) require.Equal(t, 2, len(completed)) - assert.Equal(t, completed[0].State, Confirmed) - assert.Equal(t, completed[1].State, Confirmed) + assert.Equal(t, Confirmed, completed[0].State) + assert.Equal(t, Confirmed, completed[1].State) tc.AssertExpectations(t) }) @@ -176,12 +227,13 @@ func TestTxm(t *testing.T) { Tx: &txtypes.Tx{}, TxResponse: &cosmostypes.TxResponse{TxHash: "0x123"}, }, errors.New("not found")).Twice() - cfg := terra.NewConfig(terradb.ChainCfg{}, lggr) + cfg := terra.NewConfig(ChainCfg{}, lggr) tcFn := func() (terraclient.ReaderWriter, error) { return tc, nil } txm := NewTxm(db, tcFn, *gpe, chainID, cfg, ks.Terra(), lggr, pgtest.NewPGCfg(true), nil) i, err := txm.orm.InsertMsg("blah", "", []byte{0x01}) require.NoError(t, err) txh := "0x123" + require.NoError(t, txm.orm.UpdateMsgs([]int64{i}, Started, &txh)) require.NoError(t, txm.orm.UpdateMsgs([]int64{i}, Broadcasted, &txh)) err = txm.confirmTx(testutils.Context(t), tc, txh, []int64{i}, 2, 1*time.Millisecond) require.NoError(t, err) @@ -217,6 +269,12 @@ func TestTxm(t *testing.T) { require.NoError(t, err) id3, err := txm.orm.InsertMsg("blah", "", []byte{0x03}) require.NoError(t, err) + err = txm.orm.UpdateMsgs([]int64{id1}, Started, &txHash1) + require.NoError(t, err) + err = txm.orm.UpdateMsgs([]int64{id2}, Started, &txHash2) + require.NoError(t, err) + err = txm.orm.UpdateMsgs([]int64{id3}, Started, &txHash3) + require.NoError(t, err) err = txm.orm.UpdateMsgs([]int64{id1}, Broadcasted, &txHash1) require.NoError(t, err) err = txm.orm.UpdateMsgs([]int64{id2}, Broadcasted, &txHash2) @@ -239,7 +297,7 @@ func TestTxm(t *testing.T) { tc := new(tcmocks.ReaderWriter) timeout := models.MustMakeDuration(1 * time.Millisecond) tcFn := func() (terraclient.ReaderWriter, error) { return tc, nil } - cfgShortExpiry := terra.NewConfig(terradb.ChainCfg{ + cfgShortExpiry := terra.NewConfig(ChainCfg{ MaxMsgsPerBatch: null.IntFrom(2), TxMsgTimeout: &timeout, }, lggr) @@ -253,17 +311,88 @@ func 
TestTxm(t *testing.T) { // Should be marked errored m, err := txm.orm.GetMsgs(id1) require.NoError(t, err) - assert.Equal(t, terradb.Errored, m[0].State) + assert.Equal(t, Errored, m[0].State) // Send a batch which is all expired id2, err := txm.orm.InsertMsg("blah", "", []byte{0x03}) + require.NoError(t, err) id3, err := txm.orm.InsertMsg("blah", "", []byte{0x03}) require.NoError(t, err) time.Sleep(1 * time.Millisecond) txm.sendMsgBatch(context.Background()) require.NoError(t, err) ms, err := txm.orm.GetMsgs(id2, id3) - assert.Equal(t, terradb.Errored, ms[0].State) - assert.Equal(t, terradb.Errored, ms[1].State) + assert.Equal(t, Errored, ms[0].State) + assert.Equal(t, Errored, ms[1].State) }) + + t.Run("started msgs", func(t *testing.T) { + tc := new(tcmocks.ReaderWriter) + tc.On("Account", mock.Anything).Return(uint64(0), uint64(0), nil) + tc.On("SimulateUnsigned", mock.Anything, mock.Anything).Return(&txtypes.SimulateResponse{GasInfo: &cosmostypes.GasInfo{ + GasUsed: 1_000_000, + }}, nil) + tc.On("LatestBlock").Return(&tmservicetypes.GetLatestBlockResponse{Block: &tmtypes.Block{ + Header: tmtypes.Header{Height: 1}, + }}, nil) + tc.On("CreateAndSign", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]byte{0x01}, nil) + txResp := &cosmostypes.TxResponse{TxHash: "4BF5122F344554C53BDE2EBB8CD2B7E3D1600AD631C385A5D7CCE23C7785459A"} + tc.On("Broadcast", mock.Anything, mock.Anything).Return(&txtypes.BroadcastTxResponse{TxResponse: txResp}, nil) + tc.On("Tx", mock.Anything).Return(&txtypes.GetTxResponse{Tx: &txtypes.Tx{}, TxResponse: txResp}, nil) + tcFn := func() (terraclient.ReaderWriter, error) { return tc, nil } + cfg := terra.NewConfig(ChainCfg{ + MaxMsgsPerBatch: null.IntFrom(2), + }, lggr) + txm := NewTxm(db, tcFn, *gpe, chainID, cfg, ks.Terra(), lggr, pgtest.NewPGCfg(true), nil) + + // Leftover started is processed + msg1 := generateExecuteMsg(t, []byte{0x03}, sender1, contract) + id1 
:= mustInsertMsg(t, txm, contract.String(), msg1) + require.NoError(t, txm.orm.UpdateMsgs([]int64{id1}, Started, nil)) + msgs := terraclient.SimMsgs{{ID: id1, Msg: &wasmtypes.MsgExecuteContract{ + Sender: sender1.String(), + ExecuteMsg: []byte{0x03}, + Contract: contract.String(), + }}} + tc.On("BatchSimulateUnsigned", msgs, mock.Anything). + Return(&terraclient.BatchSimResults{Failed: nil, Succeeded: msgs}, nil).Once() + time.Sleep(1 * time.Millisecond) + txm.sendMsgBatch(context.Background()) + m, err := txm.orm.GetMsgs(id1) + require.NoError(t, err) + assert.Equal(t, Confirmed, m[0].State) + + // Leftover started is not cancelled + msg2 := generateExecuteMsg(t, []byte{0x04}, sender1, contract) + msg3 := generateExecuteMsg(t, []byte{0x05}, sender1, contract) + id2 := mustInsertMsg(t, txm, contract.String(), msg2) + require.NoError(t, txm.orm.UpdateMsgs([]int64{id2}, Started, nil)) + time.Sleep(time.Millisecond) // ensure != CreatedAt + id3 := mustInsertMsg(t, txm, contract.String(), msg3) + msgs = terraclient.SimMsgs{{ID: id2, Msg: &wasmtypes.MsgExecuteContract{ + Sender: sender1.String(), + ExecuteMsg: []byte{0x04}, + Contract: contract.String(), + }}, {ID: id3, Msg: &wasmtypes.MsgExecuteContract{ + Sender: sender1.String(), + ExecuteMsg: []byte{0x05}, + Contract: contract.String(), + }}} + tc.On("BatchSimulateUnsigned", msgs, mock.Anything). 
+ Return(&terraclient.BatchSimResults{Failed: nil, Succeeded: msgs}, nil).Once() + time.Sleep(1 * time.Millisecond) + txm.sendMsgBatch(context.Background()) + require.NoError(t, err) + ms, err := txm.orm.GetMsgs(id2, id3) + assert.Equal(t, Confirmed, ms[0].State) + assert.Equal(t, Confirmed, ms[1].State) + }) +} + +func mustInsertMsg(t *testing.T, txm *Txm, contractID string, msg cosmostypes.Msg) int64 { + typeURL, raw, err := txm.marshalMsg(msg) + require.NoError(t, err) + id, err := txm.orm.InsertMsg(contractID, typeURL, raw) + require.NoError(t, err) + return id } diff --git a/core/chains/terra/terratxm/txm_test.go b/core/chains/terra/terratxm/txm_test.go index 0fcb29631a7..77cf5209e29 100644 --- a/core/chains/terra/terratxm/txm_test.go +++ b/core/chains/terra/terratxm/txm_test.go @@ -34,7 +34,7 @@ import ( ) func TestTxm_Integration(t *testing.T) { - cfg, db := heavyweight.FullTestDB(t, "terra_txm", true, false) + cfg, db := heavyweight.FullTestDBNoFixtures(t, "terra_txm") lggr := logger.TestLogger(t) chainID := fmt.Sprintf("Chainlinktest-%d", rand.Int31n(999999)) logCfg := pgtest.NewPGCfg(true) diff --git a/core/chains/terra/types/types.go b/core/chains/terra/types/types.go index 2e321793cd1..a3a075b3ac0 100644 --- a/core/chains/terra/types/types.go +++ b/core/chains/terra/types/types.go @@ -3,26 +3,31 @@ package types import ( "github.com/smartcontractkit/chainlink-terra/pkg/terra/db" + "github.com/smartcontractkit/chainlink/core/chains" "github.com/smartcontractkit/chainlink/core/services/pg" ) // ORM manages terra chains and nodes. 
type ORM interface { - Chain(string, ...pg.QOpt) (db.Chain, error) - Chains(offset, limit int, qopts ...pg.QOpt) ([]db.Chain, int, error) - CreateChain(id string, config db.ChainCfg, qopts ...pg.QOpt) (db.Chain, error) - UpdateChain(id string, enabled bool, config db.ChainCfg, qopts ...pg.QOpt) (db.Chain, error) + Chain(string, ...pg.QOpt) (Chain, error) + Chains(offset, limit int, qopts ...pg.QOpt) ([]Chain, int, error) + CreateChain(id string, config db.ChainCfg, qopts ...pg.QOpt) (Chain, error) + UpdateChain(id string, enabled bool, config db.ChainCfg, qopts ...pg.QOpt) (Chain, error) DeleteChain(id string, qopts ...pg.QOpt) error - EnabledChains(...pg.QOpt) ([]db.Chain, error) + EnabledChains(...pg.QOpt) ([]Chain, error) - CreateNode(NewNode, ...pg.QOpt) (db.Node, error) + CreateNode(db.Node, ...pg.QOpt) (db.Node, error) DeleteNode(int32, ...pg.QOpt) error Node(int32, ...pg.QOpt) (db.Node, error) NodeNamed(string, ...pg.QOpt) (db.Node, error) Nodes(offset, limit int, qopts ...pg.QOpt) (nodes []db.Node, count int, err error) NodesForChain(chainID string, offset, limit int, qopts ...pg.QOpt) (nodes []db.Node, count int, err error) + + SetupNodes([]db.Node, []string) error } +type Chain = chains.Chain[string, db.ChainCfg] + // NewNode defines a new node to create. 
type NewNode struct { Name string `json:"name"` diff --git a/core/cmd/app.go b/core/cmd/app.go index 1eec620dea8..99b0f7502c1 100644 --- a/core/cmd/app.go +++ b/core/cmd/app.go @@ -969,6 +969,27 @@ func NewApp(client *Client) *cli.App { }, }, }, + { + Name: "solana", + Usage: "Commands for handling Solana transactions", + Subcommands: []cli.Command{ + { + Name: "create", + Usage: "Send lamports from node Solana account to destination .", + Action: client.SolanaSendSol, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "force", + Usage: "allows to send a higher amount than the account's balance", + }, + cli.StringFlag{ + Name: "id", + Usage: "chain ID, options: [mainnet, testnet, devnet, localnet]", + }, + }, + }, + }, + }, { Name: "terra", Usage: "Commands for handling Terra transactions", @@ -1034,6 +1055,44 @@ func NewApp(client *Client) *cli.App { }, }, }, + { + Name: "solana", + Usage: "Commands for handling Solana chains", + Subcommands: cli.Commands{ + { + Name: "create", + Usage: "Create a new Solana chain", + Action: client.CreateSolanaChain, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "id", + Usage: "chain ID, options: [mainnet, testnet, devnet, localnet]", + }, + }, + }, + { + Name: "delete", + Usage: "Delete a Solana chain", + Action: client.RemoveSolanaChain, + }, + { + Name: "list", + Usage: "List all Solana chains", + Action: client.IndexSolanaChains, + }, + { + Name: "configure", + Usage: "Configure a Solana chain", + Action: client.ConfigureSolanaChain, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "id", + Usage: "chain ID, options: [mainnet, testnet, devnet, localnet]", + }, + }, + }, + }, + }, { Name: "terra", Usage: "Commands for handling Terra chains", @@ -1121,6 +1180,41 @@ func NewApp(client *Client) *cli.App { }, }, }, + { + Name: "solana", + Usage: "Commands for handling Solana node configuration", + Subcommands: cli.Commands{ + { + Name: "create", + Usage: "Create a new Solana node", + Action: client.CreateSolanaNode, + Flags: []cli.Flag{ + 
cli.StringFlag{ + Name: "name", + Usage: "node name", + }, + cli.StringFlag{ + Name: "chain-id", + Usage: "chain ID, options: [mainnet, testnet, devnet, localnet]", + }, + cli.StringFlag{ + Name: "url", + Usage: "URL", + }, + }, + }, + { + Name: "delete", + Usage: "Delete a Solana node", + Action: client.RemoveSolanaNode, + }, + { + Name: "list", + Usage: "List all Solana nodes", + Action: client.IndexSolanaNodes, + }, + }, + }, { Name: "terra", Usage: "Commands for handling Terra node configuration", diff --git a/core/cmd/client.go b/core/cmd/client.go index f28a2e10ac2..e561560030c 100644 --- a/core/cmd/client.go +++ b/core/cmd/client.go @@ -25,7 +25,10 @@ import ( "go.uber.org/zap/zapcore" "golang.org/x/sync/errgroup" + "github.com/smartcontractkit/sqlx" + "github.com/smartcontractkit/chainlink/core/chains/evm" + "github.com/smartcontractkit/chainlink/core/chains/solana" "github.com/smartcontractkit/chainlink/core/chains/terra" "github.com/smartcontractkit/chainlink/core/config" "github.com/smartcontractkit/chainlink/core/logger" @@ -41,7 +44,6 @@ import ( "github.com/smartcontractkit/chainlink/core/store/migrate" "github.com/smartcontractkit/chainlink/core/utils" "github.com/smartcontractkit/chainlink/core/web" - "github.com/smartcontractkit/sqlx" ) var prometheus *ginprom.Prometheus @@ -162,6 +164,9 @@ func (n ChainlinkAppFactory) NewApplication(cfg config.GeneralConfig, db *sqlx.D if cfg.TerraEnabled() { terraLggr := appLggr.Named("Terra") + if err := terra.SetupNodes(db, cfg, terraLggr); err != nil { + return nil, errors.Wrap(err, "failed to setup Terra nodes") + } chains.Terra, err = terra.NewChainSet(terra.ChainSetOpts{ Config: cfg, Logger: terraLggr, @@ -175,6 +180,24 @@ func (n ChainlinkAppFactory) NewApplication(cfg config.GeneralConfig, db *sqlx.D } } + if cfg.SolanaEnabled() { + solLggr := appLggr.Named("Solana") + if err := solana.SetupNodes(db, cfg, solLggr); err != nil { + return nil, errors.Wrap(err, "failed to setup Solana nodes") + } + 
chains.Solana, err = solana.NewChainSet(solana.ChainSetOpts{ + Config: cfg, + Logger: solLggr, + DB: db, + KeyStore: keyStore.Solana(), + EventBroadcaster: eventBroadcaster, + ORM: solana.NewORM(db, solLggr, cfg), + }) + if err != nil { + return nil, errors.Wrap(err, "failed to load Solana chainset") + } + } + externalInitiatorManager := webhook.NewExternalInitiatorManager(db, pipeline.UnrestrictedClient, appLggr, cfg) return chainlink.NewApplication(chainlink.ApplicationOpts{ Config: cfg, diff --git a/core/cmd/evm_node_commands_test.go b/core/cmd/evm_node_commands_test.go index 6ebbb2da66e..6cfc07db1e5 100644 --- a/core/cmd/evm_node_commands_test.go +++ b/core/cmd/evm_node_commands_test.go @@ -6,18 +6,20 @@ import ( "strconv" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + "gopkg.in/guregu/null.v4" + "github.com/smartcontractkit/chainlink/core/chains/evm/types" "github.com/smartcontractkit/chainlink/core/cmd" "github.com/smartcontractkit/chainlink/core/internal/cltest" + "github.com/smartcontractkit/chainlink/core/internal/testutils" "github.com/smartcontractkit/chainlink/core/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/urfave/cli" - null "gopkg.in/guregu/null.v4" ) func mustInsertEVMChain(t *testing.T, orm types.ORM) types.Chain { - id := utils.NewBigI(99) + id := utils.NewBig(testutils.NewRandomEVMChainID()) config := types.ChainCfg{} chain, err := orm.CreateChain(*id, config) require.NoError(t, err) @@ -44,7 +46,7 @@ func TestClient_IndexEVMNodes(t *testing.T) { require.NoError(t, err) chain := mustInsertEVMChain(t, orm) - params := types.NewNode{ + params := types.Node{ Name: "Test node", EVMChainID: chain.ID, WSURL: null.StringFrom("ws://localhost:8546"), @@ -131,7 +133,7 @@ func TestClient_RemoveEVMNode(t *testing.T) { chain := mustInsertEVMChain(t, orm) - params := types.NewNode{ + params := types.Node{ Name: "Test node", EVMChainID: 
chain.ID, WSURL: null.StringFrom("ws://localhost:8546"), diff --git a/core/cmd/local_client.go b/core/cmd/local_client.go index e38c9e7d68b..9c0e55fab5d 100644 --- a/core/cmd/local_client.go +++ b/core/cmd/local_client.go @@ -23,13 +23,12 @@ import ( "github.com/fatih/color" "github.com/kylelemons/godebug/diff" "github.com/pkg/errors" + "github.com/smartcontractkit/sqlx" clipkg "github.com/urfave/cli" "go.uber.org/multierr" "golang.org/x/sync/errgroup" "gopkg.in/guregu/null.v4" - "github.com/smartcontractkit/sqlx" - "github.com/smartcontractkit/chainlink/core/chains/evm/txmgr" "github.com/smartcontractkit/chainlink/core/config" "github.com/smartcontractkit/chainlink/core/logger" @@ -47,6 +46,10 @@ import ( // ownerPermsMask are the file permission bits reserved for owner. const ownerPermsMask = os.FileMode(0700) +// PristineDBName is a clean copy of test DB with migrations. +// Used by heavyweight.FullTestDB* functions. +const PristineDBName = "chainlink_test_pristine" + // RunNode starts the Chainlink core. 
func (cli *Client) RunNode(c *clipkg.Context) error { if err := cli.runNode(c); err != nil { @@ -503,6 +506,19 @@ func (cli *Client) PrepareTestDatabase(c *clipkg.Context) error { return cli.errorOut(err) } cfg := cli.Config + + // Creating pristine DB copy to speed up FullTestDB + dbUrl := cfg.DatabaseURL() + db, err := sql.Open(string(dialects.Postgres), dbUrl.String()) + defer db.Close() + if err != nil { + return cli.errorOut(err) + } + templateDB := strings.Trim(dbUrl.Path, "/") + if err = dropAndCreatePristineDB(db, templateDB); err != nil { + return cli.errorOut(err) + } + userOnly := c.Bool("user-only") var fixturePath = "../store/fixtures/fixtures.sql" if userOnly { @@ -511,6 +527,7 @@ func (cli *Client) PrepareTestDatabase(c *clipkg.Context) error { if err := insertFixtures(cfg, fixturePath); err != nil { return cli.errorOut(err) } + return nil } @@ -656,6 +673,18 @@ func dropAndCreateDB(parsed url.URL) (err error) { return nil } +func dropAndCreatePristineDB(db *sql.DB, template string) (err error) { + _, err = db.Exec(fmt.Sprintf(`DROP DATABASE IF EXISTS "%s"`, PristineDBName)) + if err != nil { + return fmt.Errorf("unable to drop postgres database: %v", err) + } + _, err = db.Exec(fmt.Sprintf(`CREATE DATABASE "%s" WITH TEMPLATE "%s"`, PristineDBName, template)) + if err != nil { + return fmt.Errorf("unable to create postgres database: %v", err) + } + return nil +} + func migrateDB(config config.GeneralConfig, lggr logger.Logger) error { db, err := newConnection(config, lggr) if err != nil { diff --git a/core/cmd/local_client_test.go b/core/cmd/local_client_test.go index d81c51d00d9..7a2e8acb4e6 100644 --- a/core/cmd/local_client_test.go +++ b/core/cmd/local_client_test.go @@ -74,9 +74,9 @@ func TestClient_RunNodeShowsEnv(t *testing.T) { assert.NoError(t, err) lcfg := logger.Config{ - LogLevel: zapcore.DebugLevel, - FileMaxSize: int(logFileSize), - Dir: t.TempDir(), + LogLevel: zapcore.DebugLevel, + FileMaxSizeMB: int(logFileSize / utils.MB), + Dir: 
t.TempDir(), } tmpFile, err := os.CreateTemp(lcfg.Dir, "*") @@ -155,6 +155,8 @@ KEEPER_REGISTRY_PERFORM_GAS_OVERHEAD: 0 KEEPER_REGISTRY_SYNC_INTERVAL: KEEPER_REGISTRY_SYNC_UPKEEP_QUEUE_SIZE: 0 KEEPER_CHECK_UPKEEP_GAS_PRICE_FEATURE_ENABLED: false +KEEPER_TURN_LOOK_BACK: 1000 +KEEPER_TURN_FLAG_ENABLED: false LEASE_LOCK_DURATION: 10s LEASE_LOCK_REFRESH_INTERVAL: 1s FLAGS_CONTRACT_ADDRESS: @@ -415,8 +417,8 @@ func TestClient_DiskMaxSizeBeforeRotateOptionDisablesAsExpected(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := logger.Config{ - Dir: t.TempDir(), - FileMaxSize: int(tt.logFileSize(t)), + Dir: t.TempDir(), + FileMaxSizeMB: int(tt.logFileSize(t) / utils.MB), } assert.NoError(t, os.MkdirAll(cfg.Dir, os.FileMode(0700))) @@ -437,7 +439,7 @@ func TestClient_RebroadcastTransactions_Txm(t *testing.T) { // Use the a non-transactional db for this test because we need to // test multiple connections to the database, and changes made within // the transaction cannot be seen from another connection. - config, sqlxDB := heavyweight.FullTestDB(t, "rebroadcasttransactions", true, true) + config, sqlxDB := heavyweight.FullTestDB(t, "rebroadcasttransactions") keyStore := cltest.NewKeyStore(t, sqlxDB, config) _, fromAddress := cltest.MustInsertRandomKey(t, keyStore.Eth(), 0) @@ -513,7 +515,7 @@ func TestClient_RebroadcastTransactions_OutsideRange_Txm(t *testing.T) { // Use the a non-transactional db for this test because we need to // test multiple connections to the database, and changes made within // the transaction cannot be seen from another connection. 
- config, sqlxDB := heavyweight.FullTestDB(t, "rebroadcasttransactions_outsiderange", true, true) + config, sqlxDB := heavyweight.FullTestDB(t, "rebroadcasttransactions_outsiderange") config.Overrides.Dialect = dialects.Postgres keyStore := cltest.NewKeyStore(t, sqlxDB, config) @@ -574,7 +576,7 @@ func TestClient_RebroadcastTransactions_OutsideRange_Txm(t *testing.T) { func TestClient_SetNextNonce(t *testing.T) { // Need to use separate database - config, sqlxDB := heavyweight.FullTestDB(t, "setnextnonce", true, true) + config, sqlxDB := heavyweight.FullTestDB(t, "setnextnonce") ethKeyStore := cltest.NewKeyStore(t, sqlxDB, config).Eth() lggr := logger.TestLogger(t) diff --git a/core/cmd/mocks/prompter.go b/core/cmd/mocks/prompter.go index c1f8f914190..0feb30eb092 100644 --- a/core/cmd/mocks/prompter.go +++ b/core/cmd/mocks/prompter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/cmd/solana_chains_commands.go b/core/cmd/solana_chains_commands.go new file mode 100644 index 00000000000..3b6d8924eef --- /dev/null +++ b/core/cmd/solana_chains_commands.go @@ -0,0 +1,210 @@ +package cmd + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/manyminds/api2go/jsonapi" + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" + + "github.com/smartcontractkit/chainlink/core/web/presenters" +) + +// SolanaChainPresenter implements TableRenderer for a SolanaChainResource +type SolanaChainPresenter struct { + presenters.SolanaChainResource +} + +// ToRow presents the SolanaChainResource as a slice of strings. 
+func (p *SolanaChainPresenter) ToRow() []string { + // NOTE: it's impossible to omitempty null fields when serializing to JSON: https://github.com/golang/go/issues/11939 + config, err := json.MarshalIndent(p.Config, "", " ") + if err != nil { + panic(err) + } + + row := []string{ + p.GetID(), + strconv.FormatBool(p.Enabled), + string(config), + p.CreatedAt.String(), + p.UpdatedAt.String(), + } + return row +} + +// RenderTable implements TableRenderer +// Just renders a single row +func (p SolanaChainPresenter) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Enabled", "Config", "Created", "Updated"} + rows := [][]string{} + rows = append(rows, p.ToRow()) + + renderList(headers, rows, rt.Writer) + + return nil +} + +// SolanaChainPresenters implements TableRenderer for a slice of SolanaChainPresenters. +type SolanaChainPresenters []SolanaChainPresenter + +// RenderTable implements TableRenderer +func (ps SolanaChainPresenters) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Enabled", "Config", "Created", "Updated"} + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(headers, rows, rt.Writer) + + return nil +} + +// IndexSolanaChains returns all Solana chains. +func (cli *Client) IndexSolanaChains(c *cli.Context) (err error) { + return cli.getPage("/v2/chains/solana", c.Int("page"), &SolanaChainPresenters{}) +} + +// CreateSolanaChain adds a new Solana chain. 
+func (cli *Client) CreateSolanaChain(c *cli.Context) (err error) { + if !c.Args().Present() { + return cli.errorOut(errors.New("must pass in the chain's parameters [-id string] [JSON blob | JSON filepath]")) + } + chainID := c.String("id") + if chainID == "" { + return cli.errorOut(errors.New("missing chain ID [-id string]")) + } + + buf, err := getBufferFromJSON(c.Args().First()) + if err != nil { + return cli.errorOut(err) + } + + params := map[string]interface{}{ + "chainID": chainID, + "config": json.RawMessage(buf.Bytes()), + } + + body, err := json.Marshal(params) + if err != nil { + return cli.errorOut(err) + } + + var resp *http.Response + resp, err = cli.HTTP.Post("/v2/chains/solana", bytes.NewBuffer(body)) + if err != nil { + return cli.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return cli.renderAPIResponse(resp, &SolanaChainPresenter{}) +} + +// RemoveSolanaChain removes a specific Solana Chain by id. +func (cli *Client) RemoveSolanaChain(c *cli.Context) (err error) { + if !c.Args().Present() { + return cli.errorOut(errors.New("must pass the id of the chain to be removed")) + } + chainID := c.Args().First() + resp, err := cli.HTTP.Delete("/v2/chains/solana/" + chainID) + if err != nil { + return cli.errorOut(err) + } + _, err = cli.parseResponse(resp) + if err != nil { + return cli.errorOut(err) + } + + fmt.Printf("Chain %v deleted\n", c.Args().First()) + return nil +} + +// ConfigureSolanaChain configures an existing Solana chain. 
+func (cli *Client) ConfigureSolanaChain(c *cli.Context) (err error) { + chainID := c.String("id") + if chainID == "" { + return cli.errorOut(errors.New("missing chain ID (usage: chainlink solana chains configure [-id string] [key1=value1 key2=value2 ...])")) + } + + if !c.Args().Present() { + return cli.errorOut(errors.New("must pass in at least one chain configuration parameters (usage: chainlink solana chains configure [-id string] [key1=value1 key2=value2 ...])")) + } + + // Fetch existing config + resp, err := cli.HTTP.Get(fmt.Sprintf("/v2/chains/solana/%s", chainID)) + if err != nil { + return cli.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + var chain presenters.SolanaChainResource + if err = cli.deserializeAPIResponse(resp, &chain, &jsonapi.Links{}); err != nil { + return cli.errorOut(err) + } + config := chain.Config + + // Parse new key-value pairs + params := map[string]interface{}{} + for _, arg := range c.Args() { + parts := strings.SplitN(arg, "=", 2) + if len(parts) != 2 { + return cli.errorOut(errors.Errorf("invalid parameter: %v", arg)) + } + + var value interface{} + if err = json.Unmarshal([]byte(parts[1]), &value); err != nil { + // treat it as a string + value = parts[1] + } + // TODO: handle `key=nil` and `key=` besides just null? 
+ params[parts[0]] = value + } + + // Combine new values with the existing config + // (serialize to a partial JSON map, deserialize to the old config struct) + rawUpdates, err := json.Marshal(params) + if err != nil { + return cli.errorOut(err) + } + + err = json.Unmarshal(rawUpdates, &config) + if err != nil { + return cli.errorOut(err) + } + + // Send the new config + params = map[string]interface{}{ + "enabled": chain.Enabled, + "config": config, + } + body, err := json.Marshal(params) + if err != nil { + return cli.errorOut(err) + } + resp, err = cli.HTTP.Patch(fmt.Sprintf("/v2/chains/solana/%s", chainID), bytes.NewBuffer(body)) + if err != nil { + return cli.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return cli.renderAPIResponse(resp, &SolanaChainPresenter{}) +} diff --git a/core/cmd/solana_chains_commands_test.go b/core/cmd/solana_chains_commands_test.go new file mode 100644 index 00000000000..719204b0c1a --- /dev/null +++ b/core/cmd/solana_chains_commands_test.go @@ -0,0 +1,139 @@ +package cmd_test + +import ( + "flag" + "testing" + "time" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/smartcontractkit/chainlink/core/cmd" + "github.com/smartcontractkit/chainlink/core/internal/cltest" + "github.com/smartcontractkit/chainlink/core/internal/testutils/solanatest" + "github.com/smartcontractkit/chainlink/core/store/models" +) + +func TestClient_IndexSolanaChains(t *testing.T) { + t.Parallel() + + app := solanaStartNewApplication(t) + client, r := app.NewClientAndRenderer() + + orm := app.Chains.Solana.ORM() + _, initialCount, err := orm.Chains(0, 25) + require.NoError(t, err) + + chain, err := orm.CreateChain(solanatest.RandomChainID(), db.ChainCfg{}) + require.NoError(t, err) + + require.Nil(t, 
client.IndexSolanaChains(cltest.EmptyCLIContext())) + chains := *r.Renders[0].(*cmd.SolanaChainPresenters) + require.Len(t, chains, initialCount+1) + c := chains[initialCount] + assert.Equal(t, chain.ID, c.ID) + assertTableRenders(t, r) +} + +func TestClient_CreateSolanaChain(t *testing.T) { + t.Parallel() + + app := solanaStartNewApplication(t) + client, r := app.NewClientAndRenderer() + + orm := app.Chains.Solana.ORM() + _, initialCount, err := orm.Chains(0, 25) + require.NoError(t, err) + + solanaChainID := solanatest.RandomChainID() + set := flag.NewFlagSet("cli", 0) + set.String("id", solanaChainID, "") + set.Parse([]string{`{}`}) + c := cli.NewContext(nil, set, nil) + + err = client.CreateSolanaChain(c) + require.NoError(t, err) + + chains, _, err := orm.Chains(0, 25) + require.NoError(t, err) + require.Len(t, chains, initialCount+1) + ch := chains[initialCount] + assert.Equal(t, solanaChainID, ch.ID) + assertTableRenders(t, r) +} + +func TestClient_RemoveSolanaChain(t *testing.T) { + t.Parallel() + + app := solanaStartNewApplication(t) + client, r := app.NewClientAndRenderer() + + orm := app.Chains.Solana.ORM() + _, initialCount, err := orm.Chains(0, 25) + require.NoError(t, err) + + solanaChainID := solanatest.RandomChainID() + _, err = orm.CreateChain(solanaChainID, db.ChainCfg{}) + require.NoError(t, err) + chains, _, err := orm.Chains(0, 25) + require.NoError(t, err) + require.Len(t, chains, initialCount+1) + + set := flag.NewFlagSet("cli", 0) + set.Parse([]string{solanaChainID}) + c := cli.NewContext(nil, set, nil) + + err = client.RemoveSolanaChain(c) + require.NoError(t, err) + + chains, _, err = orm.Chains(0, 25) + require.NoError(t, err) + require.Len(t, chains, initialCount) + assertTableRenders(t, r) +} + +func TestClient_ConfigureSolanaChain(t *testing.T) { + t.Parallel() + + app := solanaStartNewApplication(t) + client, r := app.NewClientAndRenderer() + + orm := app.Chains.Solana.ORM() + + _, initialCount, err := orm.Chains(0, 25) + 
require.NoError(t, err) + + solanaChainID := solanatest.RandomChainID() + minute := models.MustMakeDuration(time.Minute) + hour := models.MustMakeDuration(time.Hour) + original := db.ChainCfg{ + ConfirmPollPeriod: &minute, + TxTimeout: &hour, + } + _, err = orm.CreateChain(solanaChainID, original) + require.NoError(t, err) + chains, _, err := orm.Chains(0, 25) + require.NoError(t, err) + require.Len(t, chains, initialCount+1) + + set := flag.NewFlagSet("cli", 0) + set.String("id", solanaChainID, "param") + set.Parse([]string{ + "TxTimeout=1h", + }) + c := cli.NewContext(nil, set, nil) + + err = client.ConfigureSolanaChain(c) + require.NoError(t, err) + + chains, _, err = orm.Chains(0, 25) + require.NoError(t, err) + ch := chains[initialCount] + + assert.Equal(t, solanaChainID, ch.ID) + assert.Equal(t, original.ConfirmPollPeriod, ch.Cfg.ConfirmPollPeriod) + assert.Equal(t, original.TxTimeout, ch.Cfg.TxTimeout) + assertTableRenders(t, r) +} diff --git a/core/cmd/solana_commands_test.go b/core/cmd/solana_commands_test.go new file mode 100644 index 00000000000..f899d83d5b6 --- /dev/null +++ b/core/cmd/solana_commands_test.go @@ -0,0 +1,48 @@ +package cmd_test + +import ( + "flag" + "testing" + + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" +) + +func TestClient_SolanaInit(t *testing.T) { + t.Parallel() + + app := solanaStartNewApplication(t) + client, r := app.NewClientAndRenderer() + + newNode := db.NewNode{ + Name: "first", + SolanaChainID: "Columbus-5", + SolanaURL: "https://solana.example", + } + set := flag.NewFlagSet("cli", 0) + set.String("name", newNode.Name, "") + set.String("url", newNode.SolanaURL, "") + set.String("chain-id", newNode.SolanaChainID, "") + + // Try to add node + c := cli.NewContext(nil, set, nil) + err := client.CreateSolanaNode(c) + require.Error(t, err) + + // Chain first + setCh := flag.NewFlagSet("cli", 0) + setCh.String("id", newNode.SolanaChainID, "") + 
setCh.Parse([]string{`{}`}) + cCh := cli.NewContext(nil, setCh, nil) + err = client.CreateSolanaChain(cCh) + require.NoError(t, err) + + // Then node + c = cli.NewContext(nil, set, nil) + err = client.CreateSolanaNode(c) + require.NoError(t, err) + + assertTableRenders(t, r) +} diff --git a/core/cmd/solana_node_commands.go b/core/cmd/solana_node_commands.go new file mode 100644 index 00000000000..b06e345d3fe --- /dev/null +++ b/core/cmd/solana_node_commands.go @@ -0,0 +1,126 @@ +package cmd + +import ( + "bytes" + "encoding/json" + "fmt" + "net/url" + + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + + "github.com/smartcontractkit/chainlink/core/web/presenters" +) + +// SolanaNodePresenter implements TableRenderer for a SolanaNodeResource. +type SolanaNodePresenter struct { + presenters.SolanaNodeResource +} + +// ToRow presents the SolanaNodeResource as a slice of strings. +func (p *SolanaNodePresenter) ToRow() []string { + row := []string{ + p.GetID(), + p.Name, + p.SolanaChainID, + p.SolanaURL, + p.CreatedAt.String(), + p.UpdatedAt.String(), + } + return row +} + +var solanaNodeHeaders = []string{"ID", "Name", "Chain ID", "URL", "Created", "Updated"} + +// RenderTable implements TableRenderer +func (p SolanaNodePresenter) RenderTable(rt RendererTable) error { + var rows [][]string + rows = append(rows, p.ToRow()) + renderList(solanaNodeHeaders, rows, rt.Writer) + + return nil +} + +// SolanaNodePresenters implements TableRenderer for a slice of SolanaNodePresenter. +type SolanaNodePresenters []SolanaNodePresenter + +// RenderTable implements TableRenderer +func (ps SolanaNodePresenters) RenderTable(rt RendererTable) error { + var rows [][]string + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(solanaNodeHeaders, rows, rt.Writer) + + return nil +} + +// IndexSolanaNodes returns all Solana nodes. 
+func (cli *Client) IndexSolanaNodes(c *cli.Context) (err error) { + return cli.getPage("/v2/nodes/solana", c.Int("page"), &SolanaNodePresenters{}) +} + +// CreateSolanaNode adds a new node to the nodelink node +func (cli *Client) CreateSolanaNode(c *cli.Context) (err error) { + name := c.String("name") + chainID := c.String("chain-id") + urlStr := c.String("url") + + if name == "" { + return cli.errorOut(errors.New("missing --name")) + } + if chainID == "" { + return cli.errorOut(errors.New("missing --chain-id")) + } + + if _, err2 := url.Parse(urlStr); err2 != nil { + return cli.errorOut(errors.Errorf("invalid url: %v", err2)) + } + + params := db.NewNode{ + Name: name, + SolanaChainID: chainID, + SolanaURL: urlStr, + } + + body, err := json.Marshal(params) + if err != nil { + return cli.errorOut(err) + } + + resp, err := cli.HTTP.Post("/v2/nodes/solana", bytes.NewBuffer(body)) + if err != nil { + return cli.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return cli.renderAPIResponse(resp, &SolanaNodePresenter{}) +} + +// RemoveSolanaNode removes a specific Solana Node by name. 
+func (cli *Client) RemoveSolanaNode(c *cli.Context) (err error) { + if !c.Args().Present() { + return cli.errorOut(errors.New("must pass the id of the node to be removed")) + } + nodeID := c.Args().First() + resp, err := cli.HTTP.Delete("/v2/nodes/solana/" + nodeID) + if err != nil { + return cli.errorOut(err) + } + _, err = cli.parseResponse(resp) + if err != nil { + return cli.errorOut(err) + } + + fmt.Printf("Node %v deleted\n", c.Args().First()) + return nil +} diff --git a/core/cmd/solana_node_commands_test.go b/core/cmd/solana_node_commands_test.go new file mode 100644 index 00000000000..1d7be64ac8d --- /dev/null +++ b/core/cmd/solana_node_commands_test.go @@ -0,0 +1,160 @@ +package cmd_test + +import ( + "flag" + "fmt" + "math/rand" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + "gopkg.in/guregu/null.v4" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + + "github.com/smartcontractkit/chainlink/core/chains/solana" + "github.com/smartcontractkit/chainlink/core/cmd" + "github.com/smartcontractkit/chainlink/core/internal/cltest" + "github.com/smartcontractkit/chainlink/core/internal/testutils/configtest" +) + +func mustInsertSolanaChain(t *testing.T, orm solana.ORM, id string) solana.Chain { + chain, err := orm.CreateChain(id, db.ChainCfg{}) + require.NoError(t, err) + return chain +} + +func solanaStartNewApplication(t *testing.T) *cltest.TestApplication { + return startNewApplication(t, withConfigSet(func(c *configtest.TestGeneralConfig) { + c.Overrides.SolanaEnabled = null.BoolFrom(true) + c.Overrides.EVMEnabled = null.BoolFrom(false) + c.Overrides.EVMRPCEnabled = null.BoolFrom(false) + })) +} + +func TestClient_IndexSolanaNodes(t *testing.T) { + t.Parallel() + + app := solanaStartNewApplication(t) + client, r := app.NewClientAndRenderer() + + orm := app.Chains.Solana.ORM() + _, initialCount, err := orm.Nodes(0, 25) + require.NoError(t, err) + chainID := 
fmt.Sprintf("Chainlinktest-%d", rand.Int31n(999999)) + _ = mustInsertSolanaChain(t, orm, chainID) + + params := db.Node{ + Name: "second", + SolanaChainID: chainID, + SolanaURL: "https://solana.example", + } + node, err := orm.CreateNode(params) + require.NoError(t, err) + + require.Nil(t, client.IndexSolanaNodes(cltest.EmptyCLIContext())) + require.NotEmpty(t, r.Renders) + nodes := *r.Renders[0].(*cmd.SolanaNodePresenters) + require.Len(t, nodes, initialCount+1) + n := nodes[initialCount] + assert.Equal(t, strconv.FormatInt(int64(node.ID), 10), n.ID) + assert.Equal(t, params.Name, n.Name) + assert.Equal(t, params.SolanaChainID, n.SolanaChainID) + assert.Equal(t, params.SolanaURL, n.SolanaURL) + assertTableRenders(t, r) +} + +func TestClient_CreateSolanaNode(t *testing.T) { + t.Parallel() + + app := solanaStartNewApplication(t) + client, r := app.NewClientAndRenderer() + + orm := app.Chains.Solana.ORM() + _, initialNodesCount, err := orm.Nodes(0, 25) + require.NoError(t, err) + chainIDA := fmt.Sprintf("Chainlinktest-%d", rand.Int31n(999999)) + chainIDB := fmt.Sprintf("Chainlinktest-%d", rand.Int31n(999999)) + _ = mustInsertSolanaChain(t, orm, chainIDA) + _ = mustInsertSolanaChain(t, orm, chainIDB) + + set := flag.NewFlagSet("cli", 0) + set.String("name", "first", "") + set.String("url", "http://tender.mint.test/columbus-5", "") + set.String("chain-id", chainIDA, "") + c := cli.NewContext(nil, set, nil) + err = client.CreateSolanaNode(c) + require.NoError(t, err) + + set = flag.NewFlagSet("cli", 0) + set.String("name", "second", "") + set.String("url", "http://tender.mint.test/bombay-12", "") + set.String("chain-id", chainIDB, "") + c = cli.NewContext(nil, set, nil) + err = client.CreateSolanaNode(c) + require.NoError(t, err) + + nodes, _, err := orm.Nodes(0, 25) + require.NoError(t, err) + require.Len(t, nodes, initialNodesCount+2) + n := nodes[initialNodesCount] + assertEqualNodesSolana(t, db.Node{ + Name: "first", + SolanaChainID: chainIDA, + SolanaURL: 
"http://tender.mint.test/columbus-5", + }, n) + n = nodes[initialNodesCount+1] + assertEqualNodesSolana(t, db.Node{ + Name: "second", + SolanaChainID: chainIDB, + SolanaURL: "http://tender.mint.test/bombay-12", + }, n) + + assertTableRenders(t, r) +} + +func TestClient_RemoveSolanaNode(t *testing.T) { + t.Parallel() + + app := solanaStartNewApplication(t) + client, r := app.NewClientAndRenderer() + + orm := app.Chains.Solana.ORM() + _, initialCount, err := orm.Nodes(0, 25) + require.NoError(t, err) + chainID := fmt.Sprintf("Chainlinktest-%d", rand.Int31n(999999)) + _ = mustInsertSolanaChain(t, orm, chainID) + + params := db.Node{ + Name: "first", + SolanaChainID: chainID, + SolanaURL: "http://tender.mint.test/columbus-5", + } + node, err := orm.CreateNode(params) + require.NoError(t, err) + chains, _, err := orm.Nodes(0, 25) + require.NoError(t, err) + require.Len(t, chains, initialCount+1) + + set := flag.NewFlagSet("cli", 0) + set.Parse([]string{strconv.FormatInt(int64(node.ID), 10)}) + c := cli.NewContext(nil, set, nil) + + err = client.RemoveSolanaNode(c) + require.NoError(t, err) + + chains, _, err = orm.Nodes(0, 25) + require.NoError(t, err) + require.Len(t, chains, initialCount) + assertTableRenders(t, r) +} + +func assertEqualNodesSolana(t *testing.T, newNode db.Node, gotNode db.Node) { + t.Helper() + + assert.Equal(t, newNode.Name, gotNode.Name) + assert.Equal(t, newNode.SolanaChainID, gotNode.SolanaChainID) + assert.Equal(t, newNode.SolanaURL, gotNode.SolanaURL) +} diff --git a/core/cmd/solana_transaction_commands.go b/core/cmd/solana_transaction_commands.go new file mode 100644 index 00000000000..9611b0aa52d --- /dev/null +++ b/core/cmd/solana_transaction_commands.go @@ -0,0 +1,96 @@ +package cmd + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + + solanaGo "github.com/gagliardetto/solana-go" + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" + + "github.com/smartcontractkit/chainlink/core/store/models/solana" + 
"github.com/smartcontractkit/chainlink/core/web/presenters" +) + +type SolanaMsgPresenter struct { + JAID + presenters.SolanaMsgResource +} + +// RenderTable implements TableRenderer +func (p *SolanaMsgPresenter) RenderTable(rt RendererTable) error { + table := rt.newTable([]string{"Chain ID", "From", "To", "Amount"}) + table.Append([]string{ + p.ChainID, + p.From, + p.To, + strconv.FormatUint(p.Amount, 10), + }) + + render(fmt.Sprintf("Solana Message %v", p.ID), table) + return nil +} + +// SolanaSendSol transfers sol from the node's account to a specified address. +func (cli *Client) SolanaSendSol(c *cli.Context) (err error) { + if c.NArg() < 3 { + return cli.errorOut(errors.New("three arguments expected: amount, fromAddress and toAddress")) + } + + amount, err := strconv.ParseUint(c.Args().Get(0), 10, 64) + if err != nil { + return cli.errorOut(fmt.Errorf("invalid amount: %w", err)) + } + + unparsedFromAddress := c.Args().Get(1) + fromAddress, err := solanaGo.PublicKeyFromBase58(unparsedFromAddress) + if err != nil { + return cli.errorOut(multierr.Combine( + errors.Errorf("while parsing withdrawal source address %v", + unparsedFromAddress), err)) + } + + unparsedDestinationAddress := c.Args().Get(2) + destinationAddress, err := solanaGo.PublicKeyFromBase58(unparsedDestinationAddress) + if err != nil { + return cli.errorOut(multierr.Combine( + errors.Errorf("while parsing withdrawal destination address %v", + unparsedDestinationAddress), err)) + } + + chainID := c.String("id") + if chainID == "" { + return cli.errorOut(errors.New("missing id")) + } + + request := solana.SendRequest{ + To: destinationAddress, + From: fromAddress, + Amount: amount, + SolanaChainID: chainID, + AllowHigherAmounts: c.IsSet("force"), + } + + requestData, err := json.Marshal(request) + if err != nil { + return cli.errorOut(err) + } + + buf := bytes.NewBuffer(requestData) + + resp, err := cli.HTTP.Post("/v2/transfers/solana", buf) + if err != nil { + return cli.errorOut(err) + } + defer 
func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + err = cli.renderAPIResponse(resp, &SolanaMsgPresenter{}) + return err +} diff --git a/core/cmd/solana_transaction_commands_test.go b/core/cmd/solana_transaction_commands_test.go new file mode 100644 index 00000000000..776b83b07a3 --- /dev/null +++ b/core/cmd/solana_transaction_commands_test.go @@ -0,0 +1,113 @@ +//go:build integration + +package cmd_test + +import ( + "flag" + "fmt" + "strconv" + "testing" + "time" + + "github.com/gagliardetto/solana-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + solanaClient "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" + solanadb "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + + "github.com/smartcontractkit/chainlink/core/cmd" + "github.com/smartcontractkit/chainlink/core/internal/testutils" +) + +func TestClient_SolanaSendSol(t *testing.T) { + chainID := "localnet" + url := solanaClient.SetupLocalSolNode(t) + app := solanaStartNewApplication(t) + from, err := app.GetKeyStore().Solana().Create() + require.NoError(t, err) + to, err := solana.NewRandomPrivateKey() + require.NoError(t, err) + solanaClient.FundTestAccounts(t, []solana.PublicKey{from.PublicKey()}, url) + + chains := app.GetChains() + _, err = chains.Solana.Add(testutils.Context(t), chainID, solanadb.ChainCfg{}) + require.NoError(t, err) + chain, err := chains.Solana.Chain(testutils.Context(t), chainID) + require.NoError(t, err) + + _, err = chains.Solana.ORM().CreateNode(solanadb.Node{ + Name: t.Name(), + SolanaChainID: chainID, + SolanaURL: url, + }) + require.NoError(t, err) + + reader, err := chain.Reader() + require.NoError(t, err) + + require.Eventually(t, func() bool { + coin, err := reader.Balance(from.PublicKey()) + if !assert.NoError(t, err) { + return false + } + return coin == 100*solana.LAMPORTS_PER_SOL + }, time.Minute, 5*time.Second) + + client, r := 
app.NewClientAndRenderer() + cliapp := cli.NewApp() + + for _, tt := range []struct { + amount string + expErr string + }{ + {amount: "1000000000"}, + {amount: "100000000000", expErr: "is too low for this transaction to be executed:"}, + {amount: "0", expErr: "amount must be greater than zero"}, + {amount: "asdf", expErr: "invalid amount:"}, + } { + tt := tt + t.Run(tt.amount, func(t *testing.T) { + startBal, err := reader.Balance(from.PublicKey()) + require.NoError(t, err) + + set := flag.NewFlagSet("sendsolcoins", 0) + set.String("id", chainID, "") + set.Parse([]string{tt.amount, from.PublicKey().String(), to.PublicKey().String()}) + c := cli.NewContext(cliapp, set, nil) + err = client.SolanaSendSol(c) + if tt.expErr == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expErr) + return + } + + // Check CLI output + require.Greater(t, len(r.Renders), 0) + renderer := r.Renders[len(r.Renders)-1] + renderedMsg := renderer.(*cmd.SolanaMsgPresenter) + fmt.Printf("%+v\n", renderedMsg) + require.NotEmpty(t, renderedMsg.ID) + assert.Equal(t, chainID, renderedMsg.ChainID) + assert.Equal(t, from.PublicKey().String(), renderedMsg.From) + assert.Equal(t, to.PublicKey().String(), renderedMsg.To) + assert.Equal(t, tt.amount, strconv.FormatUint(renderedMsg.Amount, 10)) + + time.Sleep(time.Second) // wait for tx execution + + // Check balance + endBal, err := reader.Balance(from.PublicKey()) + require.NoError(t, err) + if assert.NotEqual(t, 0, startBal) && assert.NotEqual(t, 0, endBal) { + diff := startBal - endBal + receiveBal, err := reader.Balance(to.PublicKey()) + require.NoError(t, err) + assert.Equal(t, tt.amount, strconv.FormatUint(receiveBal, 10)) + assert.Greater(t, diff, receiveBal) + } + }) + } +} diff --git a/core/cmd/terra_node_commands_test.go b/core/cmd/terra_node_commands_test.go index cfa82ee620c..3c7b8a34a96 100644 --- a/core/cmd/terra_node_commands_test.go +++ b/core/cmd/terra_node_commands_test.go @@ -20,7 
+20,7 @@ import ( "github.com/smartcontractkit/chainlink/core/internal/testutils/configtest" ) -func mustInsertTerraChain(t *testing.T, orm types.ORM, id string) db.Chain { +func mustInsertTerraChain(t *testing.T, orm types.ORM, id string) types.Chain { chain, err := orm.CreateChain(id, db.ChainCfg{}) require.NoError(t, err) return chain @@ -46,7 +46,7 @@ func TestClient_IndexTerraNodes(t *testing.T) { chainID := fmt.Sprintf("Chainlinktest-%d", rand.Int31n(999999)) _ = mustInsertTerraChain(t, orm, chainID) - params := types.NewNode{ + params := db.Node{ Name: "second", TerraChainID: chainID, TendermintURL: "http://tender.mint.test/bombay-12", @@ -102,13 +102,13 @@ func TestClient_CreateTerraNode(t *testing.T) { require.NoError(t, err) require.Len(t, nodes, initialNodesCount+2) n := nodes[initialNodesCount] - assertEqual(t, types.NewNode{ + assertEqualNodesTerra(t, types.NewNode{ Name: "first", TerraChainID: chainIDA, TendermintURL: "http://tender.mint.test/columbus-5", }, n) n = nodes[initialNodesCount+1] - assertEqual(t, types.NewNode{ + assertEqualNodesTerra(t, types.NewNode{ Name: "second", TerraChainID: chainIDB, TendermintURL: "http://tender.mint.test/bombay-12", @@ -129,7 +129,7 @@ func TestClient_RemoveTerraNode(t *testing.T) { chainID := fmt.Sprintf("Chainlinktest-%d", rand.Int31n(999999)) _ = mustInsertTerraChain(t, orm, chainID) - params := types.NewNode{ + params := db.Node{ Name: "first", TerraChainID: chainID, TendermintURL: "http://tender.mint.test/columbus-5", @@ -153,7 +153,7 @@ func TestClient_RemoveTerraNode(t *testing.T) { assertTableRenders(t, r) } -func assertEqual(t *testing.T, newNode types.NewNode, gotNode db.Node) { +func assertEqualNodesTerra(t *testing.T, newNode types.NewNode, gotNode db.Node) { t.Helper() assert.Equal(t, newNode.Name, gotNode.Name) diff --git a/core/cmd/terra_transaction_commands_test.go b/core/cmd/terra_transaction_commands_test.go index b23b966bb3b..a90eb36e12a 100644 --- a/core/cmd/terra_transaction_commands_test.go 
+++ b/core/cmd/terra_transaction_commands_test.go @@ -18,7 +18,6 @@ import ( "github.com/smartcontractkit/chainlink/core/chains/terra/denom" "github.com/smartcontractkit/chainlink/core/chains/terra/terratxm" - terratypes "github.com/smartcontractkit/chainlink/core/chains/terra/types" "github.com/smartcontractkit/chainlink/core/cmd" "github.com/smartcontractkit/chainlink/core/internal/testutils" "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" @@ -43,7 +42,7 @@ func TestClient_SendTerraCoins(t *testing.T) { chain, err := chains.Terra.Chain(testutils.Context(t), chainID) require.NoError(t, err) - _, err = chains.Terra.ORM().CreateNode(terratypes.NewNode{ + _, err = chains.Terra.ORM().CreateNode(terradb.Node{ Name: t.Name(), TerraChainID: chainID, TendermintURL: tendermintURL, diff --git a/core/chains/chaintype.go b/core/config/chaintype.go similarity index 62% rename from core/chains/chaintype.go rename to core/config/chaintype.go index 051b5ef1d10..88786857371 100644 --- a/core/chains/chaintype.go +++ b/core/config/chaintype.go @@ -1,20 +1,20 @@ -package chains +package config // ChainType denotes the chain or network to work with type ChainType string //nolint const ( - Arbitrum ChainType = "arbitrum" - ExChain ChainType = "exchain" - Optimism ChainType = "optimism" - XDai ChainType = "xdai" + ChainArbitrum ChainType = "arbitrum" + ChainExChain ChainType = "exchain" + ChainOptimism ChainType = "optimism" + ChainXDai ChainType = "xdai" ) // IsValid returns true if the ChainType value is known or empty. 
func (c ChainType) IsValid() bool { switch c { - case "", Arbitrum, ExChain, Optimism, XDai: + case "", ChainArbitrum, ChainExChain, ChainOptimism, ChainXDai: return true } return false @@ -24,10 +24,10 @@ func (c ChainType) IsValid() bool { // used for log searching are different from calling block.number func (c ChainType) IsL2() bool { switch c { - case Arbitrum, Optimism: + case ChainArbitrum, ChainOptimism: return true - case XDai, ExChain: + case ChainXDai, ChainExChain: fallthrough default: return false diff --git a/core/config/config_internal_test.go b/core/config/config_internal_test.go index cfca8e34750..f62c2053b08 100644 --- a/core/config/config_internal_test.go +++ b/core/config/config_internal_test.go @@ -19,7 +19,7 @@ import ( func TestGeneralConfig_Defaults(t *testing.T) { config := NewGeneralConfig(logger.TestLogger(t)) assert.Equal(t, uint64(10), config.BlockBackfillDepth()) - assert.Equal(t, new(url.URL), config.BridgeResponseURL()) + assert.Equal(t, (*url.URL)(nil), config.BridgeResponseURL()) assert.Nil(t, config.DefaultChainID()) assert.True(t, config.EVMRPCEnabled()) assert.True(t, config.EVMEnabled()) diff --git a/core/config/envvar/envvar.go b/core/config/envvar/envvar.go index 4d494293d35..3d3161f30b1 100644 --- a/core/config/envvar/envvar.go +++ b/core/config/envvar/envvar.go @@ -4,35 +4,45 @@ import ( "fmt" "log" "os" + "strconv" + "time" "github.com/pkg/errors" "go.uber.org/zap/zapcore" "github.com/smartcontractkit/chainlink/core/config/parse" - "github.com/smartcontractkit/chainlink/core/utils" ) +//nolint var ( - // LogLevel reprents a parseable version of the `LOG_LEVEL`env var. - LogLevel = New("LogLevel", parse.LogLevel) - // RootDir reprents a parseable version of the `ROOT`env var. - RootDir = New("RootDir", parse.HomeDir) - // JSONConsole reprents a parseable version of the `JSON_CONSOLE`env var. - JSONConsole = New("JSONConsole", parse.Bool) - // LogFileMaxSize reprents a parseable version of the `LOG_FILE_MAX_SIZE`env var. 
- LogFileMaxSize = New("LogFileMaxSize", parse.FileSize) - // LogFileMaxAge reprents a parseable version of the `LOG_FILE_MAX_AGE`env var. - LogFileMaxAge = New("LogFileMaxAge", parse.Int64) - // LogFileMaxBackups reprents a parseable version of the `LOG_FILE_MAX_BACKUPS`env var. - LogFileMaxBackups = New("LogFileMaxBackups", parse.Int64) - // LogUnixTS reprents a parseable version of the `LOG_UNIX_TS`env var. - LogUnixTS = New("LogUnixTS", parse.Bool) + AdvisoryLockID = NewInt64("AdvisoryLockID") + AuthenticatedRateLimitPeriod = NewDuration("AuthenticatedRateLimitPeriod") + AutoPprofPollInterval = NewDuration("AutoPprofPollInterval") + AutoPprofGatherDuration = NewDuration("AutoPprofGatherDuration") + AutoPprofGatherTraceDuration = NewDuration("AutoPprofGatherTraceDuration") + BlockBackfillDepth = NewUint64("BlockBackfillDepth") + HTTPServerWriteTimeout = NewDuration("HTTPServerWriteTimeout") + JobPipelineMaxRunDuration = NewDuration("JobPipelineMaxRunDuration") + JobPipelineResultWriteQueueDepth = NewUint64("JobPipelineResultWriteQueueDepth") + JobPipelineReaperInterval = NewDuration("JobPipelineReaperInterval") + JobPipelineReaperThreshold = NewDuration("JobPipelineReaperThreshold") + KeeperRegistryCheckGasOverhead = NewUint64("KeeperRegistryCheckGasOverhead") + KeeperRegistryPerformGasOverhead = NewUint64("KeeperRegistryPerformGasOverhead") + KeeperRegistrySyncInterval = NewDuration("KeeperRegistrySyncInterval") + KeeperRegistrySyncUpkeepQueueSize = NewUint32("KeeperRegistrySyncUpkeepQueueSize") + LogLevel = New[zapcore.Level]("LogLevel", parse.LogLevel) + RootDir = New[string]("RootDir", parse.HomeDir) + JSONConsole = NewBool("JSONConsole") + LogFileMaxSize = New("LogFileMaxSize", parse.FileSize) + LogFileMaxAge = New("LogFileMaxAge", parse.Int64) + LogFileMaxBackups = New("LogFileMaxBackups", parse.Int64) + LogUnixTS = NewBool("LogUnixTS") ) -// EnvVar is an environment variable which -type EnvVar struct { +// EnvVar is an environment variable parsed as T. 
+type EnvVar[T any] struct { name string - parse func(string) (interface{}, error) + parse func(string) (T, error) envVarName string defaultValue string @@ -41,14 +51,14 @@ type EnvVar struct { // New creates a new EnvVar for the given name and parse func. // name must match the ConfigSchema field. -func New(name string, parse func(string) (interface{}, error)) *EnvVar { - e := &EnvVar{name: name, parse: parse, envVarName: Name(name)} +func New[T any](name string, parse func(string) (T, error)) *EnvVar[T] { + e := &EnvVar[T]{name: name, parse: parse, envVarName: Name(name)} e.defaultValue, e.hasDefault = DefaultValue(name) return e } // Parse attempts to parse the value returned from the environment, falling back to the default value when empty or invalid. -func (e *EnvVar) Parse() (v interface{}, invalid string) { +func (e *EnvVar[T]) Parse() (v T, invalid string) { var err error v, invalid, err = e.ParseFrom(os.Getenv) if err != nil { @@ -59,7 +69,7 @@ func (e *EnvVar) Parse() (v interface{}, invalid string) { // ParseFrom attempts to parse the value returned from calling get with the env var name, falling back to the default // value when empty or invalid. 
-func (e *EnvVar) ParseFrom(get func(string) string) (v interface{}, invalid string, err error) { +func (e *EnvVar[T]) ParseFrom(get func(string) string) (v T, invalid string, err error) { str := get(e.envVarName) if str != "" { v, err = e.parse(str) @@ -68,13 +78,14 @@ func (e *EnvVar) ParseFrom(get func(string) string) (v interface{}, invalid stri } var df interface{} = e.defaultValue if !e.hasDefault { - df = ZeroValue(e.name) + var t T + df = t } invalid = fmt.Sprintf(`Invalid value provided for %s, "%s" - falling back to default "%s": %v`, e.name, str, df, err) } if !e.hasDefault { - v = ZeroValue(e.name) + // zero value return } @@ -83,44 +94,30 @@ func (e *EnvVar) ParseFrom(get func(string) string) (v interface{}, invalid stri return } -// ParseString parses string -func (e *EnvVar) ParseString() (v string, invalid string) { - var i interface{} - i, invalid = e.Parse() - return i.(string), invalid +func NewString(name string) *EnvVar[string] { + return New[string](name, parse.String) } -// ParseBool parses bool -func (e *EnvVar) ParseBool() (v bool, invalid string) { - var i interface{} - i, invalid = e.Parse() - return i.(bool), invalid +func NewBool(name string) *EnvVar[bool] { + return New[bool](name, strconv.ParseBool) } -// ParseInt64 parses value into `int64` -func (e *EnvVar) ParseInt64() (v int64, invalid string) { - var i interface{} - i, invalid = e.Parse() - return i.(int64), invalid +func NewInt64(name string) *EnvVar[int64] { + return New[int64](name, parse.Int64) } -// ParseFileSize parses value into `utils.FileSize` -func (e *EnvVar) ParseFileSize() (v utils.FileSize, invalid string) { - var i interface{} - i, invalid = e.Parse() - return i.(utils.FileSize), invalid +func NewUint64(name string) *EnvVar[uint64] { + return New[uint64](name, parse.Uint64) } -// ParseLogLevel parses an env var's log level -func (e *EnvVar) ParseLogLevel() (v zapcore.Level, invalid string) { - var i interface{} - i, invalid = e.Parse() - var ll zapcore.Level - 
switch v := i.(type) { - case zapcore.Level: - ll = v - case *zapcore.Level: - ll = *v - } - return ll, invalid +func NewUint32(name string) *EnvVar[uint32] { + return New[uint32](name, parse.Uint32) +} + +func NewUint16(name string) *EnvVar[uint16] { + return New[uint16](name, parse.Uint16) +} + +func NewDuration(name string) *EnvVar[time.Duration] { + return New[time.Duration](name, time.ParseDuration) } diff --git a/core/config/envvar/schema.go b/core/config/envvar/schema.go index 7f573077fb5..9667e770501 100644 --- a/core/config/envvar/schema.go +++ b/core/config/envvar/schema.go @@ -116,11 +116,16 @@ type ConfigSchema struct { FeatureFeedsManager bool `env:"FEATURE_FEEDS_MANAGER" default:"false"` //nodoc FeatureUICSAKeys bool `env:"FEATURE_UI_CSA_KEYS" default:"false"` //nodoc + // LogPoller + FeatureLogPoller bool `env:"FEATURE_LOG_POLLER" default:"false"` //nodoc + // General chains/RPC - EVMEnabled bool `env:"EVM_ENABLED" default:"true"` - EVMRPCEnabled bool `env:"EVM_RPC_ENABLED" default:"true"` - SolanaEnabled bool `env:"SOLANA_ENABLED" default:"false"` - TerraEnabled bool `env:"TERRA_ENABLED" default:"false"` + EVMEnabled bool `env:"EVM_ENABLED" default:"true"` + EVMRPCEnabled bool `env:"EVM_RPC_ENABLED" default:"true"` + SolanaEnabled bool `env:"SOLANA_ENABLED" default:"false"` + SolanaNodes string `env:"SOLANA_NODES"` + TerraEnabled bool `env:"TERRA_ENABLED" default:"false"` + TerraNodes string `env:"TERRA_NODES"` // EVM/Ethereum // Legacy Eth ENV vars @@ -144,6 +149,7 @@ type ConfigSchema struct { EvmHeadTrackerMaxBufferSize uint `env:"ETH_HEAD_TRACKER_MAX_BUFFER_SIZE"` EvmHeadTrackerSamplingInterval time.Duration `env:"ETH_HEAD_TRACKER_SAMPLING_INTERVAL"` EvmLogBackfillBatchSize uint32 `env:"ETH_LOG_BACKFILL_BATCH_SIZE"` + EvmLogPollInterval time.Duration `env:"ETH_LOG_POLL_INTERVAL"` EvmRPCDefaultBatchSize uint32 `env:"ETH_RPC_DEFAULT_BATCH_SIZE"` LinkContractAddress string `env:"LINK_CONTRACT_ADDRESS"` MinIncomingConfirmations uint32 
`env:"MIN_INCOMING_CONFIRMATIONS"` @@ -180,6 +186,7 @@ type ConfigSchema struct { EvmMaxInFlightTransactions uint32 `env:"ETH_MAX_IN_FLIGHT_TRANSACTIONS"` EvmMaxQueuedTransactions uint64 `env:"ETH_MAX_QUEUED_TRANSACTIONS"` EvmNonceAutoSync bool `env:"ETH_NONCE_AUTO_SYNC"` + EvmUseForwarders bool `env:"ETH_USE_FORWARDERS"` // Job Pipeline and tasks DefaultHTTPAllowUnrestrictedNetworkAccess bool `env:"DEFAULT_HTTP_ALLOW_UNRESTRICTED_NETWORK_ACCESS" default:"false"` @@ -268,6 +275,8 @@ type ConfigSchema struct { KeeperRegistryPerformGasOverhead uint64 `env:"KEEPER_REGISTRY_PERFORM_GAS_OVERHEAD" default:"150000"` KeeperRegistrySyncInterval time.Duration `env:"KEEPER_REGISTRY_SYNC_INTERVAL" default:"30m"` KeeperRegistrySyncUpkeepQueueSize uint32 `env:"KEEPER_REGISTRY_SYNC_UPKEEP_QUEUE_SIZE" default:"10"` + KeeperTurnLookBack int64 `env:"KEEPER_TURN_LOOK_BACK" default:"1000"` + KeeperTurnFlagEnabled bool `env:"KEEPER_TURN_FLAG_ENABLED" default:"false"` // CLI client AdminCredentialsFile string `env:"ADMIN_CREDENTIALS_FILE" default:"$ROOT/apicredentials"` @@ -318,16 +327,3 @@ func DefaultValue(name string) (string, bool) { log.Panicf("Invariant violated, no field of name %s found for DefaultValue", name) return "", false } - -// ZeroValue returns the zero value for a named field, or panics if it does not exist. 
-func ZeroValue(name string) interface{} { - schemaT := reflect.TypeOf(ConfigSchema{}) - if item, ok := schemaT.FieldByName(name); ok { - if item.Type.Kind() == reflect.Ptr { - return nil - } - return reflect.New(item.Type).Interface() - } - log.Panicf("Invariant violated, no field of name %s found for ZeroValue", name) - return nil -} diff --git a/core/config/envvar/schema_test.go b/core/config/envvar/schema_test.go index a3a14c1ad70..3b6fd5f98a4 100644 --- a/core/config/envvar/schema_test.go +++ b/core/config/envvar/schema_test.go @@ -66,7 +66,6 @@ func TestConfigSchema(t *testing.T) { "EthereumURL": "ETH_URL", "EthereumNodes": "EVM_NODES", "EvmBalanceMonitorBlockDelay": "ETH_BALANCE_MONITOR_BLOCK_DELAY", - "EvmDefaultBatchSize": "ETH_DEFAULT_BATCH_SIZE", "EvmEIP1559DynamicFees": "EVM_EIP1559_DYNAMIC_FEES", "EvmFinalityDepth": "ETH_FINALITY_DEPTH", "EvmGasBumpPercent": "ETH_GAS_BUMP_PERCENT", @@ -84,11 +83,13 @@ func TestConfigSchema(t *testing.T) { "EvmHeadTrackerMaxBufferSize": "ETH_HEAD_TRACKER_MAX_BUFFER_SIZE", "EvmHeadTrackerSamplingInterval": "ETH_HEAD_TRACKER_SAMPLING_INTERVAL", "EvmLogBackfillBatchSize": "ETH_LOG_BACKFILL_BATCH_SIZE", + "EvmLogPollInterval": "ETH_LOG_POLL_INTERVAL", "EvmMaxGasPriceWei": "ETH_MAX_GAS_PRICE_WEI", "EvmMaxInFlightTransactions": "ETH_MAX_IN_FLIGHT_TRANSACTIONS", "EvmMaxQueuedTransactions": "ETH_MAX_QUEUED_TRANSACTIONS", "EvmMinGasPriceWei": "ETH_MIN_GAS_PRICE_WEI", "EvmNonceAutoSync": "ETH_NONCE_AUTO_SYNC", + "EvmUseForwarders": "ETH_USE_FORWARDERS", "EvmRPCDefaultBatchSize": "ETH_RPC_DEFAULT_BATCH_SIZE", "ExplorerAccessKey": "EXPLORER_ACCESS_KEY", "ExplorerSecret": "EXPLORER_SECRET", @@ -97,6 +98,7 @@ func TestConfigSchema(t *testing.T) { "FMSimulateTransactions": "FM_SIMULATE_TRANSACTIONS", "FeatureExternalInitiators": "FEATURE_EXTERNAL_INITIATORS", "FeatureFeedsManager": "FEATURE_FEEDS_MANAGER", + "FeatureLogPoller": "FEATURE_LOG_POLLER", "FeatureOffchainReporting": "FEATURE_OFFCHAIN_REPORTING", "FeatureOffchainReporting2": 
"FEATURE_OFFCHAIN_REPORTING2", "FeatureUICSAKeys": "FEATURE_UI_CSA_KEYS", @@ -124,6 +126,8 @@ func TestConfigSchema(t *testing.T) { "KeeperRegistryPerformGasOverhead": "KEEPER_REGISTRY_PERFORM_GAS_OVERHEAD", "KeeperRegistrySyncInterval": "KEEPER_REGISTRY_SYNC_INTERVAL", "KeeperRegistrySyncUpkeepQueueSize": "KEEPER_REGISTRY_SYNC_UPKEEP_QUEUE_SIZE", + "KeeperTurnLookBack": "KEEPER_TURN_LOOK_BACK", + "KeeperTurnFlagEnabled": "KEEPER_TURN_FLAG_ENABLED", "LeaseLockDuration": "LEASE_LOCK_DURATION", "LeaseLockRefreshInterval": "LEASE_LOCK_REFRESH_INTERVAL", "LinkContractAddress": "LINK_CONTRACT_ADDRESS", @@ -157,6 +161,8 @@ func TestConfigSchema(t *testing.T) { "SessionTimeout": "SESSION_TIMEOUT", "ShutdownGracePeriod": "SHUTDOWN_GRACE_PERIOD", "SolanaEnabled": "SOLANA_ENABLED", + "SolanaNodes": "SOLANA_NODES", + "TerraNodes": "TERRA_NODES", "TLSCertPath": "TLS_CERT_PATH", "TLSHost": "CHAINLINK_TLS_HOST", "TLSKeyPath": "TLS_KEY_PATH", diff --git a/core/config/general_config.go b/core/config/general_config.go index 2faf946251f..489dd1dc430 100644 --- a/core/config/general_config.go +++ b/core/config/general_config.go @@ -8,6 +8,7 @@ import ( "path/filepath" "reflect" "regexp" + "strconv" "sync" "time" @@ -19,7 +20,6 @@ import ( "go.uber.org/zap/zapcore" "github.com/smartcontractkit/chainlink/core/assets" - "github.com/smartcontractkit/chainlink/core/chains" "github.com/smartcontractkit/chainlink/core/config/envvar" "github.com/smartcontractkit/chainlink/core/config/parse" "github.com/smartcontractkit/chainlink/core/logger" @@ -50,6 +50,7 @@ type FeatureFlags interface { FeatureOffchainReporting() bool FeatureOffchainReporting2() bool FeatureUICSAKeys() bool + FeatureLogPoller() bool AutoPprofEnabled() bool EVMEnabled() bool @@ -134,6 +135,8 @@ type GeneralOnlyConfig interface { KeeperRegistryPerformGasOverhead() uint64 KeeperRegistrySyncInterval() time.Duration KeeperRegistrySyncUpkeepQueueSize() uint32 + KeeperTurnLookBack() int64 + KeeperTurnFlagEnabled() bool KeyFile() 
string LeaseLockDuration() time.Duration LeaseLockRefreshInterval() time.Duration @@ -156,6 +159,8 @@ type GeneralOnlyConfig interface { SessionOptions() sessions.Options SessionSecret() ([]byte, error) SessionTimeout() models.Duration + SolanaNodes() string + TerraNodes() string TLSCertPath() string TLSDir() string TLSHost() string @@ -191,7 +196,6 @@ type GlobalConfig interface { GlobalEthTxReaperInterval() (time.Duration, bool) GlobalEthTxReaperThreshold() (time.Duration, bool) GlobalEthTxResendAfterThreshold() (time.Duration, bool) - GlobalEvmDefaultBatchSize() (uint32, bool) GlobalEvmEIP1559DynamicFees() (bool, bool) GlobalEvmFinalityDepth() (uint32, bool) GlobalEvmGasBumpPercent() (uint16, bool) @@ -209,11 +213,13 @@ type GlobalConfig interface { GlobalEvmHeadTrackerMaxBufferSize() (uint32, bool) GlobalEvmHeadTrackerSamplingInterval() (time.Duration, bool) GlobalEvmLogBackfillBatchSize() (uint32, bool) + GlobalEvmLogPollInterval() (time.Duration, bool) GlobalEvmMaxGasPriceWei() (*big.Int, bool) GlobalEvmMaxInFlightTransactions() (uint32, bool) GlobalEvmMaxQueuedTransactions() (uint64, bool) GlobalEvmMinGasPriceWei() (*big.Int, bool) GlobalEvmNonceAutoSync() (bool, bool) + GlobalEvmUseForwarders() (bool, bool) GlobalEvmRPCDefaultBatchSize() (uint32, bool) GlobalFlagsContractAddress() (string, bool) GlobalGasEstimatorMode() (string, bool) @@ -296,7 +302,7 @@ func newGeneralConfigWithViper(v *viper.Viper, lggr logger.Logger) (config *gene lggr.Warnf("Unable to load config file: %v\n", err) } - ll, invalid := envvar.LogLevel.ParseLogLevel() + ll, invalid := envvar.LogLevel.Parse() if invalid != "" { lggr.Error(invalid) } @@ -355,7 +361,7 @@ EVM_ENABLED=false return errors.Wrapf(err, "invalid monitoring url: %s", me) } } - if ct, set := c.GlobalChainType(); set && !chains.ChainType(ct).IsValid() { + if ct, set := c.GlobalChainType(); set && !ChainType(ct).IsValid() { return errors.Errorf("CHAIN_TYPE is invalid: %s", ct) } @@ -442,7 +448,7 @@ func (c 
*generalConfig) AuthenticatedRateLimit() int64 { // AuthenticatedRateLimitPeriod defines the period to which authenticated requests get limited func (c *generalConfig) AuthenticatedRateLimitPeriod() models.Duration { - return models.MustMakeDuration(c.getWithFallback("AuthenticatedRateLimitPeriod", parse.Duration).(time.Duration)) + return models.MustMakeDuration(getEnvWithFallback(c, envvar.AuthenticatedRateLimitPeriod)) } func (c *generalConfig) AutoPprofEnabled() bool { @@ -458,19 +464,19 @@ func (c *generalConfig) AutoPprofProfileRoot() string { } func (c *generalConfig) AutoPprofPollInterval() models.Duration { - return models.MustMakeDuration(c.getWithFallback("AutoPprofPollInterval", parse.Duration).(time.Duration)) + return models.MustMakeDuration(getEnvWithFallback(c, envvar.AutoPprofPollInterval)) } func (c *generalConfig) AutoPprofGatherDuration() models.Duration { - return models.MustMakeDuration(c.getWithFallback("AutoPprofGatherDuration", parse.Duration).(time.Duration)) + return models.MustMakeDuration(getEnvWithFallback(c, envvar.AutoPprofGatherDuration)) } func (c *generalConfig) AutoPprofGatherTraceDuration() models.Duration { - return models.MustMakeDuration(c.getWithFallback("AutoPprofGatherTraceDuration", parse.Duration).(time.Duration)) + return models.MustMakeDuration(getEnvWithFallback(c, envvar.AutoPprofGatherTraceDuration)) } func (c *generalConfig) AutoPprofMaxProfileSize() utils.FileSize { - return c.getWithFallback("AutoPprofMaxProfileSize", parse.FileSize).(utils.FileSize) + return getEnvWithFallback(c, envvar.New("AutoPprofMaxProfileSize", parse.FileSize)) } func (c *generalConfig) AutoPprofCPUProfileRate() int { @@ -490,7 +496,7 @@ func (c *generalConfig) AutoPprofMutexProfileFraction() int { } func (c *generalConfig) AutoPprofMemThreshold() utils.FileSize { - return c.getWithFallback("AutoPprofMemThreshold", parse.FileSize).(utils.FileSize) + return getEnvWithFallback(c, envvar.New("AutoPprofMemThreshold", parse.FileSize)) } func (c 
*generalConfig) AutoPprofGoroutineThreshold() int { @@ -500,17 +506,17 @@ func (c *generalConfig) AutoPprofGoroutineThreshold() int { // BlockBackfillDepth specifies the number of blocks before the current HEAD that the // log broadcaster will try to re-consume logs from func (c *generalConfig) BlockBackfillDepth() uint64 { - return c.getWithFallback("BlockBackfillDepth", parse.Uint64).(uint64) + return getEnvWithFallback(c, envvar.BlockBackfillDepth) } // BlockBackfillSkip enables skipping of very long log backfills func (c *generalConfig) BlockBackfillSkip() bool { - return c.getWithFallback("BlockBackfillSkip", parse.Bool).(bool) + return getEnvWithFallback(c, envvar.NewBool("BlockBackfillSkip")) } // BridgeResponseURL represents the URL for bridges to send a response to. func (c *generalConfig) BridgeResponseURL() *url.URL { - return c.getWithFallback("BridgeResponseURL", parse.URL).(*url.URL) + return getEnvWithFallback(c, envvar.New("BridgeResponseURL", url.Parse)) } // ClientNodeURL is the URL of the Ethereum node this Chainlink node should connect to. @@ -520,26 +526,26 @@ func (c *generalConfig) ClientNodeURL() string { // FeatureUICSAKeys enables the CSA Keys UI Feature. 
func (c *generalConfig) FeatureUICSAKeys() bool { - return c.getWithFallback("FeatureUICSAKeys", parse.Bool).(bool) + return getEnvWithFallback(c, envvar.NewBool("FeatureUICSAKeys")) } func (c *generalConfig) DatabaseListenerMinReconnectInterval() time.Duration { - return c.getWithFallback("DatabaseListenerMinReconnectInterval", parse.Duration).(time.Duration) + return getEnvWithFallback(c, envvar.NewDuration("DatabaseListenerMinReconnectInterval")) } func (c *generalConfig) DatabaseListenerMaxReconnectDuration() time.Duration { - return c.getWithFallback("DatabaseListenerMaxReconnectDuration", parse.Duration).(time.Duration) + return getEnvWithFallback(c, envvar.NewDuration("DatabaseListenerMaxReconnectDuration")) } // DatabaseBackupMode sets the database backup mode func (c *generalConfig) DatabaseBackupMode() DatabaseBackupMode { - return c.getWithFallback("DatabaseBackupMode", parseDatabaseBackupMode).(DatabaseBackupMode) + return getEnvWithFallback(c, envvar.New("DatabaseBackupMode", parseDatabaseBackupMode)) } // DatabaseBackupFrequency turns on the periodic database backup if set to a positive value // DatabaseBackupMode must be then set to a value other than "none" func (c *generalConfig) DatabaseBackupFrequency() time.Duration { - return c.getWithFallback("DatabaseBackupFrequency", parse.Duration).(time.Duration) + return getEnvWithFallback(c, envvar.NewDuration("DatabaseBackupFrequency")) } // DatabaseBackupURL configures the URL for the database to backup, if it's to be different from the main on @@ -559,7 +565,7 @@ func (c *generalConfig) DatabaseBackupURL() *url.URL { // DatabaseBackupOnVersionUpgrade controls whether an automatic backup will be // taken before migrations are run, if the node version has been bumped func (c *generalConfig) DatabaseBackupOnVersionUpgrade() bool { - return c.getWithFallback("DatabaseBackupOnVersionUpgrade", parse.Bool).(bool) + return getEnvWithFallback(c, envvar.NewBool("DatabaseBackupOnVersionUpgrade")) } // 
DatabaseBackupDir configures the directory for saving the backup file, if it's to be different from default one located in the RootDir @@ -596,7 +602,7 @@ func (c *generalConfig) DefaultHTTPLimit() int64 { // DefaultHTTPTimeout defines the default timeout for http requests func (c *generalConfig) DefaultHTTPTimeout() models.Duration { - return models.MustMakeDuration(c.getWithFallback("DefaultHTTPTimeout", parse.Duration).(time.Duration)) + return models.MustMakeDuration(getEnvWithFallback(c, envvar.NewDuration("DefaultHTTPTimeout"))) } // DefaultHTTPAllowUnrestrictedNetworkAccess controls whether http requests are unrestricted by default @@ -613,7 +619,7 @@ func (c *generalConfig) Dev() bool { // ShutdownGracePeriod is the maximum duration of graceful application shutdown. // If exceeded, it will try closing DB lock and connection and exit immediately to avoid SIGKILL. func (c *generalConfig) ShutdownGracePeriod() time.Duration { - return c.getWithFallback("ShutdownGracePeriod", parse.Duration).(time.Duration) + return getEnvWithFallback(c, envvar.NewDuration("ShutdownGracePeriod")) } // FeatureExternalInitiators enables the External Initiator feature. @@ -626,14 +632,18 @@ func (c *generalConfig) FeatureFeedsManager() bool { return c.viper.GetBool(envvar.Name("FeatureFeedsManager")) } +func (c *generalConfig) FeatureLogPoller() bool { + return c.viper.GetBool(envvar.Name("FeatureLogPoller")) +} + // FeatureOffchainReporting enables the OCR job type. func (c *generalConfig) FeatureOffchainReporting() bool { - return c.getWithFallback("FeatureOffchainReporting", parse.Bool).(bool) + return getEnvWithFallback(c, envvar.NewBool("FeatureOffchainReporting")) } // FeatureOffchainReporting2 enables the OCR2 job type. 
func (c *generalConfig) FeatureOffchainReporting2() bool { - return c.getWithFallback("FeatureOffchainReporting2", parse.Bool).(bool) + return getEnvWithFallback(c, envvar.NewBool("FeatureOffchainReporting2")) } // FMDefaultTransactionQueueDepth controls the queue size for DropOldestStrategy in Flux Monitor @@ -760,36 +770,36 @@ func (c *generalConfig) InsecureSkipVerify() bool { } func (c *generalConfig) TriggerFallbackDBPollInterval() time.Duration { - return c.getWithFallback("TriggerFallbackDBPollInterval", parse.Duration).(time.Duration) + return getEnvWithFallback(c, envvar.NewDuration("TriggerFallbackDBPollInterval")) } // JobPipelineMaxRunDuration is the maximum time that a job run may take func (c *generalConfig) JobPipelineMaxRunDuration() time.Duration { - return c.getWithFallback("JobPipelineMaxRunDuration", parse.Duration).(time.Duration) + return getEnvWithFallback(c, envvar.JobPipelineMaxRunDuration) } func (c *generalConfig) JobPipelineResultWriteQueueDepth() uint64 { - return c.getWithFallback("JobPipelineResultWriteQueueDepth", parse.Uint64).(uint64) + return getEnvWithFallback(c, envvar.JobPipelineResultWriteQueueDepth) } func (c *generalConfig) JobPipelineReaperInterval() time.Duration { - return c.getWithFallback("JobPipelineReaperInterval", parse.Duration).(time.Duration) + return getEnvWithFallback(c, envvar.JobPipelineReaperInterval) } func (c *generalConfig) JobPipelineReaperThreshold() time.Duration { - return c.getWithFallback("JobPipelineReaperThreshold", parse.Duration).(time.Duration) + return getEnvWithFallback(c, envvar.JobPipelineReaperThreshold) } // KeeperRegistryCheckGasOverhead is the amount of extra gas to provide checkUpkeep() calls // to account for the gas consumed by the keeper registry func (c *generalConfig) KeeperRegistryCheckGasOverhead() uint64 { - return c.getWithFallback("KeeperRegistryCheckGasOverhead", parse.Uint64).(uint64) + return getEnvWithFallback(c, envvar.KeeperRegistryCheckGasOverhead) } // 
KeeperRegistryPerformGasOverhead is the amount of extra gas to provide performUpkeep() calls // to account for the gas consumed by the keeper registry func (c *generalConfig) KeeperRegistryPerformGasOverhead() uint64 { - return c.getWithFallback("KeeperRegistryPerformGasOverhead", parse.Uint64).(uint64) + return getEnvWithFallback(c, envvar.KeeperRegistryPerformGasOverhead) } // KeeperDefaultTransactionQueueDepth controls the queue size for DropOldestStrategy in Keeper @@ -819,7 +829,7 @@ func (c *generalConfig) KeeperBaseFeeBufferPercent() uint32 { // KeeperRegistrySyncInterval is the interval in which the RegistrySynchronizer performs a full // sync of the keeper registry contract it is tracking func (c *generalConfig) KeeperRegistrySyncInterval() time.Duration { - return c.getWithFallback("KeeperRegistrySyncInterval", parse.Duration).(time.Duration) + return getEnvWithFallback(c, envvar.KeeperRegistrySyncInterval) } // KeeperMaximumGracePeriod is the maximum number of blocks that a keeper will wait after performing @@ -830,31 +840,33 @@ func (c *generalConfig) KeeperMaximumGracePeriod() int64 { // KeeperRegistrySyncUpkeepQueueSize represents the maximum number of upkeeps that can be synced in parallel func (c *generalConfig) KeeperRegistrySyncUpkeepQueueSize() uint32 { - return c.getWithFallback("KeeperRegistrySyncUpkeepQueueSize", parse.Uint32).(uint32) + return getEnvWithFallback(c, envvar.KeeperRegistrySyncUpkeepQueueSize) } // KeeperCheckUpkeepGasPriceFeatureEnabled enables keepers to include a gas price when running checkUpkeep func (c *generalConfig) KeeperCheckUpkeepGasPriceFeatureEnabled() bool { - return c.getWithFallback("KeeperCheckUpkeepGasPriceFeatureEnabled", parse.Bool).(bool) + return getEnvWithFallback(c, envvar.NewBool("KeeperCheckUpkeepGasPriceFeatureEnabled")) +} + +// KeeperTurnLookBack represents the number of blocks in the past to loo back when getting block for turn +func (c *generalConfig) KeeperTurnLookBack() int64 { + return 
c.viper.GetInt64(envvar.Name("KeeperTurnLookBack")) +} + +// KeeperTurnFlagEnabled enables new turn taking algo for keepers +func (c *generalConfig) KeeperTurnFlagEnabled() bool { + return getEnvWithFallback(c, envvar.NewBool("KeeperTurnFlagEnabled")) } // JSONConsole when set to true causes logging to be made in JSON format // If set to false, logs in console format func (c *generalConfig) JSONConsole() bool { - return c.getEnvWithFallback(envvar.JSONConsole).(bool) + return getEnvWithFallback(c, envvar.JSONConsole) } // ExplorerURL returns the websocket URL for this node to push stats to, or nil. func (c *generalConfig) ExplorerURL() *url.URL { - rval := c.getWithFallback("ExplorerURL", parse.URL) - switch t := rval.(type) { - case nil: - return nil - case *url.URL: - return t - default: - panic(fmt.Sprintf("invariant: ExplorerURL returned as type %T", rval)) - } + return getEnvWithFallback(c, envvar.New("ExplorerURL", url.Parse)) } // ExplorerAccessKey returns the access key for authenticating with explorer @@ -867,17 +879,21 @@ func (c *generalConfig) ExplorerSecret() string { return c.viper.GetString(envvar.Name("ExplorerSecret")) } +// SolanaNodes is a hack to allow node operators to give a JSON string that +// sets up multiple nodes +func (c *generalConfig) SolanaNodes() string { + return c.viper.GetString(envvar.Name("SolanaNodes")) +} + +// TerraNodes is a hack to allow node operators to give a JSON string that +// sets up multiple nodes +func (c *generalConfig) TerraNodes() string { + return c.viper.GetString(envvar.Name("TerraNodes")) +} + // TelemetryIngressURL returns the WSRPC URL for this node to push telemetry to, or nil. 
func (c *generalConfig) TelemetryIngressURL() *url.URL { - rval := c.getWithFallback("TelemetryIngressURL", parse.URL) - switch t := rval.(type) { - case nil: - return nil - case *url.URL: - return t - default: - panic(fmt.Sprintf("invariant: TelemetryIngressURL returned as type %T", rval)) - } + return getEnvWithFallback(c, envvar.New("TelemetryIngressURL", url.Parse)) } // TelemetryIngressServerPubKey returns the public key to authenticate the telemetry ingress server @@ -912,7 +928,7 @@ func (c *generalConfig) TelemetryIngressUseBatchSend() bool { // TelemetryIngressLogging toggles very verbose logging of raw telemetry messages for the TelemetryIngressClient func (c *generalConfig) TelemetryIngressLogging() bool { - return c.getWithFallback("TelemetryIngressLogging", parse.Bool).(bool) + return getEnvWithFallback(c, envvar.NewBool("TelemetryIngressLogging")) } // TelemetryIngressUniconn toggles which ws connection style is used. @@ -921,11 +937,11 @@ func (c *generalConfig) TelemetryIngressUniConn() bool { } func (c *generalConfig) ORMMaxOpenConns() int { - return int(c.getWithFallback("ORMMaxOpenConns", parse.Uint16).(uint16)) + return int(getEnvWithFallback(c, envvar.NewUint16("ORMMaxOpenConns"))) } func (c *generalConfig) ORMMaxIdleConns() int { - return int(c.getWithFallback("ORMMaxIdleConns", parse.Uint16).(uint16)) + return int(getEnvWithFallback(c, envvar.NewUint16("ORMMaxIdleConns"))) } // LogLevel represents the maximum level of log messages to output. @@ -950,18 +966,18 @@ func (c *generalConfig) SetLogLevel(lvl zapcore.Level) error { // LogFileMaxSize configures disk preservation of logs max size (in megabytes) before file rotation. func (c *generalConfig) LogFileMaxSize() utils.FileSize { - return c.getWithFallback("LogFileMaxSize", parse.FileSize).(utils.FileSize) + return getEnvWithFallback(c, envvar.LogFileMaxSize) } // LogFileMaxAge configures disk preservation of logs max age (in days) before file rotation. 
func (c *generalConfig) LogFileMaxAge() int64 { - return c.getWithFallback("LogFileMaxAge", parse.Int64).(int64) + return getEnvWithFallback(c, envvar.LogFileMaxAge) } // LogFileMaxBackups configures disk preservation of the max amount of old log files to retain. // If this is set to 0, the node will retain all old log files instead. func (c *generalConfig) LogFileMaxBackups() int64 { - return c.getWithFallback("LogFileMaxBackups", parse.Int64).(int64) + return getEnvWithFallback(c, envvar.LogFileMaxBackups) } // LogSQL tells chainlink to log all SQL statements made using the default logger @@ -980,12 +996,12 @@ func (c *generalConfig) SetLogSQL(logSQL bool) { // LogUnixTimestamps if set to true will log with timestamp in unix format, otherwise uses ISO8601 func (c *generalConfig) LogUnixTimestamps() bool { - return c.getEnvWithFallback(envvar.LogUnixTS).(bool) + return getEnvWithFallback(c, envvar.LogUnixTS) } // Port represents the port Chainlink should listen on for client requests. func (c *generalConfig) Port() uint16 { - return c.getWithFallback("Port", parse.Uint16).(uint16) + return getEnvWithFallback(c, envvar.NewUint16("Port")) } // DefaultChainID represents the chain ID which jobs will use if one is not explicitly specified @@ -1000,7 +1016,7 @@ func (c *generalConfig) DefaultChainID() *big.Int { "error", err) return nil } - return v.(*big.Int) + return v } return nil @@ -1010,18 +1026,18 @@ func (c *generalConfig) DefaultChainID() *big.Int { // socket open for writing a response to an HTTP request. This sometimes needs // to be increased for pprof. 
func (c *generalConfig) HTTPServerWriteTimeout() time.Duration { - return c.getWithFallback("HTTPServerWriteTimeout", parse.Duration).(time.Duration) + return getEnvWithFallback(c, envvar.HTTPServerWriteTimeout) } // ReaperExpiration represents how long a session is held in the DB before being cleared func (c *generalConfig) ReaperExpiration() models.Duration { - return models.MustMakeDuration(c.getWithFallback("ReaperExpiration", parse.Duration).(time.Duration)) + return models.MustMakeDuration(getEnvWithFallback(c, envvar.NewDuration("ReaperExpiration"))) } // RootDir represents the location on the file system where Chainlink should // keep its files. func (c *generalConfig) RootDir() string { - return c.getEnvWithFallback(envvar.RootDir).(string) + return getEnvWithFallback(c, envvar.RootDir) } // RPID Fetches the RPID used for WebAuthn sessions. The RPID value should be the FQDN (localhost) @@ -1042,7 +1058,7 @@ func (c *generalConfig) SecureCookies() bool { // SessionTimeout is the maximum duration that a user session can persist without any activity. func (c *generalConfig) SessionTimeout() models.Duration { - return models.MustMakeDuration(c.getWithFallback("SessionTimeout", parse.Duration).(time.Duration)) + return models.MustMakeDuration(getEnvWithFallback(c, envvar.NewDuration("SessionTimeout"))) } // TLSCertPath represents the file system location of the TLS certificate @@ -1065,7 +1081,7 @@ func (c *generalConfig) TLSKeyPath() string { // TLSPort represents the port Chainlink should listen on for encrypted client requests. 
func (c *generalConfig) TLSPort() uint16 { - return c.getWithFallback("TLSPort", parse.Uint16).(uint16) + return getEnvWithFallback(c, envvar.NewUint16("TLSPort")) } // TLSRedirect forces TLS redirect for unencrypted connections @@ -1080,7 +1096,7 @@ func (c *generalConfig) UnAuthenticatedRateLimit() int64 { // UnAuthenticatedRateLimitPeriod defines the period to which unauthenticated requests get limited func (c *generalConfig) UnAuthenticatedRateLimitPeriod() models.Duration { - return models.MustMakeDuration(c.getWithFallback("UnAuthenticatedRateLimitPeriod", parse.Duration).(time.Duration)) + return models.MustMakeDuration(getEnvWithFallback(c, envvar.NewDuration("UnAuthenticatedRateLimitPeriod"))) } func (c *generalConfig) TLSDir() string { @@ -1121,10 +1137,10 @@ func (c *generalConfig) SessionOptions() sessions.Options { // Deprecated - prefer getEnvWithFallback with an EnvVar func (c *generalConfig) getWithFallback(name string, parser func(string) (interface{}, error)) interface{} { - return c.getEnvWithFallback(envvar.New(name, parser)) + return getEnvWithFallback(c, envvar.New(name, parser)) } -func (c *generalConfig) getEnvWithFallback(e *envvar.EnvVar) interface{} { +func getEnvWithFallback[T any](c *generalConfig, e *envvar.EnvVar[T]) T { v, invalid, err := e.ParseFrom(c.viper.GetString) if err != nil { c.lggr.Panic(err) @@ -1143,7 +1159,7 @@ var ( DatabaseBackupModeFull DatabaseBackupMode = "full" ) -func parseDatabaseBackupMode(s string) (interface{}, error) { +func parseDatabaseBackupMode(s string) (DatabaseBackupMode, error) { switch DatabaseBackupMode(s) { case DatabaseBackupModeNone, DatabaseBackupModeLite, DatabaseBackupModeFull: return DatabaseBackupMode(s), nil @@ -1152,10 +1168,10 @@ func parseDatabaseBackupMode(s string) (interface{}, error) { } } -func (c *generalConfig) lookupEnv(k string, parse func(string) (interface{}, error)) (interface{}, bool) { +func lookupEnv[T any](c *generalConfig, k string, parse func(string) (T, error)) (t T, 
ok bool) { s, ok := os.LookupEnv(k) if !ok { - return nil, false + return } val, err := parse(s) if err == nil { @@ -1163,330 +1179,157 @@ func (c *generalConfig) lookupEnv(k string, parse func(string) (interface{}, err } c.lggr.Errorw(fmt.Sprintf("Invalid value provided for %s, falling back to default.", s), "value", s, "key", k, "error", err) - return nil, false + return } // EVM methods func (c *generalConfig) GlobalBalanceMonitorEnabled() (bool, bool) { - val, ok := c.lookupEnv(envvar.Name("BalanceMonitorEnabled"), parse.Bool) - if val == nil { - return false, false - } - return val.(bool), ok + return lookupEnv(c, envvar.Name("BalanceMonitorEnabled"), strconv.ParseBool) } func (c *generalConfig) GlobalBlockEmissionIdleWarningThreshold() (time.Duration, bool) { - val, ok := c.lookupEnv(envvar.Name("BlockEmissionIdleWarningThreshold"), parse.Duration) - if val == nil { - return 0, false - } - return val.(time.Duration), ok + return lookupEnv(c, envvar.Name("BlockEmissionIdleWarningThreshold"), time.ParseDuration) } func (c *generalConfig) GlobalBlockHistoryEstimatorBatchSize() (uint32, bool) { - val, ok := c.lookupEnv(envvar.Name("BlockHistoryEstimatorBatchSize"), parse.Uint32) - if val == nil { - return 0, false - } - return val.(uint32), ok + return lookupEnv(c, envvar.Name("BlockHistoryEstimatorBatchSize"), parse.Uint32) } func (c *generalConfig) GlobalBlockHistoryEstimatorBlockDelay() (uint16, bool) { - val, ok := c.lookupEnv(envvar.Name("BlockHistoryEstimatorBlockDelay"), parse.Uint16) - if val == nil { - return 0, false - } - return val.(uint16), ok + return lookupEnv(c, envvar.Name("BlockHistoryEstimatorBlockDelay"), parse.Uint16) } func (c *generalConfig) GlobalBlockHistoryEstimatorBlockHistorySize() (uint16, bool) { - val, ok := c.lookupEnv(envvar.Name("BlockHistoryEstimatorBlockHistorySize"), parse.Uint16) - if val == nil { - return 0, false - } - return val.(uint16), ok + return lookupEnv(c, envvar.Name("BlockHistoryEstimatorBlockHistorySize"), 
parse.Uint16) } func (c *generalConfig) GlobalBlockHistoryEstimatorTransactionPercentile() (uint16, bool) { - val, ok := c.lookupEnv(envvar.Name("BlockHistoryEstimatorTransactionPercentile"), parse.Uint16) - if val == nil { - return 0, false - } - return val.(uint16), ok + return lookupEnv(c, envvar.Name("BlockHistoryEstimatorTransactionPercentile"), parse.Uint16) } func (c *generalConfig) GlobalEthTxReaperInterval() (time.Duration, bool) { - val, ok := c.lookupEnv(envvar.Name("EthTxReaperInterval"), parse.Duration) - if val == nil { - return 0, false - } - return val.(time.Duration), ok + return lookupEnv(c, envvar.Name("EthTxReaperInterval"), time.ParseDuration) } func (c *generalConfig) GlobalEthTxReaperThreshold() (time.Duration, bool) { - val, ok := c.lookupEnv(envvar.Name("EthTxReaperThreshold"), parse.Duration) - if val == nil { - return 0, false - } - return val.(time.Duration), ok + return lookupEnv(c, envvar.Name("EthTxReaperThreshold"), time.ParseDuration) } func (c *generalConfig) GlobalEthTxResendAfterThreshold() (time.Duration, bool) { - val, ok := c.lookupEnv(envvar.Name("EthTxResendAfterThreshold"), parse.Duration) - if val == nil { - return 0, false - } - return val.(time.Duration), ok -} -func (c *generalConfig) GlobalEvmDefaultBatchSize() (uint32, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmDefaultBatchSize"), parse.Uint32) - if val == nil { - return 0, false - } - return val.(uint32), ok + return lookupEnv(c, envvar.Name("EthTxResendAfterThreshold"), time.ParseDuration) } func (c *generalConfig) GlobalEvmFinalityDepth() (uint32, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmFinalityDepth"), parse.Uint32) - if val == nil { - return 0, false - } - return val.(uint32), ok + return lookupEnv(c, envvar.Name("EvmFinalityDepth"), parse.Uint32) } func (c *generalConfig) GlobalEvmGasBumpPercent() (uint16, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmGasBumpPercent"), parse.Uint16) - if val == nil { - return 0, false - } - return val.(uint16), 
ok + return lookupEnv(c, envvar.Name("EvmGasBumpPercent"), parse.Uint16) } func (c *generalConfig) GlobalEvmGasBumpThreshold() (uint64, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmGasBumpThreshold"), parse.Uint64) - if val == nil { - return 0, false - } - return val.(uint64), ok + return lookupEnv(c, envvar.Name("EvmGasBumpThreshold"), parse.Uint64) } func (c *generalConfig) GlobalEvmGasBumpTxDepth() (uint16, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmGasBumpTxDepth"), parse.Uint16) - if val == nil { - return 0, false - } - return val.(uint16), ok + return lookupEnv(c, envvar.Name("EvmGasBumpTxDepth"), parse.Uint16) } func (c *generalConfig) GlobalEvmGasBumpWei() (*big.Int, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmGasBumpWei"), parse.BigInt) - if val == nil { - return nil, false - } - return val.(*big.Int), ok + return lookupEnv(c, envvar.Name("EvmGasBumpWei"), parse.BigInt) } func (c *generalConfig) GlobalEvmGasFeeCapDefault() (*big.Int, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmGasFeeCapDefault"), parse.BigInt) - if val == nil { - return nil, false - } - return val.(*big.Int), ok + return lookupEnv(c, envvar.Name("EvmGasFeeCapDefault"), parse.BigInt) } func (c *generalConfig) GlobalBlockHistoryEstimatorEIP1559FeeCapBufferBlocks() (uint16, bool) { - val, ok := c.lookupEnv(envvar.Name("BlockHistoryEstimatorEIP1559FeeCapBufferBlocks"), parse.Uint16) - if val == nil { - return 0, false - } - return val.(uint16), ok + return lookupEnv(c, envvar.Name("BlockHistoryEstimatorEIP1559FeeCapBufferBlocks"), parse.Uint16) } func (c *generalConfig) GlobalEvmGasLimitDefault() (uint64, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmGasLimitDefault"), parse.Uint64) - if val == nil { - return 0, false - } - return val.(uint64), ok + return lookupEnv(c, envvar.Name("EvmGasLimitDefault"), parse.Uint64) } func (c *generalConfig) GlobalEvmGasLimitMultiplier() (float32, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmGasLimitMultiplier"), parse.F32) - if val 
== nil { - return 0, false - } - return val.(float32), ok + return lookupEnv(c, envvar.Name("EvmGasLimitMultiplier"), parse.F32) } func (c *generalConfig) GlobalEvmGasLimitTransfer() (uint64, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmGasLimitTransfer"), parse.Uint64) - if val == nil { - return 0, false - } - return val.(uint64), ok + return lookupEnv(c, envvar.Name("EvmGasLimitTransfer"), parse.Uint64) } func (c *generalConfig) GlobalEvmGasPriceDefault() (*big.Int, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmGasPriceDefault"), parse.BigInt) - if val == nil { - return nil, false - } - return val.(*big.Int), ok + return lookupEnv(c, envvar.Name("EvmGasPriceDefault"), parse.BigInt) } func (c *generalConfig) GlobalEvmHeadTrackerHistoryDepth() (uint32, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmHeadTrackerHistoryDepth"), parse.Uint32) - if val == nil { - return 0, false - } - return val.(uint32), ok + return lookupEnv(c, envvar.Name("EvmHeadTrackerHistoryDepth"), parse.Uint32) } func (c *generalConfig) GlobalEvmHeadTrackerMaxBufferSize() (uint32, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmHeadTrackerMaxBufferSize"), parse.Uint32) - if val == nil { - return 0, false - } - return val.(uint32), ok + return lookupEnv(c, envvar.Name("EvmHeadTrackerMaxBufferSize"), parse.Uint32) } func (c *generalConfig) GlobalEvmHeadTrackerSamplingInterval() (time.Duration, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmHeadTrackerSamplingInterval"), parse.Duration) - if val == nil { - return 0, false - } - return val.(time.Duration), ok + return lookupEnv(c, envvar.Name("EvmHeadTrackerSamplingInterval"), time.ParseDuration) } func (c *generalConfig) GlobalEvmLogBackfillBatchSize() (uint32, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmLogBackfillBatchSize"), parse.Uint32) - if val == nil { - return 0, false - } - return val.(uint32), ok + return lookupEnv(c, envvar.Name("EvmLogBackfillBatchSize"), parse.Uint32) +} +func (c *generalConfig) 
GlobalEvmLogPollInterval() (time.Duration, bool) { + return lookupEnv(c, envvar.Name("EvmLogPollInterval"), time.ParseDuration) } func (c *generalConfig) GlobalEvmMaxGasPriceWei() (*big.Int, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmMaxGasPriceWei"), parse.BigInt) - if val == nil { - return nil, false - } - return val.(*big.Int), ok + return lookupEnv(c, envvar.Name("EvmMaxGasPriceWei"), parse.BigInt) } func (c *generalConfig) GlobalEvmMaxInFlightTransactions() (uint32, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmMaxInFlightTransactions"), parse.Uint32) - if val == nil { - return 0, false - } - return val.(uint32), ok + return lookupEnv(c, envvar.Name("EvmMaxInFlightTransactions"), parse.Uint32) } func (c *generalConfig) GlobalEvmMaxQueuedTransactions() (uint64, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmMaxQueuedTransactions"), parse.Uint64) - if val == nil { - return 0, false - } - return val.(uint64), ok + return lookupEnv(c, envvar.Name("EvmMaxQueuedTransactions"), parse.Uint64) } func (c *generalConfig) GlobalEvmMinGasPriceWei() (*big.Int, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmMinGasPriceWei"), parse.BigInt) - if val == nil { - return nil, false - } - return val.(*big.Int), ok + return lookupEnv(c, envvar.Name("EvmMinGasPriceWei"), parse.BigInt) } func (c *generalConfig) GlobalEvmNonceAutoSync() (bool, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmNonceAutoSync"), parse.Bool) - if val == nil { - return false, false - } - return val.(bool), ok + return lookupEnv(c, envvar.Name("EvmNonceAutoSync"), strconv.ParseBool) +} +func (c *generalConfig) GlobalEvmUseForwarders() (bool, bool) { + return lookupEnv(c, envvar.Name("EvmUseForwarders"), strconv.ParseBool) } func (c *generalConfig) GlobalEvmRPCDefaultBatchSize() (uint32, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmRPCDefaultBatchSize"), parse.Uint32) - if val == nil { - return 0, false - } - return val.(uint32), ok + return lookupEnv(c, envvar.Name("EvmRPCDefaultBatchSize"), 
parse.Uint32) } func (c *generalConfig) GlobalFlagsContractAddress() (string, bool) { - val, ok := c.lookupEnv(envvar.Name("FlagsContractAddress"), parse.String) - if val == nil { - return "", false - } - return val.(string), ok + return lookupEnv(c, envvar.Name("FlagsContractAddress"), parse.String) } func (c *generalConfig) GlobalGasEstimatorMode() (string, bool) { - val, ok := c.lookupEnv(envvar.Name("GasEstimatorMode"), parse.String) - if val == nil { - return "", false - } - return val.(string), ok + return lookupEnv(c, envvar.Name("GasEstimatorMode"), parse.String) } // GlobalChainType overrides all chains and forces them to act as a particular // chain type. List of chain types is given in `chaintype.go`. func (c *generalConfig) GlobalChainType() (string, bool) { - val, ok := c.lookupEnv(envvar.Name("ChainType"), parse.String) - if val == nil { - return "", false - } - return val.(string), ok + return lookupEnv(c, envvar.Name("ChainType"), parse.String) } func (c *generalConfig) GlobalLinkContractAddress() (string, bool) { - val, ok := c.lookupEnv(envvar.Name("LinkContractAddress"), parse.String) - if val == nil { - return "", false - } - return val.(string), ok + return lookupEnv(c, envvar.Name("LinkContractAddress"), parse.String) } func (c *generalConfig) GlobalMinIncomingConfirmations() (uint32, bool) { - val, ok := c.lookupEnv(envvar.Name("MinIncomingConfirmations"), parse.Uint32) - if val == nil { - return 0, false - } - return val.(uint32), ok + return lookupEnv(c, envvar.Name("MinIncomingConfirmations"), parse.Uint32) } func (c *generalConfig) GlobalMinRequiredOutgoingConfirmations() (uint64, bool) { - val, ok := c.lookupEnv(envvar.Name("MinRequiredOutgoingConfirmations"), parse.Uint64) - if val == nil { - return 0, false - } - return val.(uint64), ok + return lookupEnv(c, envvar.Name("MinRequiredOutgoingConfirmations"), parse.Uint64) } func (c *generalConfig) GlobalMinimumContractPayment() (*assets.Link, bool) { - val, ok := 
c.lookupEnv(envvar.Name("MinimumContractPayment"), parse.Link) - if val == nil { - return nil, false - } - return val.(*assets.Link), ok + return lookupEnv(c, envvar.Name("MinimumContractPayment"), parse.Link) } func (c *generalConfig) GlobalEvmEIP1559DynamicFees() (bool, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmEIP1559DynamicFees"), parse.Bool) - if val == nil { - return false, false - } - return val.(bool), ok + return lookupEnv(c, envvar.Name("EvmEIP1559DynamicFees"), strconv.ParseBool) } func (c *generalConfig) GlobalEvmGasTipCapDefault() (*big.Int, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmGasTipCapDefault"), parse.BigInt) - if val == nil { - return nil, false - } - return val.(*big.Int), ok + return lookupEnv(c, envvar.Name("EvmGasTipCapDefault"), parse.BigInt) } func (c *generalConfig) GlobalEvmGasTipCapMinimum() (*big.Int, bool) { - val, ok := c.lookupEnv(envvar.Name("EvmGasTipCapMinimum"), parse.BigInt) - if val == nil { - return nil, false - } - return val.(*big.Int), ok + return lookupEnv(c, envvar.Name("EvmGasTipCapMinimum"), parse.BigInt) } func (c *generalConfig) GlobalNodeNoNewHeadsThreshold() (time.Duration, bool) { - val, ok := c.lookupEnv(envvar.Name("NodeNoNewHeadsThreshold"), parse.Duration) - if val == nil { - return 0, false - } - return val.(time.Duration), ok + return lookupEnv(c, envvar.Name("NodeNoNewHeadsThreshold"), time.ParseDuration) } func (c *generalConfig) GlobalNodePollFailureThreshold() (uint32, bool) { - val, ok := c.lookupEnv(envvar.Name("NodePollFailureThreshold"), parse.Uint32) - if val == nil { - return 0, false - } - return val.(uint32), ok + return lookupEnv(c, envvar.Name("NodePollFailureThreshold"), parse.Uint32) } func (c *generalConfig) GlobalNodePollInterval() (time.Duration, bool) { - val, ok := c.lookupEnv(envvar.Name("NodePollInterval"), parse.Duration) - if val == nil { - return 0, false - } - return val.(time.Duration), ok + return lookupEnv(c, envvar.Name("NodePollInterval"), time.ParseDuration) } 
// DatabaseLockingMode can be one of 'dual', 'advisorylock', 'lease' or 'none' // It controls which mode to use to enforce that only one Chainlink application can use the database func (c *generalConfig) DatabaseLockingMode() string { - return c.getWithFallback("DatabaseLockingMode", parse.String).(string) + return getEnvWithFallback(c, envvar.NewString("DatabaseLockingMode")) } // LeaseLockRefreshInterval controls how often the node should attempt to @@ -1504,7 +1347,7 @@ func (c *generalConfig) LeaseLockDuration() time.Duration { // AdvisoryLockID is the application advisory lock ID. Should match all other // chainlink applications that might access this database func (c *generalConfig) AdvisoryLockID() int64 { - return c.getWithFallback("AdvisoryLockID", parse.Int64).(int64) + return getEnvWithFallback(c, envvar.AdvisoryLockID) } // AdvisoryLockCheckInterval controls how often Chainlink will check to make diff --git a/core/config/mocks/general_config.go b/core/config/mocks/general_config.go index 19243a0f2e2..cde9375b731 100644 --- a/core/config/mocks/general_config.go +++ b/core/config/mocks/general_config.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks @@ -785,6 +785,20 @@ func (_m *GeneralConfig) FeatureFeedsManager() bool { return r0 } +// FeatureLogPoller provides a mock function with given fields: +func (_m *GeneralConfig) FeatureLogPoller() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + // FeatureOffchainReporting provides a mock function with given fields: func (_m *GeneralConfig) FeatureOffchainReporting() bool { ret := _m.Called() @@ -1086,27 +1100,6 @@ func (_m *GeneralConfig) GlobalEthTxResendAfterThreshold() (time.Duration, bool) return r0, r1 } -// GlobalEvmDefaultBatchSize provides a mock function with given fields: -func (_m *GeneralConfig) GlobalEvmDefaultBatchSize() (uint32, bool) { - ret := _m.Called() - - var r0 uint32 - if rf, ok := ret.Get(0).(func() uint32); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint32) - } - - var r1 bool - if rf, ok := ret.Get(1).(func() bool); ok { - r1 = rf() - } else { - r1 = ret.Get(1).(bool) - } - - return r0, r1 -} - // GlobalEvmEIP1559DynamicFees provides a mock function with given fields: func (_m *GeneralConfig) GlobalEvmEIP1559DynamicFees() (bool, bool) { ret := _m.Called() @@ -1474,6 +1467,27 @@ func (_m *GeneralConfig) GlobalEvmLogBackfillBatchSize() (uint32, bool) { return r0, r1 } +// GlobalEvmLogPollInterval provides a mock function with given fields: +func (_m *GeneralConfig) GlobalEvmLogPollInterval() (time.Duration, bool) { + ret := _m.Called() + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + var r1 bool + if rf, ok := ret.Get(1).(func() bool); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + // GlobalEvmMaxGasPriceWei provides a mock function with given fields: func (_m *GeneralConfig) GlobalEvmMaxGasPriceWei() (*big.Int, bool) { ret := _m.Called() @@ -1604,6 +1618,27 @@ func (_m *GeneralConfig) 
GlobalEvmRPCDefaultBatchSize() (uint32, bool) { return r0, r1 } +// GlobalEvmUseForwarders provides a mock function with given fields: +func (_m *GeneralConfig) GlobalEvmUseForwarders() (bool, bool) { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + var r1 bool + if rf, ok := ret.Get(1).(func() bool); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + // GlobalFlagsContractAddress provides a mock function with given fields: func (_m *GeneralConfig) GlobalFlagsContractAddress() (string, bool) { ret := _m.Called() @@ -2131,6 +2166,34 @@ func (_m *GeneralConfig) KeeperRegistrySyncUpkeepQueueSize() uint32 { return r0 } +// KeeperTurnFlagEnabled provides a mock function with given fields: +func (_m *GeneralConfig) KeeperTurnFlagEnabled() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// KeeperTurnLookBack provides a mock function with given fields: +func (_m *GeneralConfig) KeeperTurnLookBack() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + // KeyFile provides a mock function with given fields: func (_m *GeneralConfig) KeyFile() string { ret := _m.Called() @@ -3209,6 +3272,34 @@ func (_m *GeneralConfig) SolanaEnabled() bool { return r0 } +// SolanaNodes provides a mock function with given fields: +func (_m *GeneralConfig) SolanaNodes() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// TerraNodes provides a mock function with given fields: +func (_m *GeneralConfig) TerraNodes() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = 
ret.Get(0).(string) + } + + return r0 +} + // TLSCertPath provides a mock function with given fields: func (_m *GeneralConfig) TLSCertPath() string { ret := _m.Called() diff --git a/core/config/ocr1_config.go b/core/config/ocr1_config.go index dfe6c5281cd..287eb8a1354 100644 --- a/core/config/ocr1_config.go +++ b/core/config/ocr1_config.go @@ -36,35 +36,19 @@ func (c *generalConfig) getDuration(field string) time.Duration { } func (c *generalConfig) GlobalOCRContractConfirmations() (uint16, bool) { - val, ok := c.lookupEnv(envvar.Name("OCRContractConfirmations"), parse.Uint16) - if val == nil { - return 0, false - } - return val.(uint16), ok + return lookupEnv(c, envvar.Name("OCRContractConfirmations"), parse.Uint16) } func (c *generalConfig) GlobalOCRObservationGracePeriod() (time.Duration, bool) { - val, ok := c.lookupEnv(envvar.Name("OCRObservationGracePeriod"), parse.Duration) - if val == nil { - return 0, false - } - return val.(time.Duration), ok + return lookupEnv(c, envvar.Name("OCRObservationGracePeriod"), time.ParseDuration) } func (c *generalConfig) GlobalOCRContractTransmitterTransmitTimeout() (time.Duration, bool) { - val, ok := c.lookupEnv(envvar.Name("OCRContractTransmitterTransmitTimeout"), parse.Duration) - if val == nil { - return 0, false - } - return val.(time.Duration), ok + return lookupEnv(c, envvar.Name("OCRContractTransmitterTransmitTimeout"), time.ParseDuration) } func (c *generalConfig) GlobalOCRDatabaseTimeout() (time.Duration, bool) { - val, ok := c.lookupEnv(envvar.Name("OCRDatabaseTimeout"), parse.Duration) - if val == nil { - return 0, false - } - return val.(time.Duration), ok + return lookupEnv(c, envvar.Name("OCRDatabaseTimeout"), time.ParseDuration) } func (c *generalConfig) OCRContractPollInterval() time.Duration { diff --git a/core/config/ocr2_config.go b/core/config/ocr2_config.go index 9781916c1c3..2b4f06f5f1d 100644 --- a/core/config/ocr2_config.go +++ b/core/config/ocr2_config.go @@ -26,7 +26,7 @@ type OCR2Config interface 
{ } func (c *generalConfig) OCR2ContractConfirmations() uint16 { - return c.getWithFallback("OCR2ContractConfirmations", parse.Uint16).(uint16) + return getEnvWithFallback(c, envvar.NewUint16("OCR2ContractConfirmations")) } func (c *generalConfig) OCR2ContractPollInterval() time.Duration { diff --git a/core/config/p2p_config.go b/core/config/p2p_config.go index c5d509e2d0b..6d02ac52158 100644 --- a/core/config/p2p_config.go +++ b/core/config/p2p_config.go @@ -8,7 +8,6 @@ import ( ocrnetworking "github.com/smartcontractkit/libocr/networking" "github.com/smartcontractkit/chainlink/core/config/envvar" - "github.com/smartcontractkit/chainlink/core/config/parse" "github.com/smartcontractkit/chainlink/core/services/keystore/keys/p2pkey" ) @@ -61,14 +60,14 @@ func (c *generalConfig) P2PIncomingMessageBufferSize() int { if c.OCRIncomingMessageBufferSize() != 0 { return c.OCRIncomingMessageBufferSize() } - return int(c.getWithFallback("P2PIncomingMessageBufferSize", parse.Uint16).(uint16)) + return int(getEnvWithFallback(c, envvar.NewUint16("P2PIncomingMessageBufferSize"))) } func (c *generalConfig) P2POutgoingMessageBufferSize() int { if c.OCROutgoingMessageBufferSize() != 0 { return c.OCRIncomingMessageBufferSize() } - return int(c.getWithFallback("P2PIncomingMessageBufferSize", parse.Uint16).(uint16)) + return int(getEnvWithFallback(c, envvar.NewUint16("P2PIncomingMessageBufferSize"))) } type P2PDeprecated interface { diff --git a/core/config/p2p_v1_config.go b/core/config/p2p_v1_config.go index 74045269283..80c23c34b97 100644 --- a/core/config/p2p_v1_config.go +++ b/core/config/p2p_v1_config.go @@ -128,7 +128,7 @@ func (c *generalConfig) P2PDHTLookupInterval() int { if c.OCRDHTLookupInterval() != 0 { return c.OCRDHTLookupInterval() } - return int(c.getWithFallback("P2PDHTLookupInterval", parse.Uint16).(uint16)) + return int(getEnvWithFallback(c, envvar.NewUint16("P2PDHTLookupInterval"))) } func (c *generalConfig) P2PNewStreamTimeout() time.Duration { diff --git 
a/core/config/parse/parsers.go b/core/config/parse/parsers.go index 79a9aea2762..0b02772ac2e 100644 --- a/core/config/parse/parsers.go +++ b/core/config/parse/parsers.go @@ -16,13 +16,11 @@ import ( "github.com/smartcontractkit/chainlink/core/utils" ) -// String parser -func String(str string) (interface{}, error) { +func String(str string) (string, error) { return str, nil } -// Link parser -func Link(str string) (interface{}, error) { +func Link(str string) (*assets.Link, error) { i, ok := new(assets.Link).SetString(str, 10) if !ok { return i, fmt.Errorf("unable to parse '%v' into *assets.Link(base 10)", str) @@ -30,39 +28,33 @@ func Link(str string) (interface{}, error) { return i, nil } -// LogLevel sets log level -func LogLevel(str string) (interface{}, error) { +func LogLevel(str string) (zapcore.Level, error) { var lvl zapcore.Level err := lvl.Set(str) return lvl, err } -// Uint16 converts string to uint16 -func Uint16(s string) (interface{}, error) { +func Uint16(s string) (uint16, error) { v, err := strconv.ParseUint(s, 10, 16) return uint16(v), err } -// Uint32 converts string to uint32 -func Uint32(s string) (interface{}, error) { +func Uint32(s string) (uint32, error) { v, err := strconv.ParseUint(s, 10, 32) return uint32(v), err } -// Uint64 converts string to uint64 -func Uint64(s string) (interface{}, error) { +func Uint64(s string) (uint64, error) { v, err := strconv.ParseUint(s, 10, 64) return v, err } -// Int64 converts string to int64 -func Int64(s string) (interface{}, error) { +func Int64(s string) (int64, error) { v, err := strconv.ParseInt(s, 10, 64) return v, err } -// F32 converts string to float32 -func F32(s string) (interface{}, error) { +func F32(s string) (float32, error) { v, err := strconv.ParseFloat(s, 32) return float32(v), err } @@ -82,8 +74,7 @@ func Duration(s string) (interface{}, error) { return time.ParseDuration(s) } -// FileSize parses string as FileSize type -func FileSize(s string) (interface{}, error) { +func FileSize(s 
string) (utils.FileSize, error) { var fs utils.FileSize err := fs.UnmarshalText([]byte(s)) return fs, err @@ -94,8 +85,7 @@ func Bool(s string) (interface{}, error) { return strconv.ParseBool(s) } -// BigInt parses string into a big int -func BigInt(str string) (interface{}, error) { +func BigInt(str string) (*big.Int, error) { i, ok := new(big.Int).SetString(str, 10) if !ok { return i, fmt.Errorf("unable to parse %v into *big.Int(base 10)", str) @@ -103,11 +93,10 @@ func BigInt(str string) (interface{}, error) { return i, nil } -// HomeDir parses string as a file path -func HomeDir(str string) (interface{}, error) { +func HomeDir(str string) (string, error) { exp, err := homedir.Expand(str) if err != nil { - return nil, err + return "", err } return filepath.ToSlash(exp), nil } diff --git a/core/config/presenters.go b/core/config/presenters.go index 84a4ca6030c..648e3a7995d 100644 --- a/core/config/presenters.go +++ b/core/config/presenters.go @@ -70,6 +70,8 @@ type EnvPrinter struct { KeeperRegistrySyncInterval time.Duration `json:"KEEPER_REGISTRY_SYNC_INTERVAL"` KeeperRegistrySyncUpkeepQueueSize uint32 `json:"KEEPER_REGISTRY_SYNC_UPKEEP_QUEUE_SIZE"` KeeperCheckUpkeepGasPriceFeatureEnabled bool `json:"KEEPER_CHECK_UPKEEP_GAS_PRICE_FEATURE_ENABLED"` + KeeperTurnLookBack int64 `json:"KEEPER_TURN_LOOK_BACK"` + KeeperTurnFlagEnabled bool `json:"KEEPER_TURN_FLAG_ENABLED"` LeaseLockDuration time.Duration `json:"LEASE_LOCK_DURATION"` LeaseLockRefreshInterval time.Duration `json:"LEASE_LOCK_REFRESH_INTERVAL"` FlagsContractAddress string `json:"FLAGS_CONTRACT_ADDRESS"` @@ -137,6 +139,10 @@ func NewConfigPrinter(cfg GeneralConfig) ConfigPrinter { if cfg.TelemetryIngressURL() != nil { telemetryIngressURL = cfg.TelemetryIngressURL().String() } + bridgeResponseURL := "" + if cfg.BridgeResponseURL() != nil { + bridgeResponseURL = cfg.BridgeResponseURL().String() + } ocrTransmitTimeout, _ := cfg.GlobalOCRContractTransmitterTransmitTimeout() ocrDatabaseTimeout, _ := 
cfg.GlobalOCRDatabaseTimeout() return ConfigPrinter{ @@ -145,7 +151,7 @@ func NewConfigPrinter(cfg GeneralConfig) ConfigPrinter { AdvisoryLockID: cfg.AdvisoryLockID(), AllowOrigins: cfg.AllowOrigins(), BlockBackfillDepth: cfg.BlockBackfillDepth(), - BridgeResponseURL: cfg.BridgeResponseURL().String(), + BridgeResponseURL: bridgeResponseURL, ClientNodeURL: cfg.ClientNodeURL(), DatabaseBackupFrequency: cfg.DatabaseBackupFrequency(), DatabaseBackupMode: string(cfg.DatabaseBackupMode()), @@ -171,6 +177,8 @@ func NewConfigPrinter(cfg GeneralConfig) ConfigPrinter { KeeperCheckUpkeepGasPriceFeatureEnabled: cfg.KeeperCheckUpkeepGasPriceFeatureEnabled(), KeeperDefaultTransactionQueueDepth: cfg.KeeperDefaultTransactionQueueDepth(), KeeperGasPriceBufferPercent: cfg.KeeperGasPriceBufferPercent(), + KeeperTurnLookBack: cfg.KeeperTurnLookBack(), + KeeperTurnFlagEnabled: cfg.KeeperTurnFlagEnabled(), KeeperGasTipCapBufferPercent: cfg.KeeperGasTipCapBufferPercent(), KeeperBaseFeeBufferPercent: cfg.KeeperBaseFeeBufferPercent(), LeaseLockDuration: cfg.LeaseLockDuration(), diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go index 484e49a718d..3d7d9cbd8a9 100644 --- a/core/internal/cltest/cltest.go +++ b/core/internal/cltest/cltest.go @@ -49,6 +49,7 @@ import ( evmMocks "github.com/smartcontractkit/chainlink/core/chains/evm/mocks" "github.com/smartcontractkit/chainlink/core/chains/evm/txmgr" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/core/chains/solana" "github.com/smartcontractkit/chainlink/core/chains/terra" "github.com/smartcontractkit/chainlink/core/cmd" "github.com/smartcontractkit/chainlink/core/config" @@ -331,7 +332,7 @@ func NewApplicationWithConfig(t testing.TB, cfg *configtest.TestGeneralConfig, f if chainORM != nil { panic("cannot set more than one chain") } - chainORM = evmtest.NewMockORM([]evmtypes.Chain{dep}) + chainORM = evmtest.NewMockORM([]evmtypes.Chain{dep}, nil) case 
pg.EventBroadcaster: eventBroadcaster = dep default: @@ -382,6 +383,20 @@ func NewApplicationWithConfig(t testing.TB, cfg *configtest.TestGeneralConfig, f lggr.Fatal(err) } } + if cfg.SolanaEnabled() { + solLggr := lggr.Named("Solana") + chains.Solana, err = solana.NewChainSet(solana.ChainSetOpts{ + Config: cfg, + Logger: solLggr, + DB: db, + KeyStore: keyStore.Solana(), + EventBroadcaster: eventBroadcaster, + ORM: solana.NewORM(db, solLggr, cfg), + }) + if err != nil { + lggr.Fatal(err) + } + } appInstance, err := chainlink.NewApplication(chainlink.ApplicationOpts{ Config: cfg, @@ -411,54 +426,38 @@ func NewApplicationWithConfig(t testing.TB, cfg *configtest.TestGeneralConfig, f return ta } -func NewEthMocksWithDefaultChain(t testing.TB) (c *evmMocks.Client, s *evmMocks.Subscription) { +func NewEthMocksWithDefaultChain(t testing.TB) (c *evmMocks.Client) { testutils.SkipShortDB(t) - c, s = NewEthMocks(t) + c = NewEthMocks(t) c.On("ChainID").Return(&FixtureChainID).Maybe() return } -func NewEthMocks(t testing.TB) (*evmMocks.Client, *evmMocks.Subscription) { - c, s := NewEthClientAndSubMock(t) +func NewEthMocks(t testing.TB) *evmMocks.Client { + c := new(evmMocks.Client) + c.Test(t) switch tt := t.(type) { case *testing.T: t.Cleanup(func() { c.AssertExpectations(tt) - s.AssertExpectations(tt) }) } - return c, s -} - -func NewEthClientAndSubMock(t mock.TestingT) (*evmMocks.Client, *evmMocks.Subscription) { - mockSub := new(evmMocks.Subscription) - mockSub.Test(t) - mockEth := new(evmMocks.Client) - mockEth.Test(t) - return mockEth, mockSub -} - -func NewEthClientAndSubMockWithDefaultChain(t mock.TestingT) (*evmMocks.Client, *evmMocks.Subscription) { - mockEth, mockSub := NewEthClientAndSubMock(t) - mockEth.On("ChainID").Return(&FixtureChainID).Maybe() - return mockEth, mockSub + return c } +// Deprecated: use evmtest.NewEthClientMock func NewEthClientMock(t mock.TestingT) *evmMocks.Client { - mockEth := new(evmMocks.Client) - mockEth.Test(t) - return mockEth + return 
evmtest.NewEthClientMock(t) } +// Deprecated: use evmtest.NewEthClientMockWithDefaultChain func NewEthClientMockWithDefaultChain(t testing.TB) *evmMocks.Client { - c := NewEthClientMock(t) - c.On("ChainID").Return(&FixtureChainID).Maybe() - return c + return evmtest.NewEthClientMockWithDefaultChain(t) } func NewEthMocksWithStartupAssertions(t testing.TB) *evmMocks.Client { testutils.SkipShort(t, "long test") - c, s := NewEthMocks(t) + c := NewEthMocks(t) c.On("Dial", mock.Anything).Maybe().Return(nil) c.On("SubscribeNewHead", mock.Anything, mock.Anything).Maybe().Return(EmptyMockSubscription(t), nil) c.On("SendTransaction", mock.Anything, mock.Anything).Maybe().Return(nil) @@ -471,15 +470,13 @@ func NewEthMocksWithStartupAssertions(t testing.TB) *evmMocks.Client { }) c.On("BlockByNumber", mock.Anything, mock.Anything).Maybe().Return(block, nil) - s.On("Err").Return(nil).Maybe() - s.On("Unsubscribe").Return(nil).Maybe() return c } // NewEthMocksWithTransactionsOnBlocksAssertions sets an Eth mock with transactions on blocks func NewEthMocksWithTransactionsOnBlocksAssertions(t testing.TB) *evmMocks.Client { testutils.SkipShort(t, "long test") - c, s := NewEthMocks(t) + c := NewEthMocks(t) c.On("Dial", mock.Anything).Maybe().Return(nil) c.On("SubscribeNewHead", mock.Anything, mock.Anything).Maybe().Return(EmptyMockSubscription(t), nil) c.On("SendTransaction", mock.Anything, mock.Anything).Maybe().Return(nil) @@ -507,9 +504,6 @@ func NewEthMocksWithTransactionsOnBlocksAssertions(t testing.TB) *evmMocks.Clien }) c.On("BlockByNumber", mock.Anything, mock.Anything).Maybe().Return(block, nil) - s.On("Err").Return(nil).Maybe() - s.On("Unsubscribe").Return(nil).Maybe() - return c } @@ -849,7 +843,7 @@ const ( AssertNoActionTimeout = 3 * time.Second ) -// WaitTimeout is just preserved for compatabilty. Use testutils.WaitTimeout directly instead. +// WaitTimeout is just preserved for compatibility. Use testutils.WaitTimeout directly instead. 
// Deprecated var WaitTimeout = testutils.WaitTimeout @@ -1510,7 +1504,7 @@ func AssertRecordEventually(t *testing.T, db *sqlx.DB, model interface{}, stmt s } func MustSendingKeyStates(t *testing.T, ethKeyStore keystore.Eth) []ethkey.State { - keys, err := ethKeyStore.SendingKeys() + keys, err := ethKeyStore.SendingKeys(nil) require.NoError(t, err) states, err := ethKeyStore.GetStatesForKeys(keys) require.NoError(t, err) diff --git a/core/internal/cltest/event_websocket_server.go b/core/internal/cltest/event_websocket_server.go index 6689df024a8..64b3e90cd81 100644 --- a/core/internal/cltest/event_websocket_server.go +++ b/core/internal/cltest/event_websocket_server.go @@ -19,6 +19,7 @@ type EventWebSocketServer struct { t *testing.T connections []*websocket.Conn Connected chan struct{} + Disconnected chan struct{} ReceivedText chan string ReceivedBinary chan []byte URL *url.URL @@ -30,6 +31,7 @@ func NewEventWebSocketServer(t *testing.T) (*EventWebSocketServer, func()) { mutex: &sync.RWMutex{}, t: t, Connected: make(chan struct{}, 1), // have buffer of one for easier assertions after the event + Disconnected: make(chan struct{}, 1), // have buffer of one for easier assertions after the event ReceivedText: make(chan string, 100), ReceivedBinary: make(chan []byte, 100), } @@ -46,6 +48,12 @@ func NewEventWebSocketServer(t *testing.T) (*EventWebSocketServer, func()) { } } +func (wss EventWebSocketServer) ConnectionsCount() int { + wss.mutex.RLock() + defer wss.mutex.RUnlock() + return len(wss.connections) +} + // Broadcast sends a message to every web socket client connected to the EventWebSocketServer func (wss *EventWebSocketServer) Broadcast(message string) error { wss.mutex.RLock() @@ -139,4 +147,8 @@ func (wss *EventWebSocketServer) removeConnection(conn *websocket.Conn) { } wss.connections = newc wss.mutex.Unlock() + select { // broadcast disconnected event + case wss.Disconnected <- struct{}{}: + default: + } } diff --git a/core/internal/cltest/factories.go 
b/core/internal/cltest/factories.go index eb9c6e2ee38..b78c7a8ac3a 100644 --- a/core/internal/cltest/factories.go +++ b/core/internal/cltest/factories.go @@ -554,7 +554,7 @@ func MustInsertKeeperJob(t *testing.T, db *sqlx.DB, korm keeper.ORM, from ethkey return jb } -func MustInsertKeeperRegistry(t *testing.T, db *sqlx.DB, korm keeper.ORM, ethKeyStore keystore.Eth) (keeper.Registry, job.Job) { +func MustInsertKeeperRegistry(t *testing.T, db *sqlx.DB, korm keeper.ORM, ethKeyStore keystore.Eth, keeperIndex, numKeepers, blockCountPerTurn int32) (keeper.Registry, job.Job) { key, _ := MustAddRandomKeyToKeystore(t, ethKeyStore) from := key.Address t.Helper() @@ -562,12 +562,15 @@ func MustInsertKeeperRegistry(t *testing.T, db *sqlx.DB, korm keeper.ORM, ethKey job := MustInsertKeeperJob(t, db, korm, from, contractAddress) registry := keeper.Registry{ ContractAddress: contractAddress, - BlockCountPerTurn: 20, + BlockCountPerTurn: blockCountPerTurn, CheckGas: 150_000, FromAddress: from, JobID: job.ID, - KeeperIndex: 0, - NumKeepers: 1, + KeeperIndex: keeperIndex, + NumKeepers: numKeepers, + KeeperIndexMap: map[ethkey.EIP55Address]int32{ + from: keeperIndex, + }, } err := korm.UpsertRegistry(®istry) require.NoError(t, err) diff --git a/core/internal/cltest/heavyweight/orm.go b/core/internal/cltest/heavyweight/orm.go index 1f1141944ca..d9c33ba86d0 100644 --- a/core/internal/cltest/heavyweight/orm.go +++ b/core/internal/cltest/heavyweight/orm.go @@ -19,20 +19,38 @@ import ( "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v4" + "github.com/smartcontractkit/chainlink/core/cmd" "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/internal/testutils" "github.com/smartcontractkit/chainlink/core/internal/testutils/configtest" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/pg" "github.com/smartcontractkit/chainlink/core/store/dialects" - migrations 
"github.com/smartcontractkit/chainlink/core/store/migrate" ) -// FullTestDB creates an DB which runs in a separate database than the normal -// unit tests, so you can do things like use other Postgres connection types -// with it. -func FullTestDB(t *testing.T, name string, migrate bool, loadFixtures bool) (*configtest.TestGeneralConfig, *sqlx.DB) { +// FullTestDB creates a pristine DB which runs in a separate database than the normal +// unit tests, so you can do things like use other Postgres connection types with it. +func FullTestDB(t *testing.T, name string) (*configtest.TestGeneralConfig, *sqlx.DB) { + return prepareFullTestDB(t, name, false, true) +} + +// FullTestDBNoFixtures is the same as FullTestDB, but it does not load fixtures. +func FullTestDBNoFixtures(t *testing.T, name string) (*configtest.TestGeneralConfig, *sqlx.DB) { + return prepareFullTestDB(t, name, false, false) +} + +// FullTestDBEmpty creates an empty DB (without migrations). +func FullTestDBEmpty(t *testing.T, name string) (*configtest.TestGeneralConfig, *sqlx.DB) { + return prepareFullTestDB(t, name, true, false) +} + +func prepareFullTestDB(t *testing.T, name string, empty bool, loadFixtures bool) (*configtest.TestGeneralConfig, *sqlx.DB) { testutils.SkipShort(t, "FullTestDB") + + if empty && loadFixtures { + t.Fatal("could not load fixtures into an empty DB") + } + overrides := configtest.GeneralConfigOverrides{ SecretGenerator: cltest.MockSecretGenerator{}, } @@ -40,7 +58,7 @@ func FullTestDB(t *testing.T, name string, migrate bool, loadFixtures bool) (*co gcfg.Overrides.Dialect = dialects.Postgres require.NoError(t, os.MkdirAll(gcfg.RootDir(), 0700)) - migrationTestDBURL, err := dropAndCreateThrowawayTestDB(gcfg.DatabaseURL(), name) + migrationTestDBURL, err := dropAndCreateThrowawayTestDB(gcfg.DatabaseURL(), name, empty) require.NoError(t, err) lggr := logger.TestLogger(t) db, err := pg.NewConnection(migrationTestDBURL, string(dialects.Postgres), pg.Config{ @@ -54,13 +72,11 @@ func 
FullTestDB(t *testing.T, name string, migrate bool, loadFixtures bool) (*co os.RemoveAll(gcfg.RootDir()) }) gcfg.Overrides.DatabaseURL = null.StringFrom(migrationTestDBURL) - if migrate { - require.NoError(t, migrations.Migrate(db.DB, lggr)) - } + if loadFixtures { - _, filename, _, ok := runtime.Caller(0) + _, filename, _, ok := runtime.Caller(1) if !ok { - t.Fatal("could not get runtime.Caller(0)") + t.Fatal("could not get runtime.Caller(1)") } filepath := path.Join(path.Dir(filename), "../../../store/fixtures/fixtures.sql") fixturesSQL, err := ioutil.ReadFile(filepath) @@ -72,7 +88,7 @@ func FullTestDB(t *testing.T, name string, migrate bool, loadFixtures bool) (*co return gcfg, db } -func dropAndCreateThrowawayTestDB(parsed url.URL, postfix string) (string, error) { +func dropAndCreateThrowawayTestDB(parsed url.URL, postfix string, empty bool) (string, error) { if parsed.Path == "" { return "", errors.New("path missing from database URL") } @@ -94,10 +110,13 @@ func dropAndCreateThrowawayTestDB(parsed url.URL, postfix string) (string, error if err != nil { return "", fmt.Errorf("unable to drop postgres migrations test database: %v", err) } - // `CREATE DATABASE $1` does not seem to work w CREATE DATABASE - _, err = db.Exec(fmt.Sprintf("CREATE DATABASE %s", dbname)) + if empty { + _, err = db.Exec(fmt.Sprintf("CREATE DATABASE %s", dbname)) + } else { + _, err = db.Exec(fmt.Sprintf("CREATE DATABASE %s WITH TEMPLATE %s", dbname, cmd.PristineDBName)) + } if err != nil { - return "", fmt.Errorf("unable to create postgres migrations test database with name '%s': %v", dbname, err) + return "", fmt.Errorf("unable to create postgres test database with name '%s': %v", dbname, err) } parsed.Path = fmt.Sprintf("/%s", dbname) return parsed.String(), nil diff --git a/core/internal/cltest/mocks.go b/core/internal/cltest/mocks.go index dd9e059434c..2393b5b818a 100644 --- a/core/internal/cltest/mocks.go +++ b/core/internal/cltest/mocks.go @@ -12,6 +12,9 @@ import ( "testing" 
"time" + "github.com/smartcontractkit/sqlx" + "go.uber.org/atomic" + "github.com/smartcontractkit/chainlink/core/chains/evm" evmclient "github.com/smartcontractkit/chainlink/core/chains/evm/client" evmconfig "github.com/smartcontractkit/chainlink/core/chains/evm/config" @@ -23,8 +26,6 @@ import ( "github.com/smartcontractkit/chainlink/core/services/chainlink" "github.com/smartcontractkit/chainlink/core/sessions" "github.com/smartcontractkit/chainlink/core/web" - "github.com/smartcontractkit/sqlx" - "go.uber.org/atomic" gethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/robfig/cron/v3" diff --git a/core/internal/features_ocr2_test.go b/core/internal/features_ocr2_test.go index e28fb4e10ae..8f0ae130cb4 100644 --- a/core/internal/features_ocr2_test.go +++ b/core/internal/features_ocr2_test.go @@ -87,7 +87,7 @@ func setupOCR2Contracts(t *testing.T) (*bind.TransactOpts, *backends.SimulatedBa } func setupNodeOCR2(t *testing.T, owner *bind.TransactOpts, port uint16, dbName string, b *backends.SimulatedBackend) (*cltest.TestApplication, string, common.Address, ocr2key.KeyBundle, *configtest.TestGeneralConfig) { - config, _ := heavyweight.FullTestDB(t, fmt.Sprintf("%s%d", dbName, port), true, true) + config, _ := heavyweight.FullTestDB(t, fmt.Sprintf("%s%d", dbName, port)) config.Overrides.FeatureOffchainReporting = null.BoolFrom(false) config.Overrides.FeatureOffchainReporting2 = null.BoolFrom(true) config.Overrides.P2PEnabled = null.BoolFrom(true) @@ -113,7 +113,7 @@ func setupNodeOCR2(t *testing.T, owner *bind.TransactOpts, port uint16, dbName s config.Overrides.P2PPeerID = peerID - sendingKeys, err := app.KeyStore.Eth().SendingKeys() + sendingKeys, err := app.KeyStore.Eth().SendingKeys(nil) require.NoError(t, err) require.Len(t, sendingKeys, 1) transmitter := sendingKeys[0].Address.Address() diff --git a/core/internal/features_test.go b/core/internal/features_test.go index e696051e8b3..733950d4fc4 100644 --- a/core/internal/features_test.go +++ 
b/core/internal/features_test.go @@ -16,6 +16,7 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" @@ -352,7 +353,7 @@ func TestIntegration_DirectRequest(t *testing.T) { b := operatorContracts.sim app := cltest.NewApplicationWithConfigAndKeyOnSimulatedBlockchain(t, config, b) - sendingKeys, err := app.KeyStore.Eth().SendingKeys() + sendingKeys, err := app.KeyStore.Eth().SendingKeys(nil) require.NoError(t, err) authorizedSenders := []common.Address{sendingKeys[0].Address.Address()} tx, err := operatorContracts.operator.SetAuthorizedSenders(operatorContracts.user, authorizedSenders) @@ -494,7 +495,7 @@ func setupOCRContracts(t *testing.T) (*bind.TransactOpts, *backends.SimulatedBac } func setupNode(t *testing.T, owner *bind.TransactOpts, portV1, portV2 int, dbName string, b *backends.SimulatedBackend, ns ocrnetworking.NetworkingStack) (*cltest.TestApplication, string, common.Address, ocrkey.KeyV2, *configtest.TestGeneralConfig) { - config, _ := heavyweight.FullTestDB(t, fmt.Sprintf("%s%d", dbName, portV1), true, true) + config, _ := heavyweight.FullTestDB(t, fmt.Sprintf("%s%d", dbName, portV1)) config.Overrides.Dev = null.BoolFrom(true) // Disables ocr spec validation so we can have fast polling for the test. 
config.Overrides.FeatureOffchainReporting = null.BoolFrom(true) config.Overrides.FeatureOffchainReporting2 = null.BoolFrom(true) @@ -531,7 +532,7 @@ func setupNode(t *testing.T, owner *bind.TransactOpts, portV1, portV2 int, dbNam config.Overrides.P2PV2ListenAddresses = []string{fmt.Sprintf("127.0.0.1:%d", portV2)} } - sendingKeys, err := app.KeyStore.Eth().SendingKeys() + sendingKeys, err := app.KeyStore.Eth().SendingKeys(nil) require.NoError(t, err) transmitter := sendingKeys[0].Address.Address() @@ -795,8 +796,8 @@ func TestIntegration_BlockHistoryEstimator(t *testing.T) { cfg := cltest.NewTestGeneralConfig(t) cfg.Overrides.GlobalBalanceMonitorEnabled = null.BoolFrom(false) - ethClient, sub := cltest.NewEthMocksWithDefaultChain(t) - chchNewHeads := make(chan chan<- *evmtypes.Head, 1) + ethClient := cltest.NewEthMocksWithDefaultChain(t) + chchNewHeads := make(chan evmtest.RawSub[*evmtypes.Head], 1) db := pgtest.NewSqlxDB(t) kst := cltest.NewKeyStore(t, db, cfg) @@ -831,12 +832,16 @@ func TestIntegration_BlockHistoryEstimator(t *testing.T) { h41 := evmtypes.Head{Hash: b41.Hash, ParentHash: h40.Hash, Number: 41, EVMChainID: evmChainID} h42 := evmtypes.Head{Hash: b42.Hash, ParentHash: h41.Hash, Number: 42, EVMChainID: evmChainID} - sub.On("Err").Return(nil) - sub.On("Unsubscribe").Return(nil).Maybe() - + mockEth := &evmtest.MockEth{EthClient: ethClient} ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). - Run(func(args mock.Arguments) { chchNewHeads <- args.Get(1).(chan<- *evmtypes.Head) }). 
- Return(sub, nil) + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + sub := mockEth.NewSub(t) + chchNewHeads <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) // Nonce syncer ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Maybe().Return(uint64(0), nil) @@ -857,7 +862,7 @@ func TestIntegration_BlockHistoryEstimator(t *testing.T) { ethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(oneETH.ToInt(), nil) require.NoError(t, cc.Start(testutils.Context(t))) - var newHeads chan<- *evmtypes.Head + var newHeads evmtest.RawSub[*evmtypes.Head] select { case newHeads = <-chchNewHeads: case <-time.After(10 * time.Second): @@ -885,7 +890,7 @@ func TestIntegration_BlockHistoryEstimator(t *testing.T) { ethClient.On("HeadByNumber", mock.Anything, big.NewInt(41)).Return(&h41, nil) // Simulate one new head and check the gas price got updated - newHeads <- cltest.Head(43) + newHeads.TrySend(cltest.Head(43)) gomega.NewWithT(t).Eventually(func() string { gasPrice, _, err := estimator.GetLegacyGas(nil, 500000) @@ -895,7 +900,7 @@ func TestIntegration_BlockHistoryEstimator(t *testing.T) { } func triggerAllKeys(t *testing.T, app *cltest.TestApplication) { - keys, err := app.KeyStore.Eth().SendingKeys() + keys, err := app.KeyStore.Eth().SendingKeys(nil) require.NoError(t, err) // FIXME: This is a hack. 
Remove after https://app.clubhouse.io/chainlinklabs/story/15103/use-in-memory-event-broadcaster-instead-of-postgres-event-broadcaster-in-transactional-tests-so-it-actually-works for _, chain := range app.GetChains().EVM.Chains() { diff --git a/core/internal/gethwrappers/generated/authorized_forwarder/authorized_forwarder.go b/core/internal/gethwrappers/generated/authorized_forwarder/authorized_forwarder.go new file mode 100644 index 00000000000..dfa46d2eee2 --- /dev/null +++ b/core/internal/gethwrappers/generated/authorized_forwarder/authorized_forwarder.go @@ -0,0 +1,968 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package authorized_forwarder + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +var AuthorizedForwarderMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"OwnershipTransferRequestedWithMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"forward\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAuthorizedSenders\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\
"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getChainlinkToken\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"isAuthorizedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"ownerForward\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"}],\"name\":\"setAuthorizedSenders\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"transferOwnershipWithMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: 
"0x60a06040523480156200001157600080fd5b506040516200126138038062001261833981810160405260808110156200003757600080fd5b8151602083015160408085015160608601805192519496939591949391820192846401000000008211156200006b57600080fd5b9083019060208201858111156200008157600080fd5b82516401000000008111828201881017156200009c57600080fd5b82525081516020918201929091019080838360005b83811015620000cb578181015183820152602001620000b1565b50505050905090810190601f168015620000f95780820380516001836020036101000a031916815260200191505b50604052508491508390506001600160a01b03821662000160576040805162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f0000000000000000604482015290519081900360640190fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156200019357620001938162000272565b50506001600160601b0319606085901b166080526001600160a01b038216156200026857816001600160a01b0316836001600160a01b03167f4e1e878dc28d5f040db5969163ff1acd75c44c3f655da2dde9c70bbd8e56dc7e836040518080602001828103825283818151815260200191508051906020019080838360005b838110156200022c57818101518382015260200162000212565b50505050905090810190601f1680156200025a5780820380516001836020036101000a031916815260200191505b509250505060405180910390a35b5050505062000322565b6001600160a01b038116331415620002d1576040805162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b60805160601c610f1c6200034560003980610491528061061d5250610f1c6000f3fe608060405234801561001057600080fd5b50600436106100c95760003560e01c80636fadcf7211610081578063ee56997b1161005b578063ee56997b1461038d578063f2fde38b146103fd578063fa00763a14610430576100c9565b80636fadcf72146102f057806379ba50971461037d5780638da5cb5b14610385576100c9565b8063181f5a77116100b2578063181f5a771461018e5780632408afaa1461020
b5780634d3e232314610263576100c9565b8063033f49f7146100ce578063165d35e11461015d575b600080fd5b61015b600480360360408110156100e457600080fd5b73ffffffffffffffffffffffffffffffffffffffff823516919081019060408101602082013564010000000081111561011c57600080fd5b82018360208201111561012e57600080fd5b8035906020019184600183028401116401000000008311171561015057600080fd5b509092509050610477565b005b61016561048f565b6040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190f35b6101966104b3565b6040805160208082528351818301528351919283929083019185019080838360005b838110156101d05781810151838201526020016101b8565b50505050905090810190601f1680156101fd5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b6102136104ea565b60408051602080825283518183015283519192839290830191858101910280838360005b8381101561024f578181015183820152602001610237565b505050509050019250505060405180910390f35b61015b6004803603604081101561027957600080fd5b73ffffffffffffffffffffffffffffffffffffffff82351691908101906040810160208201356401000000008111156102b157600080fd5b8201836020820111156102c357600080fd5b803590602001918460018302840111640100000000831117156102e557600080fd5b509092509050610559565b61015b6004803603604081101561030657600080fd5b73ffffffffffffffffffffffffffffffffffffffff823516919081019060408101602082013564010000000081111561033e57600080fd5b82018360208201111561035057600080fd5b8035906020019184600183028401116401000000008311171561037257600080fd5b509092509050610613565b61015b6106d6565b6101656107d8565b61015b600480360360208110156103a357600080fd5b8101906020810181356401000000008111156103be57600080fd5b8201836020820111156103d057600080fd5b803590602001918460208302840111640100000000831117156103f257600080fd5b5090925090506107f4565b61015b6004803603602081101561041357600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16610a79565b6104636004803603602081101561044657600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16610a8d565b604080519115158252519081900360200190f35b61047f610ab8565b61048a838383610b405
65b505050565b7f000000000000000000000000000000000000000000000000000000000000000081565b60408051808201909152601981527f417574686f72697a6564466f7277617264657220312e302e3000000000000000602082015290565b6060600380548060200260200160405190810160405280929190818152602001828054801561054f57602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610524575b5050505050905090565b61056283610a79565b8273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f4e1e878dc28d5f040db5969163ff1acd75c44c3f655da2dde9c70bbd8e56dc7e848460405180806020018281038252848482818152602001925080828437600083820152604051601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018290039550909350505050a3505050565b61061b610cb0565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff16141561047f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f43616e6e6f7420666f727761726420746f204c696e6b20746f6b656e00000000604482015290519081900360640190fd5b60015473ffffffffffffffffffffffffffffffffffffffff16331461075c57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015290519081900360640190fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60005473ffffffffffffffffffffffffffffffffffffffff1690565b6107fc610d24565b61086757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f43616e6e6f742073657420617574686f72697a65642073656e64657273000000604482015290519081900360640190fd5b806108bd576040517f0
8c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526026815260200180610eea6026913960400191505060405180910390fd5b60035460005b8181101561094557600060026000600384815481106108de57fe5b60009182526020808320919091015473ffffffffffffffffffffffffffffffffffffffff168352820192909252604001902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169115159190911790556001016108c3565b5060005b828110156109c75760016002600086868581811061096357fe5b6020908102929092013573ffffffffffffffffffffffffffffffffffffffff1683525081019190915260400160002080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016911515919091179055600101610949565b506109d460038484610e4c565b507ff263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a083833360405180806020018373ffffffffffffffffffffffffffffffffffffffff1681526020018281038252858582818152602001925060200280828437600083820152604051601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016909201829003965090945050505050a1505050565b610a81610ab8565b610a8a81610d4b565b50565b73ffffffffffffffffffffffffffffffffffffffff1660009081526002602052604090205460ff1690565b60005473ffffffffffffffffffffffffffffffffffffffff163314610b3e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b565b610b5f8373ffffffffffffffffffffffffffffffffffffffff16610e46565b610bca57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f4d75737420666f727761726420746f206120636f6e7472616374000000000000604482015290519081900360640190fd5b60008373ffffffffffffffffffffffffffffffffffffffff168383604051808383808284376040519201945060009350909150508083038183865af19150503d8060008114610c35576040519150601f19603f3d011682016040523d82523d6000602084013e610c3a565b606091505b5050905080610caa57604080517f08c379a00000000000000000000000000
0000000000000000000000000000000815260206004820152601560248201527f466f727761726465642063616c6c206661696c65640000000000000000000000604482015290519081900360640190fd5b50505050565b610cb933610a8d565b610b3e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f4e6f7420617574686f72697a65642073656e6465720000000000000000000000604482015290519081900360640190fd5b600033610d2f6107d8565b73ffffffffffffffffffffffffffffffffffffffff1614905090565b73ffffffffffffffffffffffffffffffffffffffff8116331415610dd057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b3b151590565b828054828255906000526020600020908101928215610ec4579160200282015b82811115610ec45781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff843516178255602090920191600190910190610e6c565b50610ed0929150610ed4565b5090565b5b80821115610ed05760008155600101610ed556fe4d7573742068617665206174206c65617374203120617574686f72697a65642073656e646572a164736f6c6343000706000a", +} + +var AuthorizedForwarderABI = AuthorizedForwarderMetaData.ABI + +var AuthorizedForwarderBin = AuthorizedForwarderMetaData.Bin + +func DeployAuthorizedForwarder(auth *bind.TransactOpts, backend bind.ContractBackend, link common.Address, owner common.Address, recipient common.Address, message []byte) (common.Address, *types.Transaction, *AuthorizedForwarder, error) { + parsed, err := AuthorizedForwarderMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + 
} + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(AuthorizedForwarderBin), backend, link, owner, recipient, message) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &AuthorizedForwarder{AuthorizedForwarderCaller: AuthorizedForwarderCaller{contract: contract}, AuthorizedForwarderTransactor: AuthorizedForwarderTransactor{contract: contract}, AuthorizedForwarderFilterer: AuthorizedForwarderFilterer{contract: contract}}, nil +} + +type AuthorizedForwarder struct { + address common.Address + abi abi.ABI + AuthorizedForwarderCaller + AuthorizedForwarderTransactor + AuthorizedForwarderFilterer +} + +type AuthorizedForwarderCaller struct { + contract *bind.BoundContract +} + +type AuthorizedForwarderTransactor struct { + contract *bind.BoundContract +} + +type AuthorizedForwarderFilterer struct { + contract *bind.BoundContract +} + +type AuthorizedForwarderSession struct { + Contract *AuthorizedForwarder + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AuthorizedForwarderCallerSession struct { + Contract *AuthorizedForwarderCaller + CallOpts bind.CallOpts +} + +type AuthorizedForwarderTransactorSession struct { + Contract *AuthorizedForwarderTransactor + TransactOpts bind.TransactOpts +} + +type AuthorizedForwarderRaw struct { + Contract *AuthorizedForwarder +} + +type AuthorizedForwarderCallerRaw struct { + Contract *AuthorizedForwarderCaller +} + +type AuthorizedForwarderTransactorRaw struct { + Contract *AuthorizedForwarderTransactor +} + +func NewAuthorizedForwarder(address common.Address, backend bind.ContractBackend) (*AuthorizedForwarder, error) { + abi, err := abi.JSON(strings.NewReader(AuthorizedForwarderABI)) + if err != nil { + return nil, err + } + contract, err := bindAuthorizedForwarder(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AuthorizedForwarder{address: address, abi: abi, AuthorizedForwarderCaller: 
AuthorizedForwarderCaller{contract: contract}, AuthorizedForwarderTransactor: AuthorizedForwarderTransactor{contract: contract}, AuthorizedForwarderFilterer: AuthorizedForwarderFilterer{contract: contract}}, nil +} + +func NewAuthorizedForwarderCaller(address common.Address, caller bind.ContractCaller) (*AuthorizedForwarderCaller, error) { + contract, err := bindAuthorizedForwarder(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AuthorizedForwarderCaller{contract: contract}, nil +} + +func NewAuthorizedForwarderTransactor(address common.Address, transactor bind.ContractTransactor) (*AuthorizedForwarderTransactor, error) { + contract, err := bindAuthorizedForwarder(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &AuthorizedForwarderTransactor{contract: contract}, nil +} + +func NewAuthorizedForwarderFilterer(address common.Address, filterer bind.ContractFilterer) (*AuthorizedForwarderFilterer, error) { + contract, err := bindAuthorizedForwarder(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &AuthorizedForwarderFilterer{contract: contract}, nil +} + +func bindAuthorizedForwarder(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(AuthorizedForwarderABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AuthorizedForwarder.Contract.AuthorizedForwarderCaller.contract.Call(opts, result, method, params...) 
+} + +func (_AuthorizedForwarder *AuthorizedForwarderRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.AuthorizedForwarderTransactor.contract.Transfer(opts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.AuthorizedForwarderTransactor.contract.Transact(opts, method, params...) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AuthorizedForwarder.Contract.contract.Call(opts, result, method, params...) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.contract.Transfer(opts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.contract.Transact(opts, method, params...) 
+} + +func (_AuthorizedForwarder *AuthorizedForwarderCaller) GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _AuthorizedForwarder.contract.Call(opts, &out, "getAuthorizedSenders") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) GetAuthorizedSenders() ([]common.Address, error) { + return _AuthorizedForwarder.Contract.GetAuthorizedSenders(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCallerSession) GetAuthorizedSenders() ([]common.Address, error) { + return _AuthorizedForwarder.Contract.GetAuthorizedSenders(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCaller) GetChainlinkToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AuthorizedForwarder.contract.Call(opts, &out, "getChainlinkToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) GetChainlinkToken() (common.Address, error) { + return _AuthorizedForwarder.Contract.GetChainlinkToken(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCallerSession) GetChainlinkToken() (common.Address, error) { + return _AuthorizedForwarder.Contract.GetChainlinkToken(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCaller) IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) { + var out []interface{} + err := _AuthorizedForwarder.contract.Call(opts, &out, "isAuthorizedSender", sender) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func 
(_AuthorizedForwarder *AuthorizedForwarderSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _AuthorizedForwarder.Contract.IsAuthorizedSender(&_AuthorizedForwarder.CallOpts, sender) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCallerSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _AuthorizedForwarder.Contract.IsAuthorizedSender(&_AuthorizedForwarder.CallOpts, sender) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AuthorizedForwarder.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) Owner() (common.Address, error) { + return _AuthorizedForwarder.Contract.Owner(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCallerSession) Owner() (common.Address, error) { + return _AuthorizedForwarder.Contract.Owner(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _AuthorizedForwarder.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) TypeAndVersion() (string, error) { + return _AuthorizedForwarder.Contract.TypeAndVersion(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCallerSession) TypeAndVersion() (string, error) { + return _AuthorizedForwarder.Contract.TypeAndVersion(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { 
+ return _AuthorizedForwarder.contract.Transact(opts, "acceptOwnership") +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) AcceptOwnership() (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.AcceptOwnership(&_AuthorizedForwarder.TransactOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.AcceptOwnership(&_AuthorizedForwarder.TransactOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactor) Forward(opts *bind.TransactOpts, to common.Address, data []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.contract.Transact(opts, "forward", to, data) +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) Forward(to common.Address, data []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.Forward(&_AuthorizedForwarder.TransactOpts, to, data) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorSession) Forward(to common.Address, data []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.Forward(&_AuthorizedForwarder.TransactOpts, to, data) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactor) OwnerForward(opts *bind.TransactOpts, to common.Address, data []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.contract.Transact(opts, "ownerForward", to, data) +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) OwnerForward(to common.Address, data []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.OwnerForward(&_AuthorizedForwarder.TransactOpts, to, data) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorSession) OwnerForward(to common.Address, data []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.OwnerForward(&_AuthorizedForwarder.TransactOpts, to, data) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactor) SetAuthorizedSenders(opts 
*bind.TransactOpts, senders []common.Address) (*types.Transaction, error) { + return _AuthorizedForwarder.contract.Transact(opts, "setAuthorizedSenders", senders) +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.SetAuthorizedSenders(&_AuthorizedForwarder.TransactOpts, senders) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorSession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.SetAuthorizedSenders(&_AuthorizedForwarder.TransactOpts, senders) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _AuthorizedForwarder.contract.Transact(opts, "transferOwnership", to) +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.TransferOwnership(&_AuthorizedForwarder.TransactOpts, to) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.TransferOwnership(&_AuthorizedForwarder.TransactOpts, to) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactor) TransferOwnershipWithMessage(opts *bind.TransactOpts, to common.Address, message []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.contract.Transact(opts, "transferOwnershipWithMessage", to, message) +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) TransferOwnershipWithMessage(to common.Address, message []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.TransferOwnershipWithMessage(&_AuthorizedForwarder.TransactOpts, to, message) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorSession) 
TransferOwnershipWithMessage(to common.Address, message []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.TransferOwnershipWithMessage(&_AuthorizedForwarder.TransactOpts, to, message) +} + +type AuthorizedForwarderAuthorizedSendersChangedIterator struct { + Event *AuthorizedForwarderAuthorizedSendersChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AuthorizedForwarderAuthorizedSendersChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AuthorizedForwarderAuthorizedSendersChangedIterator) Error() error { + return it.fail +} + +func (it *AuthorizedForwarderAuthorizedSendersChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AuthorizedForwarderAuthorizedSendersChanged struct { + Senders []common.Address + ChangedBy common.Address + Raw types.Log +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*AuthorizedForwarderAuthorizedSendersChangedIterator, error) { + + logs, sub, err := _AuthorizedForwarder.contract.FilterLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return &AuthorizedForwarderAuthorizedSendersChangedIterator{contract: _AuthorizedForwarder.contract, event: 
"AuthorizedSendersChanged", logs: logs, sub: sub}, nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderAuthorizedSendersChanged) (event.Subscription, error) { + + logs, sub, err := _AuthorizedForwarder.contract.WatchLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AuthorizedForwarderAuthorizedSendersChanged) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) ParseAuthorizedSendersChanged(log types.Log) (*AuthorizedForwarderAuthorizedSendersChanged, error) { + event := new(AuthorizedForwarderAuthorizedSendersChanged) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AuthorizedForwarderOwnershipTransferRequestedIterator struct { + Event *AuthorizedForwarderOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AuthorizedForwarderOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + 
select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AuthorizedForwarderOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *AuthorizedForwarderOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AuthorizedForwarderOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AuthorizedForwarderOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AuthorizedForwarder.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &AuthorizedForwarderOwnershipTransferRequestedIterator{contract: _AuthorizedForwarder.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AuthorizedForwarder.contract.WatchLogs(opts, "OwnershipTransferRequested", 
fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AuthorizedForwarderOwnershipTransferRequested) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) ParseOwnershipTransferRequested(log types.Log) (*AuthorizedForwarderOwnershipTransferRequested, error) { + event := new(AuthorizedForwarderOwnershipTransferRequested) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AuthorizedForwarderOwnershipTransferRequestedWithMessageIterator struct { + Event *AuthorizedForwarderOwnershipTransferRequestedWithMessage + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AuthorizedForwarderOwnershipTransferRequestedWithMessageIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderOwnershipTransferRequestedWithMessage) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderOwnershipTransferRequestedWithMessage) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): 
+ it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AuthorizedForwarderOwnershipTransferRequestedWithMessageIterator) Error() error { + return it.fail +} + +func (it *AuthorizedForwarderOwnershipTransferRequestedWithMessageIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AuthorizedForwarderOwnershipTransferRequestedWithMessage struct { + From common.Address + To common.Address + Message []byte + Raw types.Log +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) FilterOwnershipTransferRequestedWithMessage(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AuthorizedForwarderOwnershipTransferRequestedWithMessageIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AuthorizedForwarder.contract.FilterLogs(opts, "OwnershipTransferRequestedWithMessage", fromRule, toRule) + if err != nil { + return nil, err + } + return &AuthorizedForwarderOwnershipTransferRequestedWithMessageIterator{contract: _AuthorizedForwarder.contract, event: "OwnershipTransferRequestedWithMessage", logs: logs, sub: sub}, nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) WatchOwnershipTransferRequestedWithMessage(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderOwnershipTransferRequestedWithMessage, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AuthorizedForwarder.contract.WatchLogs(opts, "OwnershipTransferRequestedWithMessage", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer 
sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AuthorizedForwarderOwnershipTransferRequestedWithMessage) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "OwnershipTransferRequestedWithMessage", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) ParseOwnershipTransferRequestedWithMessage(log types.Log) (*AuthorizedForwarderOwnershipTransferRequestedWithMessage, error) { + event := new(AuthorizedForwarderOwnershipTransferRequestedWithMessage) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "OwnershipTransferRequestedWithMessage", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AuthorizedForwarderOwnershipTransferredIterator struct { + Event *AuthorizedForwarderOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AuthorizedForwarderOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AuthorizedForwarderOwnershipTransferredIterator) Error() error { + return 
it.fail +} + +func (it *AuthorizedForwarderOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AuthorizedForwarderOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AuthorizedForwarderOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AuthorizedForwarder.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &AuthorizedForwarderOwnershipTransferredIterator{contract: _AuthorizedForwarder.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AuthorizedForwarder.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AuthorizedForwarderOwnershipTransferred) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } 
+ case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) ParseOwnershipTransferred(log types.Log) (*AuthorizedForwarderOwnershipTransferred, error) { + event := new(AuthorizedForwarderOwnershipTransferred) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_AuthorizedForwarder *AuthorizedForwarder) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _AuthorizedForwarder.abi.Events["AuthorizedSendersChanged"].ID: + return _AuthorizedForwarder.ParseAuthorizedSendersChanged(log) + case _AuthorizedForwarder.abi.Events["OwnershipTransferRequested"].ID: + return _AuthorizedForwarder.ParseOwnershipTransferRequested(log) + case _AuthorizedForwarder.abi.Events["OwnershipTransferRequestedWithMessage"].ID: + return _AuthorizedForwarder.ParseOwnershipTransferRequestedWithMessage(log) + case _AuthorizedForwarder.abi.Events["OwnershipTransferred"].ID: + return _AuthorizedForwarder.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (AuthorizedForwarderAuthorizedSendersChanged) Topic() common.Hash { + return common.HexToHash("0xf263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0") +} + +func (AuthorizedForwarderOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (AuthorizedForwarderOwnershipTransferRequestedWithMessage) Topic() common.Hash { + return common.HexToHash("0x4e1e878dc28d5f040db5969163ff1acd75c44c3f655da2dde9c70bbd8e56dc7e") +} + +func (AuthorizedForwarderOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func 
(_AuthorizedForwarder *AuthorizedForwarder) Address() common.Address { + return _AuthorizedForwarder.address +} + +type AuthorizedForwarderInterface interface { + GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) + + GetChainlinkToken(opts *bind.CallOpts) (common.Address, error) + + IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + Forward(opts *bind.TransactOpts, to common.Address, data []byte) (*types.Transaction, error) + + OwnerForward(opts *bind.TransactOpts, to common.Address, data []byte) (*types.Transaction, error) + + SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + TransferOwnershipWithMessage(opts *bind.TransactOpts, to common.Address, message []byte) (*types.Transaction, error) + + FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*AuthorizedForwarderAuthorizedSendersChangedIterator, error) + + WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderAuthorizedSendersChanged) (event.Subscription, error) + + ParseAuthorizedSendersChanged(log types.Log) (*AuthorizedForwarderAuthorizedSendersChanged, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AuthorizedForwarderOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*AuthorizedForwarderOwnershipTransferRequested, error) + + FilterOwnershipTransferRequestedWithMessage(opts *bind.FilterOpts, from 
[]common.Address, to []common.Address) (*AuthorizedForwarderOwnershipTransferRequestedWithMessageIterator, error) + + WatchOwnershipTransferRequestedWithMessage(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderOwnershipTransferRequestedWithMessage, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequestedWithMessage(log types.Log) (*AuthorizedForwarderOwnershipTransferRequestedWithMessage, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AuthorizedForwarderOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*AuthorizedForwarderOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/internal/gethwrappers/generated/authorized_receiver/authorized_receiver.go b/core/internal/gethwrappers/generated/authorized_receiver/authorized_receiver.go new file mode 100644 index 00000000000..2f9b78c4955 --- /dev/null +++ b/core/internal/gethwrappers/generated/authorized_receiver/authorized_receiver.go @@ -0,0 +1,362 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package authorized_receiver + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +var AuthorizedReceiverMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersChanged\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"getAuthorizedSenders\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"isAuthorizedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"}],\"name\":\"setAuthorizedSenders\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +var AuthorizedReceiverABI = AuthorizedReceiverMetaData.ABI + +type AuthorizedReceiver struct { + address common.Address + abi abi.ABI + AuthorizedReceiverCaller + AuthorizedReceiverTransactor + AuthorizedReceiverFilterer +} + +type AuthorizedReceiverCaller struct { + contract *bind.BoundContract +} + +type AuthorizedReceiverTransactor struct { + contract *bind.BoundContract +} + +type 
AuthorizedReceiverFilterer struct { + contract *bind.BoundContract +} + +type AuthorizedReceiverSession struct { + Contract *AuthorizedReceiver + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AuthorizedReceiverCallerSession struct { + Contract *AuthorizedReceiverCaller + CallOpts bind.CallOpts +} + +type AuthorizedReceiverTransactorSession struct { + Contract *AuthorizedReceiverTransactor + TransactOpts bind.TransactOpts +} + +type AuthorizedReceiverRaw struct { + Contract *AuthorizedReceiver +} + +type AuthorizedReceiverCallerRaw struct { + Contract *AuthorizedReceiverCaller +} + +type AuthorizedReceiverTransactorRaw struct { + Contract *AuthorizedReceiverTransactor +} + +func NewAuthorizedReceiver(address common.Address, backend bind.ContractBackend) (*AuthorizedReceiver, error) { + abi, err := abi.JSON(strings.NewReader(AuthorizedReceiverABI)) + if err != nil { + return nil, err + } + contract, err := bindAuthorizedReceiver(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AuthorizedReceiver{address: address, abi: abi, AuthorizedReceiverCaller: AuthorizedReceiverCaller{contract: contract}, AuthorizedReceiverTransactor: AuthorizedReceiverTransactor{contract: contract}, AuthorizedReceiverFilterer: AuthorizedReceiverFilterer{contract: contract}}, nil +} + +func NewAuthorizedReceiverCaller(address common.Address, caller bind.ContractCaller) (*AuthorizedReceiverCaller, error) { + contract, err := bindAuthorizedReceiver(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AuthorizedReceiverCaller{contract: contract}, nil +} + +func NewAuthorizedReceiverTransactor(address common.Address, transactor bind.ContractTransactor) (*AuthorizedReceiverTransactor, error) { + contract, err := bindAuthorizedReceiver(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &AuthorizedReceiverTransactor{contract: contract}, nil +} + +func NewAuthorizedReceiverFilterer(address 
common.Address, filterer bind.ContractFilterer) (*AuthorizedReceiverFilterer, error) { + contract, err := bindAuthorizedReceiver(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &AuthorizedReceiverFilterer{contract: contract}, nil +} + +func bindAuthorizedReceiver(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(AuthorizedReceiverABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +func (_AuthorizedReceiver *AuthorizedReceiverRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AuthorizedReceiver.Contract.AuthorizedReceiverCaller.contract.Call(opts, result, method, params...) +} + +func (_AuthorizedReceiver *AuthorizedReceiverRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AuthorizedReceiver.Contract.AuthorizedReceiverTransactor.contract.Transfer(opts) +} + +func (_AuthorizedReceiver *AuthorizedReceiverRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AuthorizedReceiver.Contract.AuthorizedReceiverTransactor.contract.Transact(opts, method, params...) +} + +func (_AuthorizedReceiver *AuthorizedReceiverCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AuthorizedReceiver.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_AuthorizedReceiver *AuthorizedReceiverTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AuthorizedReceiver.Contract.contract.Transfer(opts) +} + +func (_AuthorizedReceiver *AuthorizedReceiverTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AuthorizedReceiver.Contract.contract.Transact(opts, method, params...) +} + +func (_AuthorizedReceiver *AuthorizedReceiverCaller) GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _AuthorizedReceiver.contract.Call(opts, &out, "getAuthorizedSenders") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_AuthorizedReceiver *AuthorizedReceiverSession) GetAuthorizedSenders() ([]common.Address, error) { + return _AuthorizedReceiver.Contract.GetAuthorizedSenders(&_AuthorizedReceiver.CallOpts) +} + +func (_AuthorizedReceiver *AuthorizedReceiverCallerSession) GetAuthorizedSenders() ([]common.Address, error) { + return _AuthorizedReceiver.Contract.GetAuthorizedSenders(&_AuthorizedReceiver.CallOpts) +} + +func (_AuthorizedReceiver *AuthorizedReceiverCaller) IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) { + var out []interface{} + err := _AuthorizedReceiver.contract.Call(opts, &out, "isAuthorizedSender", sender) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_AuthorizedReceiver *AuthorizedReceiverSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _AuthorizedReceiver.Contract.IsAuthorizedSender(&_AuthorizedReceiver.CallOpts, sender) +} + +func (_AuthorizedReceiver *AuthorizedReceiverCallerSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return 
_AuthorizedReceiver.Contract.IsAuthorizedSender(&_AuthorizedReceiver.CallOpts, sender) +} + +func (_AuthorizedReceiver *AuthorizedReceiverTransactor) SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) { + return _AuthorizedReceiver.contract.Transact(opts, "setAuthorizedSenders", senders) +} + +func (_AuthorizedReceiver *AuthorizedReceiverSession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _AuthorizedReceiver.Contract.SetAuthorizedSenders(&_AuthorizedReceiver.TransactOpts, senders) +} + +func (_AuthorizedReceiver *AuthorizedReceiverTransactorSession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _AuthorizedReceiver.Contract.SetAuthorizedSenders(&_AuthorizedReceiver.TransactOpts, senders) +} + +type AuthorizedReceiverAuthorizedSendersChangedIterator struct { + Event *AuthorizedReceiverAuthorizedSendersChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AuthorizedReceiverAuthorizedSendersChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AuthorizedReceiverAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AuthorizedReceiverAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AuthorizedReceiverAuthorizedSendersChangedIterator) Error() error { + return it.fail +} + +func (it *AuthorizedReceiverAuthorizedSendersChangedIterator) 
Close() error { + it.sub.Unsubscribe() + return nil +} + +type AuthorizedReceiverAuthorizedSendersChanged struct { + Senders []common.Address + ChangedBy common.Address + Raw types.Log +} + +func (_AuthorizedReceiver *AuthorizedReceiverFilterer) FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*AuthorizedReceiverAuthorizedSendersChangedIterator, error) { + + logs, sub, err := _AuthorizedReceiver.contract.FilterLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return &AuthorizedReceiverAuthorizedSendersChangedIterator{contract: _AuthorizedReceiver.contract, event: "AuthorizedSendersChanged", logs: logs, sub: sub}, nil +} + +func (_AuthorizedReceiver *AuthorizedReceiverFilterer) WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *AuthorizedReceiverAuthorizedSendersChanged) (event.Subscription, error) { + + logs, sub, err := _AuthorizedReceiver.contract.WatchLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AuthorizedReceiverAuthorizedSendersChanged) + if err := _AuthorizedReceiver.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AuthorizedReceiver *AuthorizedReceiverFilterer) ParseAuthorizedSendersChanged(log types.Log) (*AuthorizedReceiverAuthorizedSendersChanged, error) { + event := new(AuthorizedReceiverAuthorizedSendersChanged) + if err := _AuthorizedReceiver.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_AuthorizedReceiver *AuthorizedReceiver) ParseLog(log types.Log) 
(generated.AbigenLog, error) { + switch log.Topics[0] { + case _AuthorizedReceiver.abi.Events["AuthorizedSendersChanged"].ID: + return _AuthorizedReceiver.ParseAuthorizedSendersChanged(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (AuthorizedReceiverAuthorizedSendersChanged) Topic() common.Hash { + return common.HexToHash("0xf263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0") +} + +func (_AuthorizedReceiver *AuthorizedReceiver) Address() common.Address { + return _AuthorizedReceiver.address +} + +type AuthorizedReceiverInterface interface { + GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) + + IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) + + SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) + + FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*AuthorizedReceiverAuthorizedSendersChangedIterator, error) + + WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *AuthorizedReceiverAuthorizedSendersChanged) (event.Subscription, error) + + ParseAuthorizedSendersChanged(log types.Log) (*AuthorizedReceiverAuthorizedSendersChanged, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/internal/gethwrappers/generated/batch_vrf_coordinator_v2/batch_vrf_coordinator_v2.go b/core/internal/gethwrappers/generated/batch_vrf_coordinator_v2/batch_vrf_coordinator_v2.go new file mode 100644 index 00000000000..4be718a11bf --- /dev/null +++ b/core/internal/gethwrappers/generated/batch_vrf_coordinator_v2/batch_vrf_coordinator_v2.go @@ -0,0 +1,527 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package batch_vrf_coordinator_v2 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +type VRFTypesProof struct { + Pk [2]*big.Int + Gamma [2]*big.Int + C *big.Int + S *big.Int + Seed *big.Int + UWitness common.Address + CGammaWitness [2]*big.Int + SHashWitness [2]*big.Int + ZInv *big.Int +} + +type VRFTypesRequestCommitment struct { + BlockNum uint64 + SubId uint64 + CallbackGasLimit uint32 + NumWords uint32 + Sender common.Address +} + +var BatchVRFCoordinatorV2MetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"coordinatorAddr\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"reason\",\"type\":\"string\"}],\"name\":\"ErrorReturned\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"lowLevelData\",\"type\":\"bytes\"}],\"name\":\"RawErrorReturned\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"COORDINATOR\",\"outputs\":[{\"internalType\":\"contractVRFCoordinatorV2\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"cGammaWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sHashWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"internalType\":\"structVRFTypes.Proof[]\",\"name\":\"proofs\",\"type\":\"tuple[]\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"blockNum\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"address\"
,\"name\":\"sender\",\"type\":\"address\"}],\"internalType\":\"structVRFTypes.RequestCommitment[]\",\"name\":\"rcs\",\"type\":\"tuple[]\"}],\"name\":\"fulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60a060405234801561001057600080fd5b50604051610bab380380610bab83398101604081905261002f91610040565b6001600160a01b0316608052610070565b60006020828403121561005257600080fd5b81516001600160a01b038116811461006957600080fd5b9392505050565b608051610b1a610091600039600081816055015261011d0152610b1a6000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806308b2da0a1461003b5780633b2bcbf114610050575b600080fd5b61004e610049366004610655565b6100a0565b005b6100777f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b805182511461010f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f696e70757420617272617920617267206c656e67746873206d69736d61746368604482015260640160405180910390fd5b60005b8251811015610320577f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663af198b97848381518110610169576101696107b4565b6020026020010151848481518110610183576101836107b4565b60200260200101516040518363ffffffff1660e01b81526004016101a892919061080c565b6020604051808303816000875af1925050508015610201575060408051601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682019092526101fe9181019061090c565b60015b61030c5761020d61093a565b806308c379a0036102915750610221610955565b8061022c5750610293565b6000610250858481518110610243576102436107b4565b6020026020010151610325565b9050807f4dcab4ce0e741a040f7e0f9b880557f8de685a9520d4bfac272a81c3c3802b2e836040516102829190610a6a565b60405180910390a2505061030e565b505b3d8080156102bd576040519150601f19603f3d011682016040523d82523d6000602084013e6102c2565b606091505b5060006102da85848151811061024357610243
6107b4565b9050807fbfd42bb5a1bf8153ea750f66ea4944f23f7b9ae51d0462177b9769aa652b61b5836040516102829190610a6a565b505b8061031881610a7d565b915050610112565b505050565b6000806103358360000151610394565b9050808360800151604051602001610357929190918252602082015260400190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101209392505050565b6000816040516020016103a79190610adc565b604051602081830303815290604052805190602001209050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60a0810181811067ffffffffffffffff82111715610413576104136103c4565b60405250565b610120810167ffffffffffffffff81118282101715610413576104136103c4565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f830116810181811067ffffffffffffffff8211171561047e5761047e6103c4565b6040525050565b60405161049181610419565b90565b600067ffffffffffffffff8211156104ae576104ae6103c4565b5060051b60200190565b600082601f8301126104c957600080fd5b6040516040810181811067ffffffffffffffff821117156104ec576104ec6103c4565b806040525080604084018581111561050357600080fd5b845b8181101561051d578035835260209283019201610505565b509195945050505050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461054c57600080fd5b919050565b803567ffffffffffffffff8116811461054c57600080fd5b803563ffffffff8116811461054c57600080fd5b600082601f83011261058e57600080fd5b8135602061059b82610494565b604080516105a9838261043a565b84815260a094850287018401948482019350888611156105c857600080fd5b8488015b868110156106475781818b0312156105e45760008081fd5b83516105ef816103f3565b6105f882610551565b8152610605878301610551565b87820152610614858301610569565b858201526060610625818401610569565b908201526080610636838201610528565b9082015285529385019381016105cc565b509098975050505050505050565b6000806040838503121561066857600080fd5b823567ffffffffffffffff8082111561068057600080fd5b818501915085601f83011261069457600080fd5b813560206106a182610494565b6040516106ae828261043a565b8381526101a0938402860183019383
820192508a8511156106ce57600080fd5b958301955b848710156107865780878c0312156106eb5760008081fd5b6106f3610485565b6106fd8c896104b8565b815261070c8c60408a016104b8565b85820152608080890135604083015260a0808a0135606084015260c0808b01358385015260e0925061073f838c01610528565b8285015261010091506107548f838d016104b8565b908401526107668e6101408c016104b8565b9183019190915261018089013590820152835295860195918301916106d3565b509650508601359250508082111561079d57600080fd5b506107aa8582860161057d565b9150509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b8060005b60028110156108065781518452602093840193909101906001016107e7565b50505050565b6000610240820190506108208285516107e3565b602084015161083260408401826107e3565b5060408401516080830152606084015160a0830152608084015160c083015273ffffffffffffffffffffffffffffffffffffffff60a08501511660e083015260c0840151610100610885818501836107e3565b60e0860151915061089a6101408501836107e3565b85015161018084015250825167ffffffffffffffff9081166101a08401526020840151166101c0830152604083015163ffffffff9081166101e0840152606084015116610200830152608083015173ffffffffffffffffffffffffffffffffffffffff166102208301525b9392505050565b60006020828403121561091e57600080fd5b81516bffffffffffffffffffffffff8116811461090557600080fd5b600060033d11156104915760046000803e5060005160e01c90565b600060443d10156109635790565b6040517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc803d016004833e81513d67ffffffffffffffff81602484011181841117156109b157505050505090565b82850191508151818111156109c95750505050505090565b843d87010160208285010111156109e35750505050505090565b6109f26020828601018761043a565b509095945050505050565b60008151808452602060005b82811015610a24578481018201518682018301528101610a09565b82811115610a355760008284880101525b50807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8401168601019250505092915050565b60208152600061090560208301846109fd565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610ad5
577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b60408101818360005b6002811015610b04578151835260209283019290910190600101610ae5565b5050509291505056fea164736f6c634300080d000a", +} + +var BatchVRFCoordinatorV2ABI = BatchVRFCoordinatorV2MetaData.ABI + +var BatchVRFCoordinatorV2Bin = BatchVRFCoordinatorV2MetaData.Bin + +func DeployBatchVRFCoordinatorV2(auth *bind.TransactOpts, backend bind.ContractBackend, coordinatorAddr common.Address) (common.Address, *types.Transaction, *BatchVRFCoordinatorV2, error) { + parsed, err := BatchVRFCoordinatorV2MetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(BatchVRFCoordinatorV2Bin), backend, coordinatorAddr) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &BatchVRFCoordinatorV2{BatchVRFCoordinatorV2Caller: BatchVRFCoordinatorV2Caller{contract: contract}, BatchVRFCoordinatorV2Transactor: BatchVRFCoordinatorV2Transactor{contract: contract}, BatchVRFCoordinatorV2Filterer: BatchVRFCoordinatorV2Filterer{contract: contract}}, nil +} + +type BatchVRFCoordinatorV2 struct { + address common.Address + abi abi.ABI + BatchVRFCoordinatorV2Caller + BatchVRFCoordinatorV2Transactor + BatchVRFCoordinatorV2Filterer +} + +type BatchVRFCoordinatorV2Caller struct { + contract *bind.BoundContract +} + +type BatchVRFCoordinatorV2Transactor struct { + contract *bind.BoundContract +} + +type BatchVRFCoordinatorV2Filterer struct { + contract *bind.BoundContract +} + +type BatchVRFCoordinatorV2Session struct { + Contract *BatchVRFCoordinatorV2 + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type BatchVRFCoordinatorV2CallerSession struct { + Contract *BatchVRFCoordinatorV2Caller + CallOpts bind.CallOpts +} + +type 
BatchVRFCoordinatorV2TransactorSession struct { + Contract *BatchVRFCoordinatorV2Transactor + TransactOpts bind.TransactOpts +} + +type BatchVRFCoordinatorV2Raw struct { + Contract *BatchVRFCoordinatorV2 +} + +type BatchVRFCoordinatorV2CallerRaw struct { + Contract *BatchVRFCoordinatorV2Caller +} + +type BatchVRFCoordinatorV2TransactorRaw struct { + Contract *BatchVRFCoordinatorV2Transactor +} + +func NewBatchVRFCoordinatorV2(address common.Address, backend bind.ContractBackend) (*BatchVRFCoordinatorV2, error) { + abi, err := abi.JSON(strings.NewReader(BatchVRFCoordinatorV2ABI)) + if err != nil { + return nil, err + } + contract, err := bindBatchVRFCoordinatorV2(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2{address: address, abi: abi, BatchVRFCoordinatorV2Caller: BatchVRFCoordinatorV2Caller{contract: contract}, BatchVRFCoordinatorV2Transactor: BatchVRFCoordinatorV2Transactor{contract: contract}, BatchVRFCoordinatorV2Filterer: BatchVRFCoordinatorV2Filterer{contract: contract}}, nil +} + +func NewBatchVRFCoordinatorV2Caller(address common.Address, caller bind.ContractCaller) (*BatchVRFCoordinatorV2Caller, error) { + contract, err := bindBatchVRFCoordinatorV2(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2Caller{contract: contract}, nil +} + +func NewBatchVRFCoordinatorV2Transactor(address common.Address, transactor bind.ContractTransactor) (*BatchVRFCoordinatorV2Transactor, error) { + contract, err := bindBatchVRFCoordinatorV2(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2Transactor{contract: contract}, nil +} + +func NewBatchVRFCoordinatorV2Filterer(address common.Address, filterer bind.ContractFilterer) (*BatchVRFCoordinatorV2Filterer, error) { + contract, err := bindBatchVRFCoordinatorV2(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return 
&BatchVRFCoordinatorV2Filterer{contract: contract}, nil +} + +func bindBatchVRFCoordinatorV2(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(BatchVRFCoordinatorV2ABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BatchVRFCoordinatorV2.Contract.BatchVRFCoordinatorV2Caller.contract.Call(opts, result, method, params...) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2.Contract.BatchVRFCoordinatorV2Transactor.contract.Transfer(opts) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2.Contract.BatchVRFCoordinatorV2Transactor.contract.Transact(opts, method, params...) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BatchVRFCoordinatorV2.Contract.contract.Call(opts, result, method, params...) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2.Contract.contract.Transfer(opts) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2.Contract.contract.Transact(opts, method, params...) 
+} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Caller) COORDINATOR(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _BatchVRFCoordinatorV2.contract.Call(opts, &out, "COORDINATOR") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Session) COORDINATOR() (common.Address, error) { + return _BatchVRFCoordinatorV2.Contract.COORDINATOR(&_BatchVRFCoordinatorV2.CallOpts) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2CallerSession) COORDINATOR() (common.Address, error) { + return _BatchVRFCoordinatorV2.Contract.COORDINATOR(&_BatchVRFCoordinatorV2.CallOpts) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Transactor) FulfillRandomWords(opts *bind.TransactOpts, proofs []VRFTypesProof, rcs []VRFTypesRequestCommitment) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2.contract.Transact(opts, "fulfillRandomWords", proofs, rcs) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Session) FulfillRandomWords(proofs []VRFTypesProof, rcs []VRFTypesRequestCommitment) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2.Contract.FulfillRandomWords(&_BatchVRFCoordinatorV2.TransactOpts, proofs, rcs) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2TransactorSession) FulfillRandomWords(proofs []VRFTypesProof, rcs []VRFTypesRequestCommitment) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2.Contract.FulfillRandomWords(&_BatchVRFCoordinatorV2.TransactOpts, proofs, rcs) +} + +type BatchVRFCoordinatorV2ErrorReturnedIterator struct { + Event *BatchVRFCoordinatorV2ErrorReturned + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BatchVRFCoordinatorV2ErrorReturnedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if 
it.done { + select { + case log := <-it.logs: + it.Event = new(BatchVRFCoordinatorV2ErrorReturned) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BatchVRFCoordinatorV2ErrorReturned) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BatchVRFCoordinatorV2ErrorReturnedIterator) Error() error { + return it.fail +} + +func (it *BatchVRFCoordinatorV2ErrorReturnedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BatchVRFCoordinatorV2ErrorReturned struct { + RequestId *big.Int + Reason string + Raw types.Log +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Filterer) FilterErrorReturned(opts *bind.FilterOpts, requestId []*big.Int) (*BatchVRFCoordinatorV2ErrorReturnedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _BatchVRFCoordinatorV2.contract.FilterLogs(opts, "ErrorReturned", requestIdRule) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2ErrorReturnedIterator{contract: _BatchVRFCoordinatorV2.contract, event: "ErrorReturned", logs: logs, sub: sub}, nil +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Filterer) WatchErrorReturned(opts *bind.WatchOpts, sink chan<- *BatchVRFCoordinatorV2ErrorReturned, requestId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _BatchVRFCoordinatorV2.contract.WatchLogs(opts, "ErrorReturned", requestIdRule) + if err != nil { + 
return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BatchVRFCoordinatorV2ErrorReturned) + if err := _BatchVRFCoordinatorV2.contract.UnpackLog(event, "ErrorReturned", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Filterer) ParseErrorReturned(log types.Log) (*BatchVRFCoordinatorV2ErrorReturned, error) { + event := new(BatchVRFCoordinatorV2ErrorReturned) + if err := _BatchVRFCoordinatorV2.contract.UnpackLog(event, "ErrorReturned", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type BatchVRFCoordinatorV2RawErrorReturnedIterator struct { + Event *BatchVRFCoordinatorV2RawErrorReturned + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BatchVRFCoordinatorV2RawErrorReturnedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(BatchVRFCoordinatorV2RawErrorReturned) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BatchVRFCoordinatorV2RawErrorReturned) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BatchVRFCoordinatorV2RawErrorReturnedIterator) Error() error { + return it.fail +} + +func (it 
*BatchVRFCoordinatorV2RawErrorReturnedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BatchVRFCoordinatorV2RawErrorReturned struct { + RequestId *big.Int + LowLevelData []byte + Raw types.Log +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Filterer) FilterRawErrorReturned(opts *bind.FilterOpts, requestId []*big.Int) (*BatchVRFCoordinatorV2RawErrorReturnedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _BatchVRFCoordinatorV2.contract.FilterLogs(opts, "RawErrorReturned", requestIdRule) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2RawErrorReturnedIterator{contract: _BatchVRFCoordinatorV2.contract, event: "RawErrorReturned", logs: logs, sub: sub}, nil +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Filterer) WatchRawErrorReturned(opts *bind.WatchOpts, sink chan<- *BatchVRFCoordinatorV2RawErrorReturned, requestId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _BatchVRFCoordinatorV2.contract.WatchLogs(opts, "RawErrorReturned", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BatchVRFCoordinatorV2RawErrorReturned) + if err := _BatchVRFCoordinatorV2.contract.UnpackLog(event, "RawErrorReturned", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Filterer) ParseRawErrorReturned(log types.Log) 
(*BatchVRFCoordinatorV2RawErrorReturned, error) { + event := new(BatchVRFCoordinatorV2RawErrorReturned) + if err := _BatchVRFCoordinatorV2.contract.UnpackLog(event, "RawErrorReturned", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _BatchVRFCoordinatorV2.abi.Events["ErrorReturned"].ID: + return _BatchVRFCoordinatorV2.ParseErrorReturned(log) + case _BatchVRFCoordinatorV2.abi.Events["RawErrorReturned"].ID: + return _BatchVRFCoordinatorV2.ParseRawErrorReturned(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (BatchVRFCoordinatorV2ErrorReturned) Topic() common.Hash { + return common.HexToHash("0x4dcab4ce0e741a040f7e0f9b880557f8de685a9520d4bfac272a81c3c3802b2e") +} + +func (BatchVRFCoordinatorV2RawErrorReturned) Topic() common.Hash { + return common.HexToHash("0xbfd42bb5a1bf8153ea750f66ea4944f23f7b9ae51d0462177b9769aa652b61b5") +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2) Address() common.Address { + return _BatchVRFCoordinatorV2.address +} + +type BatchVRFCoordinatorV2Interface interface { + COORDINATOR(opts *bind.CallOpts) (common.Address, error) + + FulfillRandomWords(opts *bind.TransactOpts, proofs []VRFTypesProof, rcs []VRFTypesRequestCommitment) (*types.Transaction, error) + + FilterErrorReturned(opts *bind.FilterOpts, requestId []*big.Int) (*BatchVRFCoordinatorV2ErrorReturnedIterator, error) + + WatchErrorReturned(opts *bind.WatchOpts, sink chan<- *BatchVRFCoordinatorV2ErrorReturned, requestId []*big.Int) (event.Subscription, error) + + ParseErrorReturned(log types.Log) (*BatchVRFCoordinatorV2ErrorReturned, error) + + FilterRawErrorReturned(opts *bind.FilterOpts, requestId []*big.Int) (*BatchVRFCoordinatorV2RawErrorReturnedIterator, error) + + WatchRawErrorReturned(opts *bind.WatchOpts, sink chan<- 
*BatchVRFCoordinatorV2RawErrorReturned, requestId []*big.Int) (event.Subscription, error) + + ParseRawErrorReturned(log types.Log) (*BatchVRFCoordinatorV2RawErrorReturned, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/internal/gethwrappers/generated/cron_upkeep_factory_wrapper/cron_upkeep_factory_wrapper.go b/core/internal/gethwrappers/generated/cron_upkeep_factory_wrapper/cron_upkeep_factory_wrapper.go new file mode 100644 index 00000000000..c7427408781 --- /dev/null +++ b/core/internal/gethwrappers/generated/cron_upkeep_factory_wrapper/cron_upkeep_factory_wrapper.go @@ -0,0 +1,786 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package cron_upkeep_factory_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +var CronUpkeepFactoryMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"upkeep\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"NewCronUpkeepCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"cronDelegateAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"handler\",\"type\":\"bytes\"},{\"internalType\":\"string\",\"name\":\"cronString\",\"type\":\"string\"}],\"name\":\"encodeCronJob\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"cronString\",\"type\":\"string\"}],\"name\":\"encodeCronString\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"newCronUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"encodedJob\",\"type\":\"bytes\"}],\
"name\":\"newCronUpkeepWithJob\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_maxJobs\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"maxJobs\",\"type\":\"uint256\"}],\"name\":\"setMaxJobs\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +var CronUpkeepFactoryABI = CronUpkeepFactoryMetaData.ABI + +type CronUpkeepFactory struct { + address common.Address + abi abi.ABI + CronUpkeepFactoryCaller + CronUpkeepFactoryTransactor + CronUpkeepFactoryFilterer +} + +type CronUpkeepFactoryCaller struct { + contract *bind.BoundContract +} + +type CronUpkeepFactoryTransactor struct { + contract *bind.BoundContract +} + +type CronUpkeepFactoryFilterer struct { + contract *bind.BoundContract +} + +type CronUpkeepFactorySession struct { + Contract *CronUpkeepFactory + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type CronUpkeepFactoryCallerSession struct { + Contract *CronUpkeepFactoryCaller + CallOpts bind.CallOpts +} + +type CronUpkeepFactoryTransactorSession struct { + Contract *CronUpkeepFactoryTransactor + TransactOpts bind.TransactOpts +} + +type CronUpkeepFactoryRaw struct { + Contract *CronUpkeepFactory +} + +type CronUpkeepFactoryCallerRaw struct { + Contract *CronUpkeepFactoryCaller +} + +type CronUpkeepFactoryTransactorRaw struct { + Contract *CronUpkeepFactoryTransactor +} + +func NewCronUpkeepFactory(address common.Address, backend bind.ContractBackend) (*CronUpkeepFactory, error) 
{ + abi, err := abi.JSON(strings.NewReader(CronUpkeepFactoryABI)) + if err != nil { + return nil, err + } + contract, err := bindCronUpkeepFactory(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &CronUpkeepFactory{address: address, abi: abi, CronUpkeepFactoryCaller: CronUpkeepFactoryCaller{contract: contract}, CronUpkeepFactoryTransactor: CronUpkeepFactoryTransactor{contract: contract}, CronUpkeepFactoryFilterer: CronUpkeepFactoryFilterer{contract: contract}}, nil +} + +func NewCronUpkeepFactoryCaller(address common.Address, caller bind.ContractCaller) (*CronUpkeepFactoryCaller, error) { + contract, err := bindCronUpkeepFactory(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &CronUpkeepFactoryCaller{contract: contract}, nil +} + +func NewCronUpkeepFactoryTransactor(address common.Address, transactor bind.ContractTransactor) (*CronUpkeepFactoryTransactor, error) { + contract, err := bindCronUpkeepFactory(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &CronUpkeepFactoryTransactor{contract: contract}, nil +} + +func NewCronUpkeepFactoryFilterer(address common.Address, filterer bind.ContractFilterer) (*CronUpkeepFactoryFilterer, error) { + contract, err := bindCronUpkeepFactory(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &CronUpkeepFactoryFilterer{contract: contract}, nil +} + +func bindCronUpkeepFactory(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(CronUpkeepFactoryABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +func (_CronUpkeepFactory *CronUpkeepFactoryRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return 
_CronUpkeepFactory.Contract.CronUpkeepFactoryCaller.contract.Call(opts, result, method, params...) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.CronUpkeepFactoryTransactor.contract.Transfer(opts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.CronUpkeepFactoryTransactor.contract.Transact(opts, method, params...) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _CronUpkeepFactory.Contract.contract.Call(opts, result, method, params...) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.contract.Transfer(opts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.contract.Transact(opts, method, params...) 
+} + +func (_CronUpkeepFactory *CronUpkeepFactoryCaller) CronDelegateAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _CronUpkeepFactory.contract.Call(opts, &out, "cronDelegateAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) CronDelegateAddress() (common.Address, error) { + return _CronUpkeepFactory.Contract.CronDelegateAddress(&_CronUpkeepFactory.CallOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCallerSession) CronDelegateAddress() (common.Address, error) { + return _CronUpkeepFactory.Contract.CronDelegateAddress(&_CronUpkeepFactory.CallOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCaller) EncodeCronJob(opts *bind.CallOpts, target common.Address, handler []byte, cronString string) ([]byte, error) { + var out []interface{} + err := _CronUpkeepFactory.contract.Call(opts, &out, "encodeCronJob", target, handler, cronString) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) EncodeCronJob(target common.Address, handler []byte, cronString string) ([]byte, error) { + return _CronUpkeepFactory.Contract.EncodeCronJob(&_CronUpkeepFactory.CallOpts, target, handler, cronString) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCallerSession) EncodeCronJob(target common.Address, handler []byte, cronString string) ([]byte, error) { + return _CronUpkeepFactory.Contract.EncodeCronJob(&_CronUpkeepFactory.CallOpts, target, handler, cronString) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCaller) EncodeCronString(opts *bind.CallOpts, cronString string) ([]byte, error) { + var out []interface{} + err := _CronUpkeepFactory.contract.Call(opts, &out, "encodeCronString", cronString) + + if err != nil { + return 
*new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) EncodeCronString(cronString string) ([]byte, error) { + return _CronUpkeepFactory.Contract.EncodeCronString(&_CronUpkeepFactory.CallOpts, cronString) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCallerSession) EncodeCronString(cronString string) ([]byte, error) { + return _CronUpkeepFactory.Contract.EncodeCronString(&_CronUpkeepFactory.CallOpts, cronString) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _CronUpkeepFactory.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) Owner() (common.Address, error) { + return _CronUpkeepFactory.Contract.Owner(&_CronUpkeepFactory.CallOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCallerSession) Owner() (common.Address, error) { + return _CronUpkeepFactory.Contract.Owner(&_CronUpkeepFactory.CallOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCaller) SMaxJobs(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _CronUpkeepFactory.contract.Call(opts, &out, "s_maxJobs") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) SMaxJobs() (*big.Int, error) { + return _CronUpkeepFactory.Contract.SMaxJobs(&_CronUpkeepFactory.CallOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCallerSession) SMaxJobs() (*big.Int, error) { + return _CronUpkeepFactory.Contract.SMaxJobs(&_CronUpkeepFactory.CallOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactor) AcceptOwnership(opts *bind.TransactOpts) 
(*types.Transaction, error) { + return _CronUpkeepFactory.contract.Transact(opts, "acceptOwnership") +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) AcceptOwnership() (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.AcceptOwnership(&_CronUpkeepFactory.TransactOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.AcceptOwnership(&_CronUpkeepFactory.TransactOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactor) NewCronUpkeep(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeepFactory.contract.Transact(opts, "newCronUpkeep") +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) NewCronUpkeep() (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.NewCronUpkeep(&_CronUpkeepFactory.TransactOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactorSession) NewCronUpkeep() (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.NewCronUpkeep(&_CronUpkeepFactory.TransactOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactor) NewCronUpkeepWithJob(opts *bind.TransactOpts, encodedJob []byte) (*types.Transaction, error) { + return _CronUpkeepFactory.contract.Transact(opts, "newCronUpkeepWithJob", encodedJob) +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) NewCronUpkeepWithJob(encodedJob []byte) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.NewCronUpkeepWithJob(&_CronUpkeepFactory.TransactOpts, encodedJob) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactorSession) NewCronUpkeepWithJob(encodedJob []byte) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.NewCronUpkeepWithJob(&_CronUpkeepFactory.TransactOpts, encodedJob) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactor) SetMaxJobs(opts *bind.TransactOpts, maxJobs *big.Int) (*types.Transaction, error) { + return _CronUpkeepFactory.contract.Transact(opts, 
"setMaxJobs", maxJobs) +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) SetMaxJobs(maxJobs *big.Int) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.SetMaxJobs(&_CronUpkeepFactory.TransactOpts, maxJobs) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactorSession) SetMaxJobs(maxJobs *big.Int) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.SetMaxJobs(&_CronUpkeepFactory.TransactOpts, maxJobs) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _CronUpkeepFactory.contract.Transact(opts, "transferOwnership", to) +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.TransferOwnership(&_CronUpkeepFactory.TransactOpts, to) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.TransferOwnership(&_CronUpkeepFactory.TransactOpts, to) +} + +type CronUpkeepFactoryNewCronUpkeepCreatedIterator struct { + Event *CronUpkeepFactoryNewCronUpkeepCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepFactoryNewCronUpkeepCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepFactoryNewCronUpkeepCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepFactoryNewCronUpkeepCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + 
return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepFactoryNewCronUpkeepCreatedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepFactoryNewCronUpkeepCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepFactoryNewCronUpkeepCreated struct { + Upkeep common.Address + Owner common.Address + Raw types.Log +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) FilterNewCronUpkeepCreated(opts *bind.FilterOpts) (*CronUpkeepFactoryNewCronUpkeepCreatedIterator, error) { + + logs, sub, err := _CronUpkeepFactory.contract.FilterLogs(opts, "NewCronUpkeepCreated") + if err != nil { + return nil, err + } + return &CronUpkeepFactoryNewCronUpkeepCreatedIterator{contract: _CronUpkeepFactory.contract, event: "NewCronUpkeepCreated", logs: logs, sub: sub}, nil +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) WatchNewCronUpkeepCreated(opts *bind.WatchOpts, sink chan<- *CronUpkeepFactoryNewCronUpkeepCreated) (event.Subscription, error) { + + logs, sub, err := _CronUpkeepFactory.contract.WatchLogs(opts, "NewCronUpkeepCreated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepFactoryNewCronUpkeepCreated) + if err := _CronUpkeepFactory.contract.UnpackLog(event, "NewCronUpkeepCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) ParseNewCronUpkeepCreated(log types.Log) (*CronUpkeepFactoryNewCronUpkeepCreated, error) { + event := new(CronUpkeepFactoryNewCronUpkeepCreated) + if err := _CronUpkeepFactory.contract.UnpackLog(event, 
"NewCronUpkeepCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepFactoryOwnershipTransferRequestedIterator struct { + Event *CronUpkeepFactoryOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepFactoryOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepFactoryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepFactoryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepFactoryOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepFactoryOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepFactoryOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepFactoryOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _CronUpkeepFactory.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err 
!= nil { + return nil, err + } + return &CronUpkeepFactoryOwnershipTransferRequestedIterator{contract: _CronUpkeepFactory.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *CronUpkeepFactoryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _CronUpkeepFactory.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepFactoryOwnershipTransferRequested) + if err := _CronUpkeepFactory.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) ParseOwnershipTransferRequested(log types.Log) (*CronUpkeepFactoryOwnershipTransferRequested, error) { + event := new(CronUpkeepFactoryOwnershipTransferRequested) + if err := _CronUpkeepFactory.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepFactoryOwnershipTransferredIterator struct { + Event *CronUpkeepFactoryOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*CronUpkeepFactoryOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepFactoryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepFactoryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepFactoryOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepFactoryOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepFactoryOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepFactoryOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _CronUpkeepFactory.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &CronUpkeepFactoryOwnershipTransferredIterator{contract: _CronUpkeepFactory.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *CronUpkeepFactoryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + 
var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _CronUpkeepFactory.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepFactoryOwnershipTransferred) + if err := _CronUpkeepFactory.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) ParseOwnershipTransferred(log types.Log) (*CronUpkeepFactoryOwnershipTransferred, error) { + event := new(CronUpkeepFactoryOwnershipTransferred) + if err := _CronUpkeepFactory.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_CronUpkeepFactory *CronUpkeepFactory) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _CronUpkeepFactory.abi.Events["NewCronUpkeepCreated"].ID: + return _CronUpkeepFactory.ParseNewCronUpkeepCreated(log) + case _CronUpkeepFactory.abi.Events["OwnershipTransferRequested"].ID: + return _CronUpkeepFactory.ParseOwnershipTransferRequested(log) + case _CronUpkeepFactory.abi.Events["OwnershipTransferred"].ID: + return _CronUpkeepFactory.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (CronUpkeepFactoryNewCronUpkeepCreated) Topic() common.Hash { + return 
common.HexToHash("0x959d571686b1c9343b61bdc3c0459760cb9695fcd4c4c64845e3b2cdd6865ced") +} + +func (CronUpkeepFactoryOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (CronUpkeepFactoryOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_CronUpkeepFactory *CronUpkeepFactory) Address() common.Address { + return _CronUpkeepFactory.address +} + +type CronUpkeepFactoryInterface interface { + CronDelegateAddress(opts *bind.CallOpts) (common.Address, error) + + EncodeCronJob(opts *bind.CallOpts, target common.Address, handler []byte, cronString string) ([]byte, error) + + EncodeCronString(opts *bind.CallOpts, cronString string) ([]byte, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SMaxJobs(opts *bind.CallOpts) (*big.Int, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + NewCronUpkeep(opts *bind.TransactOpts) (*types.Transaction, error) + + NewCronUpkeepWithJob(opts *bind.TransactOpts, encodedJob []byte) (*types.Transaction, error) + + SetMaxJobs(opts *bind.TransactOpts, maxJobs *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterNewCronUpkeepCreated(opts *bind.FilterOpts) (*CronUpkeepFactoryNewCronUpkeepCreatedIterator, error) + + WatchNewCronUpkeepCreated(opts *bind.WatchOpts, sink chan<- *CronUpkeepFactoryNewCronUpkeepCreated) (event.Subscription, error) + + ParseNewCronUpkeepCreated(log types.Log) (*CronUpkeepFactoryNewCronUpkeepCreated, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepFactoryOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *CronUpkeepFactoryOwnershipTransferRequested, 
from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*CronUpkeepFactoryOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepFactoryOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *CronUpkeepFactoryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*CronUpkeepFactoryOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/internal/gethwrappers/generated/cron_upkeep_wrapper/cron_upkeep_wrapper.go b/core/internal/gethwrappers/generated/cron_upkeep_wrapper/cron_upkeep_wrapper.go new file mode 100644 index 00000000000..f4a7f01e56f --- /dev/null +++ b/core/internal/gethwrappers/generated/cron_upkeep_wrapper/cron_upkeep_wrapper.go @@ -0,0 +1,1578 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package cron_upkeep_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +var CronUpkeepMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"delegate\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"maxJobs\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"firstJob\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"string\",\"name\":\"reason\",\"type\":\"string\"}],\"name\":\"CallFailed\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"CronJobIDNotFound\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ExceedsMaxJobs\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidHandler\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TickDoesntMatchSpec\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TickInFuture\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TickTooOld\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnknownFieldType\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\
"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"handler\",\"type\":\"bytes\"}],\"name\":\"CronJobCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"CronJobDeleted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"}],\"name\":\"CronJobExecuted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"handler\",\"type\":\"bytes\"}],\"name\":\"CronJobUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\
":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"handler\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"encodedCronSpec\",\"type\":\"bytes\"}],\"name\":\"createCronJobFromEncodedSpec\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"deleteCronJob\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getActiveCronJobIDs\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getCronJob\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"handler\",\"type\":\"bytes\"},{\"internalType\":\"string\",\"name\":\"cronString\",\"type\":\"string\"},{\"internalType\":\"uint256\",\"name\":\"nextTick\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"paused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":
[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_maxJobs\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"newTarget\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"newHandler\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"newEncodedCronSpec\",\"type\":\"bytes\"}],\"name\":\"updateCronJob\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", +} + +var CronUpkeepABI = CronUpkeepMetaData.ABI + +type CronUpkeep struct { + address common.Address + abi abi.ABI + CronUpkeepCaller + CronUpkeepTransactor + CronUpkeepFilterer +} + +type CronUpkeepCaller struct { + contract *bind.BoundContract +} + +type CronUpkeepTransactor struct { + contract *bind.BoundContract +} + +type CronUpkeepFilterer struct { + contract *bind.BoundContract +} + +type CronUpkeepSession struct { + Contract *CronUpkeep + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type CronUpkeepCallerSession struct { + Contract *CronUpkeepCaller + CallOpts bind.CallOpts +} + +type CronUpkeepTransactorSession struct { + Contract *CronUpkeepTransactor + TransactOpts bind.TransactOpts +} + +type CronUpkeepRaw struct { + Contract *CronUpkeep +} + +type CronUpkeepCallerRaw struct { + Contract *CronUpkeepCaller +} + +type CronUpkeepTransactorRaw struct { + Contract *CronUpkeepTransactor +} + +func NewCronUpkeep(address common.Address, backend bind.ContractBackend) 
(*CronUpkeep, error) { + abi, err := abi.JSON(strings.NewReader(CronUpkeepABI)) + if err != nil { + return nil, err + } + contract, err := bindCronUpkeep(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &CronUpkeep{address: address, abi: abi, CronUpkeepCaller: CronUpkeepCaller{contract: contract}, CronUpkeepTransactor: CronUpkeepTransactor{contract: contract}, CronUpkeepFilterer: CronUpkeepFilterer{contract: contract}}, nil +} + +func NewCronUpkeepCaller(address common.Address, caller bind.ContractCaller) (*CronUpkeepCaller, error) { + contract, err := bindCronUpkeep(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &CronUpkeepCaller{contract: contract}, nil +} + +func NewCronUpkeepTransactor(address common.Address, transactor bind.ContractTransactor) (*CronUpkeepTransactor, error) { + contract, err := bindCronUpkeep(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &CronUpkeepTransactor{contract: contract}, nil +} + +func NewCronUpkeepFilterer(address common.Address, filterer bind.ContractFilterer) (*CronUpkeepFilterer, error) { + contract, err := bindCronUpkeep(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &CronUpkeepFilterer{contract: contract}, nil +} + +func bindCronUpkeep(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(CronUpkeepABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +func (_CronUpkeep *CronUpkeepRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _CronUpkeep.Contract.CronUpkeepCaller.contract.Call(opts, result, method, params...) 
+} + +func (_CronUpkeep *CronUpkeepRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeep.Contract.CronUpkeepTransactor.contract.Transfer(opts) +} + +func (_CronUpkeep *CronUpkeepRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _CronUpkeep.Contract.CronUpkeepTransactor.contract.Transact(opts, method, params...) +} + +func (_CronUpkeep *CronUpkeepCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _CronUpkeep.Contract.contract.Call(opts, result, method, params...) +} + +func (_CronUpkeep *CronUpkeepTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeep.Contract.contract.Transfer(opts) +} + +func (_CronUpkeep *CronUpkeepTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _CronUpkeep.Contract.contract.Transact(opts, method, params...) 
+} + +func (_CronUpkeep *CronUpkeepCaller) GetActiveCronJobIDs(opts *bind.CallOpts) ([]*big.Int, error) { + var out []interface{} + err := _CronUpkeep.contract.Call(opts, &out, "getActiveCronJobIDs") + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_CronUpkeep *CronUpkeepSession) GetActiveCronJobIDs() ([]*big.Int, error) { + return _CronUpkeep.Contract.GetActiveCronJobIDs(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepCallerSession) GetActiveCronJobIDs() ([]*big.Int, error) { + return _CronUpkeep.Contract.GetActiveCronJobIDs(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepCaller) GetCronJob(opts *bind.CallOpts, id *big.Int) (GetCronJob, + + error) { + var out []interface{} + err := _CronUpkeep.contract.Call(opts, &out, "getCronJob", id) + + outstruct := new(GetCronJob) + if err != nil { + return *outstruct, err + } + + outstruct.Target = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + outstruct.Handler = *abi.ConvertType(out[1], new([]byte)).(*[]byte) + outstruct.CronString = *abi.ConvertType(out[2], new(string)).(*string) + outstruct.NextTick = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_CronUpkeep *CronUpkeepSession) GetCronJob(id *big.Int) (GetCronJob, + + error) { + return _CronUpkeep.Contract.GetCronJob(&_CronUpkeep.CallOpts, id) +} + +func (_CronUpkeep *CronUpkeepCallerSession) GetCronJob(id *big.Int) (GetCronJob, + + error) { + return _CronUpkeep.Contract.GetCronJob(&_CronUpkeep.CallOpts, id) +} + +func (_CronUpkeep *CronUpkeepCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _CronUpkeep.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_CronUpkeep *CronUpkeepSession) 
Owner() (common.Address, error) { + return _CronUpkeep.Contract.Owner(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepCallerSession) Owner() (common.Address, error) { + return _CronUpkeep.Contract.Owner(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepCaller) Paused(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _CronUpkeep.contract.Call(opts, &out, "paused") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_CronUpkeep *CronUpkeepSession) Paused() (bool, error) { + return _CronUpkeep.Contract.Paused(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepCallerSession) Paused() (bool, error) { + return _CronUpkeep.Contract.Paused(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepCaller) SMaxJobs(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _CronUpkeep.contract.Call(opts, &out, "s_maxJobs") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_CronUpkeep *CronUpkeepSession) SMaxJobs() (*big.Int, error) { + return _CronUpkeep.Contract.SMaxJobs(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepCallerSession) SMaxJobs() (*big.Int, error) { + return _CronUpkeep.Contract.SMaxJobs(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "acceptOwnership") +} + +func (_CronUpkeep *CronUpkeepSession) AcceptOwnership() (*types.Transaction, error) { + return _CronUpkeep.Contract.AcceptOwnership(&_CronUpkeep.TransactOpts) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _CronUpkeep.Contract.AcceptOwnership(&_CronUpkeep.TransactOpts) +} + +func (_CronUpkeep *CronUpkeepTransactor) CheckUpkeep(opts *bind.TransactOpts, 
arg0 []byte) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "checkUpkeep", arg0) +} + +func (_CronUpkeep *CronUpkeepSession) CheckUpkeep(arg0 []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.CheckUpkeep(&_CronUpkeep.TransactOpts, arg0) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) CheckUpkeep(arg0 []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.CheckUpkeep(&_CronUpkeep.TransactOpts, arg0) +} + +func (_CronUpkeep *CronUpkeepTransactor) CreateCronJobFromEncodedSpec(opts *bind.TransactOpts, target common.Address, handler []byte, encodedCronSpec []byte) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "createCronJobFromEncodedSpec", target, handler, encodedCronSpec) +} + +func (_CronUpkeep *CronUpkeepSession) CreateCronJobFromEncodedSpec(target common.Address, handler []byte, encodedCronSpec []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.CreateCronJobFromEncodedSpec(&_CronUpkeep.TransactOpts, target, handler, encodedCronSpec) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) CreateCronJobFromEncodedSpec(target common.Address, handler []byte, encodedCronSpec []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.CreateCronJobFromEncodedSpec(&_CronUpkeep.TransactOpts, target, handler, encodedCronSpec) +} + +func (_CronUpkeep *CronUpkeepTransactor) DeleteCronJob(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "deleteCronJob", id) +} + +func (_CronUpkeep *CronUpkeepSession) DeleteCronJob(id *big.Int) (*types.Transaction, error) { + return _CronUpkeep.Contract.DeleteCronJob(&_CronUpkeep.TransactOpts, id) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) DeleteCronJob(id *big.Int) (*types.Transaction, error) { + return _CronUpkeep.Contract.DeleteCronJob(&_CronUpkeep.TransactOpts, id) +} + +func (_CronUpkeep *CronUpkeepTransactor) Pause(opts *bind.TransactOpts) 
(*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "pause") +} + +func (_CronUpkeep *CronUpkeepSession) Pause() (*types.Transaction, error) { + return _CronUpkeep.Contract.Pause(&_CronUpkeep.TransactOpts) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) Pause() (*types.Transaction, error) { + return _CronUpkeep.Contract.Pause(&_CronUpkeep.TransactOpts) +} + +func (_CronUpkeep *CronUpkeepTransactor) PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "performUpkeep", performData) +} + +func (_CronUpkeep *CronUpkeepSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.PerformUpkeep(&_CronUpkeep.TransactOpts, performData) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.PerformUpkeep(&_CronUpkeep.TransactOpts, performData) +} + +func (_CronUpkeep *CronUpkeepTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "transferOwnership", to) +} + +func (_CronUpkeep *CronUpkeepSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _CronUpkeep.Contract.TransferOwnership(&_CronUpkeep.TransactOpts, to) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _CronUpkeep.Contract.TransferOwnership(&_CronUpkeep.TransactOpts, to) +} + +func (_CronUpkeep *CronUpkeepTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "unpause") +} + +func (_CronUpkeep *CronUpkeepSession) Unpause() (*types.Transaction, error) { + return _CronUpkeep.Contract.Unpause(&_CronUpkeep.TransactOpts) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) Unpause() (*types.Transaction, error) { + 
return _CronUpkeep.Contract.Unpause(&_CronUpkeep.TransactOpts) +} + +func (_CronUpkeep *CronUpkeepTransactor) UpdateCronJob(opts *bind.TransactOpts, id *big.Int, newTarget common.Address, newHandler []byte, newEncodedCronSpec []byte) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "updateCronJob", id, newTarget, newHandler, newEncodedCronSpec) +} + +func (_CronUpkeep *CronUpkeepSession) UpdateCronJob(id *big.Int, newTarget common.Address, newHandler []byte, newEncodedCronSpec []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.UpdateCronJob(&_CronUpkeep.TransactOpts, id, newTarget, newHandler, newEncodedCronSpec) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) UpdateCronJob(id *big.Int, newTarget common.Address, newHandler []byte, newEncodedCronSpec []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.UpdateCronJob(&_CronUpkeep.TransactOpts, id, newTarget, newHandler, newEncodedCronSpec) +} + +func (_CronUpkeep *CronUpkeepTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { + return _CronUpkeep.contract.RawTransact(opts, calldata) +} + +func (_CronUpkeep *CronUpkeepSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.Fallback(&_CronUpkeep.TransactOpts, calldata) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.Fallback(&_CronUpkeep.TransactOpts, calldata) +} + +func (_CronUpkeep *CronUpkeepTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeep.contract.RawTransact(opts, nil) +} + +func (_CronUpkeep *CronUpkeepSession) Receive() (*types.Transaction, error) { + return _CronUpkeep.Contract.Receive(&_CronUpkeep.TransactOpts) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) Receive() (*types.Transaction, error) { + return _CronUpkeep.Contract.Receive(&_CronUpkeep.TransactOpts) +} + +type 
CronUpkeepCronJobCreatedIterator struct { + Event *CronUpkeepCronJobCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepCronJobCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepCronJobCreatedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepCronJobCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepCronJobCreated struct { + Id *big.Int + Target common.Address + Handler []byte + Raw types.Log +} + +func (_CronUpkeep *CronUpkeepFilterer) FilterCronJobCreated(opts *bind.FilterOpts, id []*big.Int) (*CronUpkeepCronJobCreatedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "CronJobCreated", idRule) + if err != nil { + return nil, err + } + return &CronUpkeepCronJobCreatedIterator{contract: _CronUpkeep.contract, event: "CronJobCreated", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchCronJobCreated(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobCreated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := 
_CronUpkeep.contract.WatchLogs(opts, "CronJobCreated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepCronJobCreated) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParseCronJobCreated(log types.Log) (*CronUpkeepCronJobCreated, error) { + event := new(CronUpkeepCronJobCreated) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepCronJobDeletedIterator struct { + Event *CronUpkeepCronJobDeleted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepCronJobDeletedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobDeleted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobDeleted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepCronJobDeletedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepCronJobDeletedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + 
+type CronUpkeepCronJobDeleted struct { + Id *big.Int + Raw types.Log +} + +func (_CronUpkeep *CronUpkeepFilterer) FilterCronJobDeleted(opts *bind.FilterOpts, id []*big.Int) (*CronUpkeepCronJobDeletedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "CronJobDeleted", idRule) + if err != nil { + return nil, err + } + return &CronUpkeepCronJobDeletedIterator{contract: _CronUpkeep.contract, event: "CronJobDeleted", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchCronJobDeleted(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobDeleted, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _CronUpkeep.contract.WatchLogs(opts, "CronJobDeleted", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepCronJobDeleted) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobDeleted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParseCronJobDeleted(log types.Log) (*CronUpkeepCronJobDeleted, error) { + event := new(CronUpkeepCronJobDeleted) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobDeleted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepCronJobExecutedIterator struct { + Event *CronUpkeepCronJobExecuted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*CronUpkeepCronJobExecutedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobExecuted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobExecuted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepCronJobExecutedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepCronJobExecutedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepCronJobExecuted struct { + Id *big.Int + Timestamp *big.Int + Raw types.Log +} + +func (_CronUpkeep *CronUpkeepFilterer) FilterCronJobExecuted(opts *bind.FilterOpts, id []*big.Int) (*CronUpkeepCronJobExecutedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "CronJobExecuted", idRule) + if err != nil { + return nil, err + } + return &CronUpkeepCronJobExecutedIterator{contract: _CronUpkeep.contract, event: "CronJobExecuted", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchCronJobExecuted(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobExecuted, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _CronUpkeep.contract.WatchLogs(opts, "CronJobExecuted", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: 
+ + event := new(CronUpkeepCronJobExecuted) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobExecuted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParseCronJobExecuted(log types.Log) (*CronUpkeepCronJobExecuted, error) { + event := new(CronUpkeepCronJobExecuted) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobExecuted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepCronJobUpdatedIterator struct { + Event *CronUpkeepCronJobUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepCronJobUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepCronJobUpdatedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepCronJobUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepCronJobUpdated struct { + Id *big.Int + Target common.Address + Handler []byte + Raw types.Log +} + +func (_CronUpkeep *CronUpkeepFilterer) FilterCronJobUpdated(opts *bind.FilterOpts, id []*big.Int) 
(*CronUpkeepCronJobUpdatedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "CronJobUpdated", idRule) + if err != nil { + return nil, err + } + return &CronUpkeepCronJobUpdatedIterator{contract: _CronUpkeep.contract, event: "CronJobUpdated", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchCronJobUpdated(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobUpdated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _CronUpkeep.contract.WatchLogs(opts, "CronJobUpdated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepCronJobUpdated) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParseCronJobUpdated(log types.Log) (*CronUpkeepCronJobUpdated, error) { + event := new(CronUpkeepCronJobUpdated) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepOwnershipTransferRequestedIterator struct { + Event *CronUpkeepOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + 
it.Event = new(CronUpkeepOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_CronUpkeep *CronUpkeepFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &CronUpkeepOwnershipTransferRequestedIterator{contract: _CronUpkeep.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *CronUpkeepOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = 
append(toRule, toItem) + } + + logs, sub, err := _CronUpkeep.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepOwnershipTransferRequested) + if err := _CronUpkeep.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParseOwnershipTransferRequested(log types.Log) (*CronUpkeepOwnershipTransferRequested, error) { + event := new(CronUpkeepOwnershipTransferRequested) + if err := _CronUpkeep.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepOwnershipTransferredIterator struct { + Event *CronUpkeepOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*CronUpkeepOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_CronUpkeep *CronUpkeepFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &CronUpkeepOwnershipTransferredIterator{contract: _CronUpkeep.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *CronUpkeepOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _CronUpkeep.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepOwnershipTransferred) + if err := _CronUpkeep.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + 
return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParseOwnershipTransferred(log types.Log) (*CronUpkeepOwnershipTransferred, error) { + event := new(CronUpkeepOwnershipTransferred) + if err := _CronUpkeep.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepPausedIterator struct { + Event *CronUpkeepPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepPausedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepPaused struct { + Account common.Address + Raw types.Log +} + +func (_CronUpkeep *CronUpkeepFilterer) FilterPaused(opts *bind.FilterOpts) (*CronUpkeepPausedIterator, error) { + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &CronUpkeepPausedIterator{contract: _CronUpkeep.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *CronUpkeepPaused) (event.Subscription, error) { + + logs, sub, err := 
_CronUpkeep.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepPaused) + if err := _CronUpkeep.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParsePaused(log types.Log) (*CronUpkeepPaused, error) { + event := new(CronUpkeepPaused) + if err := _CronUpkeep.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepUnpausedIterator struct { + Event *CronUpkeepUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepUnpausedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_CronUpkeep 
*CronUpkeepFilterer) FilterUnpaused(opts *bind.FilterOpts) (*CronUpkeepUnpausedIterator, error) { + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &CronUpkeepUnpausedIterator{contract: _CronUpkeep.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *CronUpkeepUnpaused) (event.Subscription, error) { + + logs, sub, err := _CronUpkeep.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepUnpaused) + if err := _CronUpkeep.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParseUnpaused(log types.Log) (*CronUpkeepUnpaused, error) { + event := new(CronUpkeepUnpaused) + if err := _CronUpkeep.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetCronJob struct { + Target common.Address + Handler []byte + CronString string + NextTick *big.Int +} + +func (_CronUpkeep *CronUpkeep) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _CronUpkeep.abi.Events["CronJobCreated"].ID: + return _CronUpkeep.ParseCronJobCreated(log) + case _CronUpkeep.abi.Events["CronJobDeleted"].ID: + return _CronUpkeep.ParseCronJobDeleted(log) + case _CronUpkeep.abi.Events["CronJobExecuted"].ID: + return _CronUpkeep.ParseCronJobExecuted(log) + case _CronUpkeep.abi.Events["CronJobUpdated"].ID: + return _CronUpkeep.ParseCronJobUpdated(log) + case 
_CronUpkeep.abi.Events["OwnershipTransferRequested"].ID: + return _CronUpkeep.ParseOwnershipTransferRequested(log) + case _CronUpkeep.abi.Events["OwnershipTransferred"].ID: + return _CronUpkeep.ParseOwnershipTransferred(log) + case _CronUpkeep.abi.Events["Paused"].ID: + return _CronUpkeep.ParsePaused(log) + case _CronUpkeep.abi.Events["Unpaused"].ID: + return _CronUpkeep.ParseUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (CronUpkeepCronJobCreated) Topic() common.Hash { + return common.HexToHash("0xe66fb0bca0f9d6a395d3eaf5f39c6ac87dd34aff4e3f2f9a9b33a46f15589627") +} + +func (CronUpkeepCronJobDeleted) Topic() common.Hash { + return common.HexToHash("0x7aaa5a7c35e162386d922bd67e91ea476d38d9bb931bc369d8b15ab113250974") +} + +func (CronUpkeepCronJobExecuted) Topic() common.Hash { + return common.HexToHash("0x30f05dfc7571f43926790e295bb282b76b7174d9121c31c2b26def175b63a759") +} + +func (CronUpkeepCronJobUpdated) Topic() common.Hash { + return common.HexToHash("0xeeaf6ad42034ba5357ffd961b8c80bf6cbf53c224020541e46573a3f19ef09a5") +} + +func (CronUpkeepOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (CronUpkeepOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (CronUpkeepPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (CronUpkeepUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (_CronUpkeep *CronUpkeep) Address() common.Address { + return _CronUpkeep.address +} + +type CronUpkeepInterface interface { + GetActiveCronJobIDs(opts *bind.CallOpts) ([]*big.Int, error) + + GetCronJob(opts *bind.CallOpts, id *big.Int) 
(GetCronJob, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + Paused(opts *bind.CallOpts) (bool, error) + + SMaxJobs(opts *bind.CallOpts) (*big.Int, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + CheckUpkeep(opts *bind.TransactOpts, arg0 []byte) (*types.Transaction, error) + + CreateCronJobFromEncodedSpec(opts *bind.TransactOpts, target common.Address, handler []byte, encodedCronSpec []byte) (*types.Transaction, error) + + DeleteCronJob(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + Pause(opts *bind.TransactOpts) (*types.Transaction, error) + + PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Unpause(opts *bind.TransactOpts) (*types.Transaction, error) + + UpdateCronJob(opts *bind.TransactOpts, id *big.Int, newTarget common.Address, newHandler []byte, newEncodedCronSpec []byte) (*types.Transaction, error) + + Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) + + Receive(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterCronJobCreated(opts *bind.FilterOpts, id []*big.Int) (*CronUpkeepCronJobCreatedIterator, error) + + WatchCronJobCreated(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobCreated, id []*big.Int) (event.Subscription, error) + + ParseCronJobCreated(log types.Log) (*CronUpkeepCronJobCreated, error) + + FilterCronJobDeleted(opts *bind.FilterOpts, id []*big.Int) (*CronUpkeepCronJobDeletedIterator, error) + + WatchCronJobDeleted(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobDeleted, id []*big.Int) (event.Subscription, error) + + ParseCronJobDeleted(log types.Log) (*CronUpkeepCronJobDeleted, error) + + FilterCronJobExecuted(opts *bind.FilterOpts, id []*big.Int) (*CronUpkeepCronJobExecutedIterator, error) + + WatchCronJobExecuted(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobExecuted, id 
[]*big.Int) (event.Subscription, error) + + ParseCronJobExecuted(log types.Log) (*CronUpkeepCronJobExecuted, error) + + FilterCronJobUpdated(opts *bind.FilterOpts, id []*big.Int) (*CronUpkeepCronJobUpdatedIterator, error) + + WatchCronJobUpdated(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobUpdated, id []*big.Int) (event.Subscription, error) + + ParseCronJobUpdated(log types.Log) (*CronUpkeepCronJobUpdated, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *CronUpkeepOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*CronUpkeepOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *CronUpkeepOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*CronUpkeepOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*CronUpkeepPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *CronUpkeepPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*CronUpkeepPaused, error) + + FilterUnpaused(opts *bind.FilterOpts) (*CronUpkeepUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *CronUpkeepUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*CronUpkeepUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/internal/gethwrappers/generated/keeper_registry_vb_wrapper/keeper_registry_vb_wrapper.go 
b/core/internal/gethwrappers/generated/keeper_registry_vb_wrapper/keeper_registry_vb_wrapper.go deleted file mode 100644 index 78c187cafba..00000000000 --- a/core/internal/gethwrappers/generated/keeper_registry_vb_wrapper/keeper_registry_vb_wrapper.go +++ /dev/null @@ -1,3221 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package keeper_registry_vb_wrapper - -import ( - "errors" - "fmt" - "math/big" - "strings" - - ethereum "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" - "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated" -) - -var ( - _ = errors.New - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription -) - -var KeeperRegistryVBMetaData = &bind.MetaData{ - ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkEthFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"fastGasFeed\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"mustTakeTurns\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"mustTakeTurns\",\"type\":\"bool\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type
\":\"uint32\"}],\"name\":\"FlatFeeSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"KeepersUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"
name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"RegistrarChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"n
ame\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"FAST_GAS_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"LINK\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"LINK_ETH_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"cancelUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"
name\":\"maxLinkPayment\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"adjustedGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkEth\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCanceledUpkeepList\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFlatFee\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getKeeperInfo\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getKeeperList\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\
"uint256\"}],\"name\":\"getMaxPaymentForGas\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"maxPayment\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalanceForUpkeep\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"minBalance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getMustTakeTurns\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRegistrar\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getUpkeep\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"lastKeeper\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"maxValidBlocknumber\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getUpkeepCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\
":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"paused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"bool
\",\"name\":\"mustTakeTurns\",\"type\":\"bool\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"setKeepers\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"registrar\",\"type\":\"address\"}],\"name\":\"setRegistrar\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", - Bin: 
"0x60e06040523480156200001157600080fd5b50604051620054da380380620054da83398181016040526101808110156200003857600080fd5b508051602082015160408301516060840151608085015160a086015160c087015160e08801516101008901516101208a01516101408b0151610160909b0151999a9899979896979596949593949293919290913380600081620000e2576040805162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f0000000000000000604482015290519081900360640190fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620001155762000115816200016e565b50506001600255506003805460ff191690556001600160601b031960608d811b82166080528c811b821660a0528b901b1660c0526200015c8989898989898989896200021e565b505050505050505050505050620004c3565b6001600160a01b038116331415620001cd576040805162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6200022862000461565b6040518060e001604052808a63ffffffff1681526020018963ffffffff1681526020018862ffffff1681526020018763ffffffff1681526020018662ffffff1681526020018561ffff168152602001821515815250600b60008201518160000160006101000a81548163ffffffff021916908363ffffffff16021790555060208201518160000160046101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160086101000a81548162ffffff021916908362ffffff160217905550606082015181600001600b6101000a81548163ffffffff021916908363ffffffff160217905550608082015181600001600f6101000a81548162ffffff021916908362ffffff16021790555060a08201518160000160126101000a81548161ffff021916908361ffff16021790555060c08201518160000160146101000a81548160ff02191690831515021790555090505082600c8190555081600d819055507f6db8cdacf21c3bbd6135926f497c6fba81fd6969684ecf85f56550d2b1f8e6918988888888888888604051808963ffffffff1681526020018862ffffff1681526020018763ffffffff1681526020018662fff
fff1681526020018561ffff16815260200184815260200183815260200182151581526020019850505050505050505060405180910390a16040805163ffffffff8a16815290517f17b46a44a823646eef686b7824df2962de896bc9a012a60b67694c5cbf184d8b9181900360200190a1505050505050505050565b6000546001600160a01b03163314620004c1576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b565b60805160601c60a05160601c60c05160601c614fb96200052160003980610d2c5280614218525080611c5852806142eb525080610c215280610fd252806113ce52806114a35280611bb55280611def5280611ebd5250614fb96000f3fe608060405234801561001057600080fd5b50600436106102415760003560e01c8063a4c0ed3611610145578063c41b813a116100bd578063eb5dcd6c1161008c578063f2fde38b11610071578063f2fde38b14610b09578063faab9d3914610b3c578063fecf27c914610b6f57610241565b8063eb5dcd6c14610ac6578063ebbece5b14610b0157610241565b8063c41b813a14610809578063c7c3a19a146108d6578063c8048022146109f5578063da5c674114610a1257610241565b8063b121e14711610114578063b79550be116100f9578063b79550be146106e7578063b7fdb436146106ef578063c3f909d4146107b157610241565b8063b121e14714610697578063b657bc9c146106ca57610241565b8063a4c0ed3614610558578063a6afef52146105ea578063a710b22114610654578063ad1783611461068f57610241565b80635c975abb116101d85780638456cb59116101a75780638da5cb5b1161018c5780638da5cb5b146104e157806393f0c1fc146104e9578063948108f71461052757610241565b80638456cb59146104b85780638a601fc8146104c057610241565b80635c975abb146103e4578063744bfe611461040057806379ba5097146104395780637bbaf1ea1461044157610241565b80632cb6864d116102145780632cb6864d146103c25780633f4ba83a146103ca5780634584a419146103d45780634d3f7334146103dc57610241565b806315a126ea14610246578063181f5a771461029e5780631b6b6d231461031b5780631e12b8a51461034c575b600080fd5b61024e610b77565b60408051602080825283518183015283519192839290830191858101910280838360005b8381101561028a578181015183820152602001610272565b505050509050019250505060405180910390f35b6102a6610be6565b6040805160208
082528351818301528351919283929083019185019080838360005b838110156102e05781810151838201526020016102c8565b50505050905090810190601f16801561030d5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610323610c1f565b6040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190f35b61037f6004803603602081101561036257600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16610c43565b6040805173ffffffffffffffffffffffffffffffffffffffff909416845291151560208401526bffffffffffffffffffffffff1682820152519081900360600190f35b61024e610cc1565b6103d2610d18565b005b610323610d2a565b610323610d4e565b6103ec610d6a565b604080519115158252519081900360200190f35b6103d26004803603604081101561041657600080fd5b508035906020013573ffffffffffffffffffffffffffffffffffffffff16610d73565b6103d2611093565b6103ec6004803603604081101561045757600080fd5b8135919081019060408101602082013564010000000081111561047957600080fd5b82018360208201111561048b57600080fd5b803590602001918460018302840111640100000000831117156104ad57600080fd5b509092509050611195565b6103d26111eb565b6104c86111fb565b6040805163ffffffff9092168252519081900360200190f35b61032361120f565b610506600480360360208110156104ff57600080fd5b503561122b565b604080516bffffffffffffffffffffffff9092168252519081900360200190f35b6103d26004803603604081101561053d57600080fd5b50803590602001356bffffffffffffffffffffffff16611261565b6103d26004803603606081101561056e57600080fd5b73ffffffffffffffffffffffffffffffffffffffff823516916020810135918101906060810160408201356401000000008111156105ab57600080fd5b8201836020820111156105bd57600080fd5b803590602001918460018302840111640100000000831117156105df57600080fd5b50909250905061148b565b6103d2600480360361012081101561060157600080fd5b5063ffffffff8135811691602081013582169162ffffff604083013581169260608101359092169160808101359091169061ffff60a0820135169060c08101359060e08101359061010001351515611727565b6103d26004803603604081101561066a57600080fd5b5073ffffffffffffffffffffffffffffffffffffffff81358116916020013516611968565b610323611c5
6565b6103d2600480360360208110156106ad57600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16611c7a565b610506600480360360208110156106e057600080fd5b5035611da7565b6103d2611de3565b6103d26004803603604081101561070557600080fd5b81019060208101813564010000000081111561072057600080fd5b82018360208201111561073257600080fd5b8035906020019184602083028401116401000000008311171561075457600080fd5b91939092909160208101903564010000000081111561077257600080fd5b82018360208201111561078457600080fd5b803590602001918460208302840111640100000000831117156107a657600080fd5b509092509050611f73565b6107b9612490565b6040805163ffffffff988916815262ffffff9788166020820152959097168588015292909416606084015261ffff16608083015260a082019290925260c081019190915290519081900360e00190f35b6108426004803603604081101561081f57600080fd5b508035906020013573ffffffffffffffffffffffffffffffffffffffff1661255f565b6040518080602001868152602001858152602001848152602001838152602001828103825287818151815260200191508051906020019080838360005b8381101561089757818101518382015260200161087f565b50505050905090810190601f1680156108c45780820380516001836020036101000a031916815260200191505b50965050505050505060405180910390f35b6108f3600480360360208110156108ec57600080fd5b5035612bfe565b604051808873ffffffffffffffffffffffffffffffffffffffff1681526020018763ffffffff16815260200180602001866bffffffffffffffffffffffff1681526020018573ffffffffffffffffffffffffffffffffffffffff1681526020018473ffffffffffffffffffffffffffffffffffffffff1681526020018367ffffffffffffffff168152602001828103825287818151815260200191508051906020019080838360005b838110156109b457818101518382015260200161099c565b50505050905090810190601f1680156109e15780820380516001836020036101000a031916815260200191505b509850505050505050505060405180910390f35b6103d260048036036020811015610a0b57600080fd5b5035612da7565b610ab460048036036080811015610a2857600080fd5b73ffffffffffffffffffffffffffffffffffffffff823581169263ffffffff60208201351692604082013590921691810190608081016060820135640100000000811115610a7557600080fd5b820183602082011
115610a8757600080fd5b80359060200191846001830284011164010000000083111715610aa957600080fd5b509092509050613008565b60408051918252519081900360200190f35b6103d260048036036040811015610adc57600080fd5b5073ffffffffffffffffffffffffffffffffffffffff81358116916020013516613454565b6103ec61361e565b6103d260048036036020811015610b1f57600080fd5b503573ffffffffffffffffffffffffffffffffffffffff1661363f565b6103d260048036036020811015610b5257600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16613653565b610ab4613802565b60606006805480602002602001604051908101604052809291908181526020018280548015610bdc57602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610bb1575b5050505050905090565b6040518060400160405280601481526020017f4b6565706572526567697374727920312e312e3000000000000000000000000081525081565b7f000000000000000000000000000000000000000000000000000000000000000081565b73ffffffffffffffffffffffffffffffffffffffff90811660009081526008602090815260409182902082516060810184528154948516808252740100000000000000000000000000000000000000009095046bffffffffffffffffffffffff1692810183905260019091015460ff16151592018290529192909190565b60606005805480602002602001604051908101604052809291908181526020018280548015610bdc57602002820191906000526020600020905b815481526020019060010190808311610cfb575050505050905090565b610d20613808565b610d2861388e565b565b7f000000000000000000000000000000000000000000000000000000000000000081565b600f5473ffffffffffffffffffffffffffffffffffffffff1690565b60035460ff1690565b8073ffffffffffffffffffffffffffffffffffffffff8116610df657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f63616e6e6f742073656e6420746f207a65726f20616464726573730000000000604482015290519081900360640190fd5b6000838152600760205260409020600101546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff163314610e9b57604080517f08c379a00000000000000000000000000000000000000000000000000000000081526020600482015260166
0248201527f6f6e6c792063616c6c61626c652062792061646d696e00000000000000000000604482015290519081900360640190fd5b6000838152600760205260409020600201544367ffffffffffffffff9091161115610f2757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f75706b656570206d7573742062652063616e63656c6564000000000000000000604482015290519081900360640190fd5b600083815260076020526040902060010180547fffffffffffffffffffffffffffffffffffffffff0000000000000000000000008116909155600e546bffffffffffffffffffffffff90911690610f7e908261397c565b600e556040805182815273ffffffffffffffffffffffffffffffffffffffff85166020820152815186927ff3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318928290030190a27f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb84836040518363ffffffff1660e01b8152600401808373ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b15801561106157600080fd5b505af1158015611075573d6000803e3d6000fd5b505050506040513d602081101561108b57600080fd5b505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff16331461111957604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015290519081900360640190fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60006111e36111de338686868080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250600192506139f3915050565b613ab4565b949350505050565b6111f3613808565b610d2861411d565b600b54640100000000900463ffffffff1690565b60005473ffffffffffffffffffffffffffffffffffffffff1690565b60008060006112386141e5565b9150915060006112498360006143c4565b9050611
25685828461440a565b93505050505b919050565b60008281526007602052604090206002015467ffffffffffffffff908116146112eb57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f75706b656570206d757374206265206163746976650000000000000000000000604482015290519081900360640190fd5b600082815260076020526040902060010154611315906bffffffffffffffffffffffff16826145db565b600083815260076020526040902060010180547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff928316179055600e5461136b918316614667565b600e55604080517f23b872dd0000000000000000000000000000000000000000000000000000000081523360048201523060248201526bffffffffffffffffffffffff83166044820152905173ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016916323b872dd9160648083019260209291908290030181600087803b15801561141657600080fd5b505af115801561142a573d6000803e3d6000fd5b505050506040513d602081101561144057600080fd5b5050604080516bffffffffffffffffffffffff831681529051339184917fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039181900360200190a35050565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461152f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f6f6e6c792063616c6c61626c65207468726f756768204c494e4b000000000000604482015290519081900360640190fd5b6020811461159e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f64617461206d7573742062652033322062797465730000000000000000000000604482015290519081900360640190fd5b6000828260208110156115b057600080fd5b503560008181526007602052604090206002015490915067ffffffffffffffff9081161461163f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f75706b656570206d7573742062652061637469766500000000000000000000006044820152905
19081900360640190fd5b600081815260076020526040902060010154611669906bffffffffffffffffffffffff16856145db565b600082815260076020526040902060010180547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff92909216919091179055600e546116c29085614667565b600e55604080516bffffffffffffffffffffffff86168152905173ffffffffffffffffffffffffffffffffffffffff87169183917fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039181900360200190a35050505050565b61172f613808565b6040518060e001604052808a63ffffffff1681526020018963ffffffff1681526020018862ffffff1681526020018763ffffffff1681526020018662ffffff1681526020018561ffff168152602001821515815250600b60008201518160000160006101000a81548163ffffffff021916908363ffffffff16021790555060208201518160000160046101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160086101000a81548162ffffff021916908362ffffff160217905550606082015181600001600b6101000a81548163ffffffff021916908363ffffffff160217905550608082015181600001600f6101000a81548162ffffff021916908362ffffff16021790555060a08201518160000160126101000a81548161ffff021916908361ffff16021790555060c08201518160000160146101000a81548160ff02191690831515021790555090505082600c8190555081600d819055507f6db8cdacf21c3bbd6135926f497c6fba81fd6969684ecf85f56550d2b1f8e6918988888888888888604051808963ffffffff1681526020018862ffffff1681526020018763ffffffff1681526020018662ffffff1681526020018561ffff16815260200184815260200183815260200182151581526020019850505050505050505060405180910390a16040805163ffffffff8a16815290517f17b46a44a823646eef686b7824df2962de896bc9a012a60b67694c5cbf184d8b9181900360200190a1505050505050505050565b8073ffffffffffffffffffffffffffffffffffffffff81166119eb57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f63616e6e6f742073656e6420746f207a65726f20616464726573730000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff838116600090815260086020908152604091829020825160608101845
28154948516808252740100000000000000000000000000000000000000009095046bffffffffffffffffffffffff16928101929092526001015460ff16151591810191909152903314611ad157604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f6e6c792063616c6c61626c6520627920706179656500000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff80851660009081526008602090815260409091208054909216909155810151600e54611b1f916bffffffffffffffffffffffff1661397c565b600e819055508273ffffffffffffffffffffffffffffffffffffffff1681602001516bffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff167f9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f4069833604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a47f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb8483602001516040518363ffffffff1660e01b8152600401808373ffffffffffffffffffffffffffffffffffffffff168152602001826bffffffffffffffffffffffff16815260200192505050602060405180830381600087803b15801561106157600080fd5b7f000000000000000000000000000000000000000000000000000000000000000081565b73ffffffffffffffffffffffffffffffffffffffff818116600090815260096020526040902054163314611d0f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f6f6e6c792063616c6c61626c652062792070726f706f73656420706179656500604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff81811660008181526008602090815260408083208054337fffffffffffffffffffffffff000000000000000000000000000000000000000080831682179093556009909452828520805490921690915590519416939092849290917f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b39190a45050565b600081815260076020526040812054611ddd9074010000000000000000000000000000000000000000900463ffffffff1661122b565b92915050565b611deb613808565b60007f000000000000000000000000000000000000000000000
000000000000000000073ffffffffffffffffffffffffffffffffffffffff166370a08231306040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611e7457600080fd5b505afa158015611e88573d6000803e3d6000fd5b505050506040513d6020811015611e9e57600080fd5b5051600e5490915073ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb903390611ef190859061397c565b6040518363ffffffff1660e01b8152600401808373ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b158015611f4457600080fd5b505af1158015611f58573d6000803e3d6000fd5b505050506040513d6020811015611f6e57600080fd5b505050565b611f7b613808565b828114611fd3576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526021815260200180614f8c6021913960400191505060405180910390fd5b600283101561204357604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6e6f7420656e6f756768206b6565706572730000000000000000000000000000604482015290519081900360640190fd5b60005b6006548110156120c35760006006828154811061205f57fe5b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff1682526008905260409020600190810180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055919091019050612046565b5060005b838110156123ad5760008585838181106120dd57fe5b73ffffffffffffffffffffffffffffffffffffffff602091820293909301358316600081815260089092526040822080549195509316915086868681811061212157fe5b9050602002013573ffffffffffffffffffffffffffffffffffffffff169050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614156121c6576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526024815260200180614f246024913960400191505060405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8216158061221457508073fffffffffffff
fffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16145b80612234575073ffffffffffffffffffffffffffffffffffffffff818116145b61229f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f63616e6e6f74206368616e676520706179656500000000000000000000000000604482015290519081900360640190fd5b600183015460ff161561231357604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f63616e6e6f7420616464206b6565706572207477696365000000000000000000604482015290519081900360640190fd5b600183810180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016909117905573ffffffffffffffffffffffffffffffffffffffff8181161461239d5782547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff82161783555b5050600190920191506120c79050565b506123ba60068585614d99565b507f056264c94f28bb06c99d13f0446eb96c67c215d8d707bce2655a98ddf1c0b71f848484846040518080602001806020018381038352878782818152602001925060200280828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169091018481038352858152602090810191508690860280828437600083820152604051601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018290039850909650505050505050a150505050565b6040805160e081018252600b5463ffffffff80821680845264010000000083048216602085015268010000000000000000830462ffffff9081169585018690526b0100000000000000000000008404909216606085018190526f010000000000000000000000000000008404909216608085018190527201000000000000000000000000000000000000840461ffff1660a086018190527401000000000000000000000000000000000000000090940460ff16151560c090950194909452600c54600d54919692949392909190565b606060008060008061256f610d6a565b156125db57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f5061757361626c653a2070617573656400000000000000000000000000000000604482015290519081900
360640190fd5b6125e36146db565b6000878152600760209081526040808320815160c081018352815473ffffffffffffffffffffffffffffffffffffffff80821683527401000000000000000000000000000000000000000090910463ffffffff16828601526001808401546bffffffffffffffffffffffff8116848701526c0100000000000000000000000090048216606084015260029384015467ffffffffffffffff8116608085015268010000000000000000900490911660a08301528c8652600a8552838620935160248101958652845461010092811615929092027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190911692909204604483018190529094937f6e04ff0d0000000000000000000000000000000000000000000000000000000093929091829160640190849080156127625780601f1061273757610100808354040283529160200191612762565b820191906000526020600020905b81548152906001019060200180831161274557829003601f168201915b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009097169690961786528751600b549151835193985060009788975073ffffffffffffffffffffffffffffffffffffffff909216955063ffffffff6b01000000000000000000000090930492909216935087928291908083835b6020831061286d57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101612830565b6001836020036101000a03801982511681845116808217855250505050505090500191505060006040518083038160008787f1925050503d80600081146128d0576040519150601f19603f3d011682016040523d82523d6000602084013e6128d5565b606091505b509150915081612a705760006128ea82614748565b905060008160405160200180807f63616c6c20746f20636865636b20746172676574206661696c65643a20000000815250601d0182805190602001908083835b6020831061296757805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161292a565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff018019909216911617905260408051929094018281037ffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffe0018352938490527f08c379a00000000000000000000000000000000000000000000000000000000084526004840181815282516024860152825192975087965094508493604401925085019080838360005b83811015612a35578181015183820152602001612a1d565b50505050905090810190601f168015612a625780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b808060200190516040811015612a8557600080fd5b815160208301805160405192949293830192919084640100000000821115612aac57600080fd5b908301906020820185811115612ac157600080fd5b8251640100000000811182820188101715612adb57600080fd5b82525081516020918201929091019080838360005b83811015612b08578181015183820152602001612af0565b50505050905090810190601f168015612b355780820380516001836020036101000a031916815260200191505b50604052505050809a50819350505081612bb057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f75706b656570206e6f74206e6565646564000000000000000000000000000000604482015290519081900360640190fd5b6000612bbf8b8d8c60006139f3565b9050612bd4858260000151836060015161485f565b6060810151608082015160a083015160c0909301519b9e919d509b50909998509650505050505050565b6000818152600760209081526040808320815160c081018352815473ffffffffffffffffffffffffffffffffffffffff8082168084527401000000000000000000000000000000000000000090920463ffffffff168387018190526001808601546bffffffffffffffffffffffff81168689019081526c010000000000000000000000009091048416606080880191825260029889015467ffffffffffffffff811660808a019081526801000000000000000090910490961660a089019081528d8d52600a8c528a8d20935190519251965184548c5161010097821615979097027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01169a909a04601f81018d90048d0286018d01909b528a85528c9b919a8c9a8b9a8b9a8b9a91999098909796949591939091879190830182828015612d865780601f10612d5b57610100808354040283529160200191612d86565b820191906000526020600020905b815481529060010190602001808311612d6957829003601f168201915b5050505050945097509750975097509750975097505091939597909294965
0565b60008181526007602052604081206002015467ffffffffffffffff9081169190821490612dd261120f565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161490508180612e1f5750808015612e1f5750438367ffffffffffffffff16115b612e8a57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f746f6f206c61746520746f2063616e63656c2075706b65657000000000000000604482015290519081900360640190fd5b8080612ecc57506000848152600760205260409020600101546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1633145b612f3757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f6f6e6c79206f776e6572206f722061646d696e00000000000000000000000000604482015290519081900360640190fd5b4381612f4b57612f48816032614667565b90505b600085815260076020526040902060020180547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff83161790558215612fca57600580546001810182556000919091527f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0018590555b60405167ffffffffffffffff82169086907f91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f79118190600090a35050505050565b600061301261120f565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806130625750600f5473ffffffffffffffffffffffffffffffffffffffff1633145b6130b7576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180614f486023913960400191505060405180910390fd5b6130d68673ffffffffffffffffffffffffffffffffffffffff16614a39565b61314157604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f746172676574206973206e6f74206120636f6e74726163740000000000000000604482015290519081900360640190fd5b6108fc8563ffffffff1610156131b857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600f60248201527f6d696e20676
17320697320323330300000000000000000000000000000000000604482015290519081900360640190fd5b624c4b408563ffffffff16111561323057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6d61782067617320697320353030303030300000000000000000000000000000604482015290519081900360640190fd5b506004546040805160c08101825273ffffffffffffffffffffffffffffffffffffffff808916825263ffffffff808916602080850191825260008587018181528b86166060880190815267ffffffffffffffff6080890181815260a08a018581528c8652600787528b86209a518b54985190991674010000000000000000000000000000000000000000027fffffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffff998b167fffffffffffffffffffffffff000000000000000000000000000000000000000090991698909817989098169690961789559151600189018054925189166c01000000000000000000000000026bffffffffffffffffffffffff9283167fffffffffffffffffffffffffffffffffffffffff00000000000000000000000090941693909317909116919091179055925160029096018054945190951668010000000000000000027fffffffff0000000000000000000000000000000000000000ffffffffffffffff969093167fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000909416939093179490941617909155600a909152206133ec908484614e21565b506004805460010190556040805163ffffffff8716815273ffffffffffffffffffffffffffffffffffffffff86166020820152815183927fbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012928290030190a295945050505050565b73ffffffffffffffffffffffffffffffffffffffff8281166000908152600860205260409020541633146134e957604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f6e6c792063616c6c61626c6520627920706179656500000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff811633141561356e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f63616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b73fffffffffff
fffffffffffffffffffffffffffff82811660009081526009602052604090205481169082161461361a5773ffffffffffffffffffffffffffffffffffffffff82811660008181526009602052604080822080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169486169485179055513392917f84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e3836791a45b5050565b600b5474010000000000000000000000000000000000000000900460ff1690565b613647613808565b61365081614a3f565b50565b61365b61120f565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806136ab5750600f5473ffffffffffffffffffffffffffffffffffffffff1633145b613700576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180614f486023913960400191505060405180910390fd5b600f5473ffffffffffffffffffffffffffffffffffffffff90811690821681141561378c57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600e60248201527f53616d6520726567697374726172000000000000000000000000000000000000604482015290519081900360640190fd5b600f80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff84811691821790925560405190918316907f9bf4a5b30267728df68663e14adb47e559863967c419dc6030638883408bed2e90600090a35050565b60045490565b60005473ffffffffffffffffffffffffffffffffffffffff163314610d2857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b613896610d6a565b61390157604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f5061757361626c653a206e6f7420706175736564000000000000000000000000604482015290519081900360640190fd5b600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa613952614b3a565b6040805173fffffffffff
fffffffffffffffffffffffffffff9092168252519081900360200190a1565b6000828211156139ed57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b50900390565b6139fb614ebb565b60008481526007602052604081205474010000000000000000000000000000000000000000900463ffffffff169080613a326141e5565b915091506000613a4283876143c4565b90506000613a5185838561440a565b6040805160e08101825273ffffffffffffffffffffffffffffffffffffffff8d168152602081018c90529081018a90526bffffffffffffffffffffffff909116606082015260808101959095525060a084015260c0830152509050949350505050565b6000600280541415613b2757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f5265656e7472616e637947756172643a207265656e7472616e742063616c6c00604482015290519081900360640190fd5b600280556020820151613b3981614b3e565b602083810151600090815260078252604090819020815160c081018352815473ffffffffffffffffffffffffffffffffffffffff80821683527401000000000000000000000000000000000000000090910463ffffffff169482019490945260018201546bffffffffffffffffffffffff8116938201939093526c01000000000000000000000000909204831660608084019190915260029091015467ffffffffffffffff8116608084015268010000000000000000900490921660a08201528451918501519091613c0d9183919061485f565b60005a90506000634585e33b60e01b86604001516040516024018080602001828103825283818151815260200191508051906020019080838360005b83811015613c61578181015183820152602001613c49565b50505050905090810190601f168015613c8e5780820380516001836020036101000a031916815260200191505b50604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009096169590951790945250505060808701518451919250613d239183614bc9565b94505a820391506000613d3f838860a001518960c0015161440a565b60408501519
09150613d5f906bffffffffffffffffffffffff1682614c15565b84604001906bffffffffffffffffffffffff1690816bffffffffffffffffffffffff168152505086600001518460a0019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505083600760008960200151815260200190815260200160002060008201518160000160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060208201518160000160146101000a81548163ffffffff021916908363ffffffff16021790555060408201518160010160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550606082015181600101600c6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060808201518160020160006101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555060a08201518160020160086101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055509050506000613fc082600860008b6000015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060000160149054906101000a90046bffffffffffffffffffffffff166bffffffffffffffffffffffff166145db90919063ffffffff16565b905080600860008a6000015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060000160146101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550876000015173ffffffffffffffffffffffffffffffffffffffff1687151589602001517fcaacad83e47cc45c280d487ec84184eee2fa3b54ebaa393bda7549f13da228f6858c6040015160405180836bffffffffffffffffffffffff16815260200180602001828103825283818151815260200191508051906020019080838360005b838110156140d25781810151838201526020016140ba565b50505050905090810190601f1680156140ff5780820380516001836020036101000a031916815260200191505b50935050505060405180910390a45050505050506001600255919050565b614125610d6a565b156141915760408
0517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f5061757361626c653a2070617573656400000000000000000000000000000000604482015290519081900360640190fd5b600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790557f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258613952614b3a565b6000806000600b600001600f9054906101000a900462ffffff1662ffffff1690506000808263ffffffff161190506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b15801561427c57600080fd5b505afa158015614290573d6000803e3d6000fd5b505050506040513d60a08110156142a657600080fd5b506020810151606090910151925090508280156142ca57508142038463ffffffff16105b806142d6575060008113155b156142e557600c5495506142e9565b8095505b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b15801561434f57600080fd5b505afa158015614363573d6000803e3d6000fd5b505050506040513d60a081101561437957600080fd5b5060208101516060909101519250905082801561439d57508142038463ffffffff16105b806143a9575060008113155b156143b857600d5494506143bc565b8094505b505050509091565b600b546000906143ef9084907201000000000000000000000000000000000000900461ffff16614ca2565b90508180156143fd5750803a105b15611ddd57503a92915050565b6040805160e081018252600b5463ffffffff808216835264010000000082048116602084015262ffffff6801000000000000000083048116948401949094526b0100000000000000000000008204811660608401526f010000000000000000000000000000008204909316608083015261ffff720100000000000000000000000000000000000082041660a083015260ff7401000000000000000000000000000000000000000090910416151560c082015260009182906144dd906144d6908890620138809061466716565b8690614ca2565b90506000614502836000015163ffffffff16633b9aca0061466790919063ffffffff16565b9050600061455361452b64e8d4a5100086602
0015163ffffffff16614ca290919063ffffffff16565b61454d886145478661454189633b9aca00614ca2565b90614ca2565b90614d15565b90614667565b90506b033b2e3c9fd0803ce80000008111156145d057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f7061796d656e742067726561746572207468616e20616c6c204c494e4b000000604482015290519081900360640190fd5b979650505050505050565b60008282016bffffffffffffffffffffffff808516908216101561466057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b9392505050565b60008282018381101561466057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b3215610d2857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f6f6e6c7920666f722073696d756c61746564206261636b656e64000000000000604482015290519081900360640190fd5b606060448251101561478e575060408051808201909152601d81527f7472616e73616374696f6e2072657665727465642073696c656e746c79000000602082015261125c565b60048201805190926024019060208110156147a857600080fd5b81019080805160405193929190846401000000008211156147c857600080fd5b9083019060208201858111156147dd57600080fd5b82516401000000008111828201881017156147f757600080fd5b82525081516020918201929091019080838360005b8381101561482457818101518382015260200161480c565b50505050905090810190601f1680156148515780820380516001836020036101000a031916815260200191505b506040525050509050919050565b73ffffffffffffffffffffffffffffffffffffffff821660009081526008602052604090206001015460ff166148f657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f6f6e6c7920616374697665206b65657065727300000000000000000000000000604482015290519081900360640190fd5b8083604001516
bffffffffffffffffffffffff16101561497757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f696e73756666696369656e742066756e64730000000000000000000000000000604482015290519081900360640190fd5b600b5474010000000000000000000000000000000000000000900460ff1615611f6e578173ffffffffffffffffffffffffffffffffffffffff168360a0015173ffffffffffffffffffffffffffffffffffffffff161415611f6e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f6b656570657273206d7573742074616b65207475726e73000000000000000000604482015290519081900360640190fd5b3b151590565b73ffffffffffffffffffffffffffffffffffffffff8116331415614ac457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b3390565b6000818152600760205260409020600201544367ffffffffffffffff9091161161365057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f696e76616c69642075706b656570206964000000000000000000000000000000604482015290519081900360640190fd5b60005a611388811015614bdb57600080fd5b611388810390508460408204820311614bf357600080fd5b50823b614bff57600080fd5b60008083516020850160008789f1949350505050565b6000826bffffffffffffffffffffffff16826bffffffffffffffffffffffff1611156139ed57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b600082614cb157506000611ddd565b82820282848281614cbe57fe5b0414614660576040517f08c379a00000000000000000000000000000000000000000000000000
00000008152600401808060200182810382526021815260200180614f6b6021913960400191505060405180910390fd5b6000808211614d8557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f536166654d6174683a206469766973696f6e206279207a65726f000000000000604482015290519081900360640190fd5b6000828481614d9057fe5b04949350505050565b828054828255906000526020600020908101928215614e11579160200282015b82811115614e115781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff843516178255602090920191600190910190614db9565b50614e1d929150614f0e565b5090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282614e575760008555614e11565b82601f10614e8e578280017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00823516178555614e11565b82800160010185558215614e11579182015b82811115614e11578235825591602001919060010190614ea0565b6040518060e00160405280600073ffffffffffffffffffffffffffffffffffffffff1681526020016000815260200160608152602001600081526020016000815260200160008152602001600081525090565b5b80821115614e1d5760008155600101614f0f56fe63616e6e6f742073657420706179656520746f20746865207a65726f20616464726573734f6e6c792063616c6c61626c65206279206f776e6572206f7220726567697374726172536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f7761646472657373206c69737473206e6f74207468652073616d65206c656e677468a164736f6c6343000706000a", -} - -var KeeperRegistryVBABI = KeeperRegistryVBMetaData.ABI - -var KeeperRegistryVBBin = KeeperRegistryVBMetaData.Bin - -func DeployKeeperRegistryVB(auth *bind.TransactOpts, backend bind.ContractBackend, link common.Address, linkEthFeed common.Address, fastGasFeed common.Address, paymentPremiumPPB uint32, flatFeeMicroLink uint32, blockCountPerTurn *big.Int, checkGasLimit uint32, stalenessSeconds *big.Int, gasCeilingMultiplier uint16, fallbackGasPrice *big.Int, fallbackLinkPrice *big.Int, mustTakeTurns bool) (common.Address, 
*types.Transaction, *KeeperRegistryVB, error) { - parsed, err := KeeperRegistryVBMetaData.GetAbi() - if err != nil { - return common.Address{}, nil, nil, err - } - if parsed == nil { - return common.Address{}, nil, nil, errors.New("GetABI returned nil") - } - - address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistryVBBin), backend, link, linkEthFeed, fastGasFeed, paymentPremiumPPB, flatFeeMicroLink, blockCountPerTurn, checkGasLimit, stalenessSeconds, gasCeilingMultiplier, fallbackGasPrice, fallbackLinkPrice, mustTakeTurns) - if err != nil { - return common.Address{}, nil, nil, err - } - return address, tx, &KeeperRegistryVB{KeeperRegistryVBCaller: KeeperRegistryVBCaller{contract: contract}, KeeperRegistryVBTransactor: KeeperRegistryVBTransactor{contract: contract}, KeeperRegistryVBFilterer: KeeperRegistryVBFilterer{contract: contract}}, nil -} - -type KeeperRegistryVB struct { - address common.Address - abi abi.ABI - KeeperRegistryVBCaller - KeeperRegistryVBTransactor - KeeperRegistryVBFilterer -} - -type KeeperRegistryVBCaller struct { - contract *bind.BoundContract -} - -type KeeperRegistryVBTransactor struct { - contract *bind.BoundContract -} - -type KeeperRegistryVBFilterer struct { - contract *bind.BoundContract -} - -type KeeperRegistryVBSession struct { - Contract *KeeperRegistryVB - CallOpts bind.CallOpts - TransactOpts bind.TransactOpts -} - -type KeeperRegistryVBCallerSession struct { - Contract *KeeperRegistryVBCaller - CallOpts bind.CallOpts -} - -type KeeperRegistryVBTransactorSession struct { - Contract *KeeperRegistryVBTransactor - TransactOpts bind.TransactOpts -} - -type KeeperRegistryVBRaw struct { - Contract *KeeperRegistryVB -} - -type KeeperRegistryVBCallerRaw struct { - Contract *KeeperRegistryVBCaller -} - -type KeeperRegistryVBTransactorRaw struct { - Contract *KeeperRegistryVBTransactor -} - -func NewKeeperRegistryVB(address common.Address, backend bind.ContractBackend) (*KeeperRegistryVB, error) { - 
abi, err := abi.JSON(strings.NewReader(KeeperRegistryVBABI)) - if err != nil { - return nil, err - } - contract, err := bindKeeperRegistryVB(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &KeeperRegistryVB{address: address, abi: abi, KeeperRegistryVBCaller: KeeperRegistryVBCaller{contract: contract}, KeeperRegistryVBTransactor: KeeperRegistryVBTransactor{contract: contract}, KeeperRegistryVBFilterer: KeeperRegistryVBFilterer{contract: contract}}, nil -} - -func NewKeeperRegistryVBCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistryVBCaller, error) { - contract, err := bindKeeperRegistryVB(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &KeeperRegistryVBCaller{contract: contract}, nil -} - -func NewKeeperRegistryVBTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistryVBTransactor, error) { - contract, err := bindKeeperRegistryVB(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &KeeperRegistryVBTransactor{contract: contract}, nil -} - -func NewKeeperRegistryVBFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperRegistryVBFilterer, error) { - contract, err := bindKeeperRegistryVB(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &KeeperRegistryVBFilterer{contract: contract}, nil -} - -func bindKeeperRegistryVB(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := abi.JSON(strings.NewReader(KeeperRegistryVBABI)) - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _KeeperRegistryVB.Contract.KeeperRegistryVBCaller.contract.Call(opts, 
result, method, params...) -} - -func (_KeeperRegistryVB *KeeperRegistryVBRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.KeeperRegistryVBTransactor.contract.Transfer(opts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.KeeperRegistryVBTransactor.contract.Transact(opts, method, params...) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _KeeperRegistryVB.Contract.contract.Call(opts, result, method, params...) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.contract.Transfer(opts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.contract.Transact(opts, method, params...) 
-} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) FASTGASFEED(opts *bind.CallOpts) (common.Address, error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "FAST_GAS_FEED") - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) FASTGASFEED() (common.Address, error) { - return _KeeperRegistryVB.Contract.FASTGASFEED(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) FASTGASFEED() (common.Address, error) { - return _KeeperRegistryVB.Contract.FASTGASFEED(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) LINK(opts *bind.CallOpts) (common.Address, error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "LINK") - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) LINK() (common.Address, error) { - return _KeeperRegistryVB.Contract.LINK(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) LINK() (common.Address, error) { - return _KeeperRegistryVB.Contract.LINK(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) LINKETHFEED(opts *bind.CallOpts) (common.Address, error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "LINK_ETH_FEED") - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) LINKETHFEED() (common.Address, error) { - return _KeeperRegistryVB.Contract.LINKETHFEED(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) 
LINKETHFEED() (common.Address, error) { - return _KeeperRegistryVB.Contract.LINKETHFEED(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) GetCanceledUpkeepList(opts *bind.CallOpts) ([]*big.Int, error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "getCanceledUpkeepList") - - if err != nil { - return *new([]*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) - - return out0, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) GetCanceledUpkeepList() ([]*big.Int, error) { - return _KeeperRegistryVB.Contract.GetCanceledUpkeepList(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) GetCanceledUpkeepList() ([]*big.Int, error) { - return _KeeperRegistryVB.Contract.GetCanceledUpkeepList(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) GetConfig(opts *bind.CallOpts) (GetConfig, - - error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "getConfig") - - outstruct := new(GetConfig) - if err != nil { - return *outstruct, err - } - - outstruct.PaymentPremiumPPB = *abi.ConvertType(out[0], new(uint32)).(*uint32) - outstruct.BlockCountPerTurn = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) - outstruct.CheckGasLimit = *abi.ConvertType(out[2], new(uint32)).(*uint32) - outstruct.StalenessSeconds = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) - outstruct.GasCeilingMultiplier = *abi.ConvertType(out[4], new(uint16)).(*uint16) - outstruct.FallbackGasPrice = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) - outstruct.FallbackLinkPrice = *abi.ConvertType(out[6], new(*big.Int)).(**big.Int) - - return *outstruct, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) GetConfig() (GetConfig, - - error) { - return _KeeperRegistryVB.Contract.GetConfig(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) GetConfig() 
(GetConfig, - - error) { - return _KeeperRegistryVB.Contract.GetConfig(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) GetFlatFee(opts *bind.CallOpts) (uint32, error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "getFlatFee") - - if err != nil { - return *new(uint32), err - } - - out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) - - return out0, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) GetFlatFee() (uint32, error) { - return _KeeperRegistryVB.Contract.GetFlatFee(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) GetFlatFee() (uint32, error) { - return _KeeperRegistryVB.Contract.GetFlatFee(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) GetKeeperInfo(opts *bind.CallOpts, query common.Address) (GetKeeperInfo, - - error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "getKeeperInfo", query) - - outstruct := new(GetKeeperInfo) - if err != nil { - return *outstruct, err - } - - outstruct.Payee = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - outstruct.Active = *abi.ConvertType(out[1], new(bool)).(*bool) - outstruct.Balance = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) - - return *outstruct, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) GetKeeperInfo(query common.Address) (GetKeeperInfo, - - error) { - return _KeeperRegistryVB.Contract.GetKeeperInfo(&_KeeperRegistryVB.CallOpts, query) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) GetKeeperInfo(query common.Address) (GetKeeperInfo, - - error) { - return _KeeperRegistryVB.Contract.GetKeeperInfo(&_KeeperRegistryVB.CallOpts, query) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) GetKeeperList(opts *bind.CallOpts) ([]common.Address, error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "getKeeperList") - - if err != nil { - 
return *new([]common.Address), err - } - - out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) - - return out0, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) GetKeeperList() ([]common.Address, error) { - return _KeeperRegistryVB.Contract.GetKeeperList(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) GetKeeperList() ([]common.Address, error) { - return _KeeperRegistryVB.Contract.GetKeeperList(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) GetMaxPaymentForGas(opts *bind.CallOpts, gasLimit *big.Int) (*big.Int, error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "getMaxPaymentForGas", gasLimit) - - if err != nil { - return *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - - return out0, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) GetMaxPaymentForGas(gasLimit *big.Int) (*big.Int, error) { - return _KeeperRegistryVB.Contract.GetMaxPaymentForGas(&_KeeperRegistryVB.CallOpts, gasLimit) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) GetMaxPaymentForGas(gasLimit *big.Int) (*big.Int, error) { - return _KeeperRegistryVB.Contract.GetMaxPaymentForGas(&_KeeperRegistryVB.CallOpts, gasLimit) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "getMinBalanceForUpkeep", id) - - if err != nil { - return *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - - return out0, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { - return _KeeperRegistryVB.Contract.GetMinBalanceForUpkeep(&_KeeperRegistryVB.CallOpts, id) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) GetMinBalanceForUpkeep(id *big.Int) 
(*big.Int, error) { - return _KeeperRegistryVB.Contract.GetMinBalanceForUpkeep(&_KeeperRegistryVB.CallOpts, id) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) GetMustTakeTurns(opts *bind.CallOpts) (bool, error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "getMustTakeTurns") - - if err != nil { - return *new(bool), err - } - - out0 := *abi.ConvertType(out[0], new(bool)).(*bool) - - return out0, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) GetMustTakeTurns() (bool, error) { - return _KeeperRegistryVB.Contract.GetMustTakeTurns(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) GetMustTakeTurns() (bool, error) { - return _KeeperRegistryVB.Contract.GetMustTakeTurns(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) GetRegistrar(opts *bind.CallOpts) (common.Address, error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "getRegistrar") - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) GetRegistrar() (common.Address, error) { - return _KeeperRegistryVB.Contract.GetRegistrar(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) GetRegistrar() (common.Address, error) { - return _KeeperRegistryVB.Contract.GetRegistrar(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) GetUpkeep(opts *bind.CallOpts, id *big.Int) (GetUpkeep, - - error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "getUpkeep", id) - - outstruct := new(GetUpkeep) - if err != nil { - return *outstruct, err - } - - outstruct.Target = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - outstruct.ExecuteGas = *abi.ConvertType(out[1], new(uint32)).(*uint32) - outstruct.CheckData 
= *abi.ConvertType(out[2], new([]byte)).(*[]byte) - outstruct.Balance = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) - outstruct.LastKeeper = *abi.ConvertType(out[4], new(common.Address)).(*common.Address) - outstruct.Admin = *abi.ConvertType(out[5], new(common.Address)).(*common.Address) - outstruct.MaxValidBlocknumber = *abi.ConvertType(out[6], new(uint64)).(*uint64) - - return *outstruct, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) GetUpkeep(id *big.Int) (GetUpkeep, - - error) { - return _KeeperRegistryVB.Contract.GetUpkeep(&_KeeperRegistryVB.CallOpts, id) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) GetUpkeep(id *big.Int) (GetUpkeep, - - error) { - return _KeeperRegistryVB.Contract.GetUpkeep(&_KeeperRegistryVB.CallOpts, id) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) GetUpkeepCount(opts *bind.CallOpts) (*big.Int, error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "getUpkeepCount") - - if err != nil { - return *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - - return out0, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) GetUpkeepCount() (*big.Int, error) { - return _KeeperRegistryVB.Contract.GetUpkeepCount(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) GetUpkeepCount() (*big.Int, error) { - return _KeeperRegistryVB.Contract.GetUpkeepCount(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) Owner(opts *bind.CallOpts) (common.Address, error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "owner") - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) Owner() (common.Address, error) { - return _KeeperRegistryVB.Contract.Owner(&_KeeperRegistryVB.CallOpts) -} - 
-func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) Owner() (common.Address, error) { - return _KeeperRegistryVB.Contract.Owner(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) Paused(opts *bind.CallOpts) (bool, error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "paused") - - if err != nil { - return *new(bool), err - } - - out0 := *abi.ConvertType(out[0], new(bool)).(*bool) - - return out0, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) Paused() (bool, error) { - return _KeeperRegistryVB.Contract.Paused(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) Paused() (bool, error) { - return _KeeperRegistryVB.Contract.Paused(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { - var out []interface{} - err := _KeeperRegistryVB.contract.Call(opts, &out, "typeAndVersion") - - if err != nil { - return *new(string), err - } - - out0 := *abi.ConvertType(out[0], new(string)).(*string) - - return out0, err - -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) TypeAndVersion() (string, error) { - return _KeeperRegistryVB.Contract.TypeAndVersion(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBCallerSession) TypeAndVersion() (string, error) { - return _KeeperRegistryVB.Contract.TypeAndVersion(&_KeeperRegistryVB.CallOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "acceptOwnership") -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) AcceptOwnership() (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.AcceptOwnership(&_KeeperRegistryVB.TransactOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) AcceptOwnership() (*types.Transaction, error) { - return 
_KeeperRegistryVB.Contract.AcceptOwnership(&_KeeperRegistryVB.TransactOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) AcceptPayeeship(opts *bind.TransactOpts, keeper common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "acceptPayeeship", keeper) -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) AcceptPayeeship(keeper common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.AcceptPayeeship(&_KeeperRegistryVB.TransactOpts, keeper) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) AcceptPayeeship(keeper common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.AcceptPayeeship(&_KeeperRegistryVB.TransactOpts, keeper) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "addFunds", id, amount) -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.AddFunds(&_KeeperRegistryVB.TransactOpts, id, amount) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.AddFunds(&_KeeperRegistryVB.TransactOpts, id, amount) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "cancelUpkeep", id) -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.CancelUpkeep(&_KeeperRegistryVB.TransactOpts, id) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { - return 
_KeeperRegistryVB.Contract.CancelUpkeep(&_KeeperRegistryVB.TransactOpts, id) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) CheckUpkeep(opts *bind.TransactOpts, id *big.Int, from common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "checkUpkeep", id, from) -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) CheckUpkeep(id *big.Int, from common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.CheckUpkeep(&_KeeperRegistryVB.TransactOpts, id, from) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) CheckUpkeep(id *big.Int, from common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.CheckUpkeep(&_KeeperRegistryVB.TransactOpts, id, from) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "onTokenTransfer", sender, amount, data) -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.OnTokenTransfer(&_KeeperRegistryVB.TransactOpts, sender, amount, data) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.OnTokenTransfer(&_KeeperRegistryVB.TransactOpts, sender, amount, data) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) Pause(opts *bind.TransactOpts) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "pause") -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) Pause() (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.Pause(&_KeeperRegistryVB.TransactOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) 
Pause() (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.Pause(&_KeeperRegistryVB.TransactOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) PerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "performUpkeep", id, performData) -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) PerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.PerformUpkeep(&_KeeperRegistryVB.TransactOpts, id, performData) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) PerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.PerformUpkeep(&_KeeperRegistryVB.TransactOpts, id, performData) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "recoverFunds") -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) RecoverFunds() (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.RecoverFunds(&_KeeperRegistryVB.TransactOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) RecoverFunds() (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.RecoverFunds(&_KeeperRegistryVB.TransactOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "registerUpkeep", target, gasLimit, admin, checkData) -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.RegisterUpkeep(&_KeeperRegistryVB.TransactOpts, target, 
gasLimit, admin, checkData) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.RegisterUpkeep(&_KeeperRegistryVB.TransactOpts, target, gasLimit, admin, checkData) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) SetConfig(opts *bind.TransactOpts, paymentPremiumPPB uint32, flatFeeMicroLink uint32, blockCountPerTurn *big.Int, checkGasLimit uint32, stalenessSeconds *big.Int, gasCeilingMultiplier uint16, fallbackGasPrice *big.Int, fallbackLinkPrice *big.Int, mustTakeTurns bool) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "setConfig", paymentPremiumPPB, flatFeeMicroLink, blockCountPerTurn, checkGasLimit, stalenessSeconds, gasCeilingMultiplier, fallbackGasPrice, fallbackLinkPrice, mustTakeTurns) -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) SetConfig(paymentPremiumPPB uint32, flatFeeMicroLink uint32, blockCountPerTurn *big.Int, checkGasLimit uint32, stalenessSeconds *big.Int, gasCeilingMultiplier uint16, fallbackGasPrice *big.Int, fallbackLinkPrice *big.Int, mustTakeTurns bool) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.SetConfig(&_KeeperRegistryVB.TransactOpts, paymentPremiumPPB, flatFeeMicroLink, blockCountPerTurn, checkGasLimit, stalenessSeconds, gasCeilingMultiplier, fallbackGasPrice, fallbackLinkPrice, mustTakeTurns) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) SetConfig(paymentPremiumPPB uint32, flatFeeMicroLink uint32, blockCountPerTurn *big.Int, checkGasLimit uint32, stalenessSeconds *big.Int, gasCeilingMultiplier uint16, fallbackGasPrice *big.Int, fallbackLinkPrice *big.Int, mustTakeTurns bool) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.SetConfig(&_KeeperRegistryVB.TransactOpts, paymentPremiumPPB, flatFeeMicroLink, blockCountPerTurn, checkGasLimit, stalenessSeconds, 
gasCeilingMultiplier, fallbackGasPrice, fallbackLinkPrice, mustTakeTurns) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) SetKeepers(opts *bind.TransactOpts, keepers []common.Address, payees []common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "setKeepers", keepers, payees) -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) SetKeepers(keepers []common.Address, payees []common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.SetKeepers(&_KeeperRegistryVB.TransactOpts, keepers, payees) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) SetKeepers(keepers []common.Address, payees []common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.SetKeepers(&_KeeperRegistryVB.TransactOpts, keepers, payees) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) SetRegistrar(opts *bind.TransactOpts, registrar common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "setRegistrar", registrar) -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) SetRegistrar(registrar common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.SetRegistrar(&_KeeperRegistryVB.TransactOpts, registrar) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) SetRegistrar(registrar common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.SetRegistrar(&_KeeperRegistryVB.TransactOpts, registrar) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "transferOwnership", to) -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) TransferOwnership(to common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.TransferOwnership(&_KeeperRegistryVB.TransactOpts, to) -} - -func (_KeeperRegistryVB 
*KeeperRegistryVBTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.TransferOwnership(&_KeeperRegistryVB.TransactOpts, to) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) TransferPayeeship(opts *bind.TransactOpts, keeper common.Address, proposed common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "transferPayeeship", keeper, proposed) -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) TransferPayeeship(keeper common.Address, proposed common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.TransferPayeeship(&_KeeperRegistryVB.TransactOpts, keeper, proposed) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) TransferPayeeship(keeper common.Address, proposed common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.TransferPayeeship(&_KeeperRegistryVB.TransactOpts, keeper, proposed) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "unpause") -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) Unpause() (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.Unpause(&_KeeperRegistryVB.TransactOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) Unpause() (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.Unpause(&_KeeperRegistryVB.TransactOpts) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "withdrawFunds", id, to) -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.WithdrawFunds(&_KeeperRegistryVB.TransactOpts, id, to) -} - -func 
(_KeeperRegistryVB *KeeperRegistryVBTransactorSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.WithdrawFunds(&_KeeperRegistryVB.TransactOpts, id, to) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactor) WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.contract.Transact(opts, "withdrawPayment", from, to) -} - -func (_KeeperRegistryVB *KeeperRegistryVBSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.WithdrawPayment(&_KeeperRegistryVB.TransactOpts, from, to) -} - -func (_KeeperRegistryVB *KeeperRegistryVBTransactorSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { - return _KeeperRegistryVB.Contract.WithdrawPayment(&_KeeperRegistryVB.TransactOpts, from, to) -} - -type KeeperRegistryVBConfigSetIterator struct { - Event *KeeperRegistryVBConfigSet - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBConfigSetIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBConfigSet) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBConfigSet) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBConfigSetIterator) Error() error { - return it.fail -} - -func (it 
*KeeperRegistryVBConfigSetIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -type KeeperRegistryVBConfigSet struct { - PaymentPremiumPPB uint32 - BlockCountPerTurn *big.Int - CheckGasLimit uint32 - StalenessSeconds *big.Int - GasCeilingMultiplier uint16 - FallbackGasPrice *big.Int - FallbackLinkPrice *big.Int - MustTakeTurns bool - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryVBConfigSetIterator, error) { - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "ConfigSet") - if err != nil { - return nil, err - } - return &KeeperRegistryVBConfigSetIterator{contract: _KeeperRegistryVB.contract, event: "ConfigSet", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBConfigSet) (event.Subscription, error) { - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "ConfigSet") - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBConfigSet) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "ConfigSet", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParseConfigSet(log types.Log) (*KeeperRegistryVBConfigSet, error) { - event := new(KeeperRegistryVBConfigSet) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "ConfigSet", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type KeeperRegistryVBFlatFeeSetIterator struct { - Event *KeeperRegistryVBFlatFeeSet - - contract *bind.BoundContract - event string - - logs chan 
types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBFlatFeeSetIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBFlatFeeSet) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBFlatFeeSet) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBFlatFeeSetIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBFlatFeeSetIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -type KeeperRegistryVBFlatFeeSet struct { - FlatFeeMicroLink uint32 - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterFlatFeeSet(opts *bind.FilterOpts) (*KeeperRegistryVBFlatFeeSetIterator, error) { - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "FlatFeeSet") - if err != nil { - return nil, err - } - return &KeeperRegistryVBFlatFeeSetIterator{contract: _KeeperRegistryVB.contract, event: "FlatFeeSet", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchFlatFeeSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBFlatFeeSet) (event.Subscription, error) { - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "FlatFeeSet") - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBFlatFeeSet) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "FlatFeeSet", log); err != nil { - return 
err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParseFlatFeeSet(log types.Log) (*KeeperRegistryVBFlatFeeSet, error) { - event := new(KeeperRegistryVBFlatFeeSet) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "FlatFeeSet", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type KeeperRegistryVBFundsAddedIterator struct { - Event *KeeperRegistryVBFundsAdded - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBFundsAddedIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBFundsAdded) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBFundsAdded) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBFundsAddedIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBFundsAddedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -type KeeperRegistryVBFundsAdded struct { - Id *big.Int - From common.Address - Amount *big.Int - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryVBFundsAddedIterator, error) { - - var idRule []interface{} - for _, idItem := range id 
{ - idRule = append(idRule, idItem) - } - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) - if err != nil { - return nil, err - } - return &KeeperRegistryVBFundsAddedIterator{contract: _KeeperRegistryVB.contract, event: "FundsAdded", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { - - var idRule []interface{} - for _, idItem := range id { - idRule = append(idRule, idItem) - } - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBFundsAdded) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "FundsAdded", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParseFundsAdded(log types.Log) (*KeeperRegistryVBFundsAdded, error) { - event := new(KeeperRegistryVBFundsAdded) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "FundsAdded", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type KeeperRegistryVBFundsWithdrawnIterator struct { - Event *KeeperRegistryVBFundsWithdrawn - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error 
-} - -func (it *KeeperRegistryVBFundsWithdrawnIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBFundsWithdrawn) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBFundsWithdrawn) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBFundsWithdrawnIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBFundsWithdrawnIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -type KeeperRegistryVBFundsWithdrawn struct { - Id *big.Int - Amount *big.Int - To common.Address - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryVBFundsWithdrawnIterator, error) { - - var idRule []interface{} - for _, idItem := range id { - idRule = append(idRule, idItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "FundsWithdrawn", idRule) - if err != nil { - return nil, err - } - return &KeeperRegistryVBFundsWithdrawnIterator{contract: _KeeperRegistryVB.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBFundsWithdrawn, id []*big.Int) (event.Subscription, error) { - - var idRule []interface{} - for _, idItem := range id { - idRule = append(idRule, idItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "FundsWithdrawn", idRule) - if err != nil { - return nil, err - } - return 
event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBFundsWithdrawn) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParseFundsWithdrawn(log types.Log) (*KeeperRegistryVBFundsWithdrawn, error) { - event := new(KeeperRegistryVBFundsWithdrawn) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type KeeperRegistryVBKeepersUpdatedIterator struct { - Event *KeeperRegistryVBKeepersUpdated - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBKeepersUpdatedIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBKeepersUpdated) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBKeepersUpdated) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBKeepersUpdatedIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBKeepersUpdatedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -type 
KeeperRegistryVBKeepersUpdated struct { - Keepers []common.Address - Payees []common.Address - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterKeepersUpdated(opts *bind.FilterOpts) (*KeeperRegistryVBKeepersUpdatedIterator, error) { - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "KeepersUpdated") - if err != nil { - return nil, err - } - return &KeeperRegistryVBKeepersUpdatedIterator{contract: _KeeperRegistryVB.contract, event: "KeepersUpdated", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchKeepersUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBKeepersUpdated) (event.Subscription, error) { - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "KeepersUpdated") - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBKeepersUpdated) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "KeepersUpdated", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParseKeepersUpdated(log types.Log) (*KeeperRegistryVBKeepersUpdated, error) { - event := new(KeeperRegistryVBKeepersUpdated) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "KeepersUpdated", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type KeeperRegistryVBOwnershipTransferRequestedIterator struct { - Event *KeeperRegistryVBOwnershipTransferRequested - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBOwnershipTransferRequestedIterator) Next() bool { - - if 
it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBOwnershipTransferRequested) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBOwnershipTransferRequested) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBOwnershipTransferRequestedIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBOwnershipTransferRequestedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -type KeeperRegistryVBOwnershipTransferRequested struct { - From common.Address - To common.Address - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryVBOwnershipTransferRequestedIterator, error) { - - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) - if err != nil { - return nil, err - } - return &KeeperRegistryVBOwnershipTransferRequestedIterator{contract: _KeeperRegistryVB.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { - - var 
fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBOwnershipTransferRequested) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryVBOwnershipTransferRequested, error) { - event := new(KeeperRegistryVBOwnershipTransferRequested) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type KeeperRegistryVBOwnershipTransferredIterator struct { - Event *KeeperRegistryVBOwnershipTransferred - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBOwnershipTransferredIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBOwnershipTransferred) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBOwnershipTransferred) - if 
err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBOwnershipTransferredIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBOwnershipTransferredIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -type KeeperRegistryVBOwnershipTransferred struct { - From common.Address - To common.Address - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryVBOwnershipTransferredIterator, error) { - - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) - if err != nil { - return nil, err - } - return &KeeperRegistryVBOwnershipTransferredIterator{contract: _KeeperRegistryVB.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { - - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := 
new(KeeperRegistryVBOwnershipTransferred) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistryVBOwnershipTransferred, error) { - event := new(KeeperRegistryVBOwnershipTransferred) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type KeeperRegistryVBPausedIterator struct { - Event *KeeperRegistryVBPaused - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBPausedIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBPaused) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBPaused) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBPausedIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBPausedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -type KeeperRegistryVBPaused struct { - Account common.Address - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterPaused(opts *bind.FilterOpts) 
(*KeeperRegistryVBPausedIterator, error) { - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "Paused") - if err != nil { - return nil, err - } - return &KeeperRegistryVBPausedIterator{contract: _KeeperRegistryVB.contract, event: "Paused", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBPaused) (event.Subscription, error) { - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "Paused") - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBPaused) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "Paused", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParsePaused(log types.Log) (*KeeperRegistryVBPaused, error) { - event := new(KeeperRegistryVBPaused) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "Paused", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type KeeperRegistryVBPayeeshipTransferRequestedIterator struct { - Event *KeeperRegistryVBPayeeshipTransferRequested - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBPayeeshipTransferRequestedIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBPayeeshipTransferRequested) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return 
false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBPayeeshipTransferRequested) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBPayeeshipTransferRequestedIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBPayeeshipTransferRequestedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -type KeeperRegistryVBPayeeshipTransferRequested struct { - Keeper common.Address - From common.Address - To common.Address - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryVBPayeeshipTransferRequestedIterator, error) { - - var keeperRule []interface{} - for _, keeperItem := range keeper { - keeperRule = append(keeperRule, keeperItem) - } - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "PayeeshipTransferRequested", keeperRule, fromRule, toRule) - if err != nil { - return nil, err - } - return &KeeperRegistryVBPayeeshipTransferRequestedIterator{contract: _KeeperRegistryVB.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBPayeeshipTransferRequested, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { - - var keeperRule []interface{} - for _, keeperItem := range keeper { - keeperRule = append(keeperRule, keeperItem) - } - 
var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "PayeeshipTransferRequested", keeperRule, fromRule, toRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBPayeeshipTransferRequested) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryVBPayeeshipTransferRequested, error) { - event := new(KeeperRegistryVBPayeeshipTransferRequested) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type KeeperRegistryVBPayeeshipTransferredIterator struct { - Event *KeeperRegistryVBPayeeshipTransferred - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBPayeeshipTransferredIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBPayeeshipTransferred) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = 
new(KeeperRegistryVBPayeeshipTransferred) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBPayeeshipTransferredIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBPayeeshipTransferredIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -type KeeperRegistryVBPayeeshipTransferred struct { - Keeper common.Address - From common.Address - To common.Address - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryVBPayeeshipTransferredIterator, error) { - - var keeperRule []interface{} - for _, keeperItem := range keeper { - keeperRule = append(keeperRule, keeperItem) - } - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "PayeeshipTransferred", keeperRule, fromRule, toRule) - if err != nil { - return nil, err - } - return &KeeperRegistryVBPayeeshipTransferredIterator{contract: _KeeperRegistryVB.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBPayeeshipTransferred, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { - - var keeperRule []interface{} - for _, keeperItem := range keeper { - keeperRule = append(keeperRule, keeperItem) - } - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - var toRule []interface{} - 
for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "PayeeshipTransferred", keeperRule, fromRule, toRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBPayeeshipTransferred) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryVBPayeeshipTransferred, error) { - event := new(KeeperRegistryVBPayeeshipTransferred) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type KeeperRegistryVBPaymentWithdrawnIterator struct { - Event *KeeperRegistryVBPaymentWithdrawn - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBPaymentWithdrawnIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBPaymentWithdrawn) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBPaymentWithdrawn) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done 
= true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBPaymentWithdrawnIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBPaymentWithdrawnIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -type KeeperRegistryVBPaymentWithdrawn struct { - Keeper common.Address - Amount *big.Int - To common.Address - Payee common.Address - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, keeper []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryVBPaymentWithdrawnIterator, error) { - - var keeperRule []interface{} - for _, keeperItem := range keeper { - keeperRule = append(keeperRule, keeperItem) - } - var amountRule []interface{} - for _, amountItem := range amount { - amountRule = append(amountRule, amountItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "PaymentWithdrawn", keeperRule, amountRule, toRule) - if err != nil { - return nil, err - } - return &KeeperRegistryVBPaymentWithdrawnIterator{contract: _KeeperRegistryVB.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBPaymentWithdrawn, keeper []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) { - - var keeperRule []interface{} - for _, keeperItem := range keeper { - keeperRule = append(keeperRule, keeperItem) - } - var amountRule []interface{} - for _, amountItem := range amount { - amountRule = append(amountRule, amountItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "PaymentWithdrawn", keeperRule, amountRule, toRule) - if err != nil { - return nil, err - } - 
return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBPaymentWithdrawn) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryVBPaymentWithdrawn, error) { - event := new(KeeperRegistryVBPaymentWithdrawn) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type KeeperRegistryVBRegistrarChangedIterator struct { - Event *KeeperRegistryVBRegistrarChanged - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBRegistrarChangedIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBRegistrarChanged) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBRegistrarChanged) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBRegistrarChangedIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBRegistrarChangedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - 
-type KeeperRegistryVBRegistrarChanged struct { - From common.Address - To common.Address - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterRegistrarChanged(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryVBRegistrarChangedIterator, error) { - - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "RegistrarChanged", fromRule, toRule) - if err != nil { - return nil, err - } - return &KeeperRegistryVBRegistrarChangedIterator{contract: _KeeperRegistryVB.contract, event: "RegistrarChanged", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchRegistrarChanged(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBRegistrarChanged, from []common.Address, to []common.Address) (event.Subscription, error) { - - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "RegistrarChanged", fromRule, toRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBRegistrarChanged) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "RegistrarChanged", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParseRegistrarChanged(log types.Log) 
(*KeeperRegistryVBRegistrarChanged, error) { - event := new(KeeperRegistryVBRegistrarChanged) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "RegistrarChanged", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type KeeperRegistryVBUnpausedIterator struct { - Event *KeeperRegistryVBUnpaused - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBUnpausedIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBUnpaused) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBUnpaused) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBUnpausedIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBUnpausedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -type KeeperRegistryVBUnpaused struct { - Account common.Address - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryVBUnpausedIterator, error) { - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "Unpaused") - if err != nil { - return nil, err - } - return &KeeperRegistryVBUnpausedIterator{contract: _KeeperRegistryVB.contract, event: "Unpaused", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBUnpaused) (event.Subscription, error) { - - logs, sub, err := 
_KeeperRegistryVB.contract.WatchLogs(opts, "Unpaused") - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBUnpaused) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "Unpaused", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParseUnpaused(log types.Log) (*KeeperRegistryVBUnpaused, error) { - event := new(KeeperRegistryVBUnpaused) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "Unpaused", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type KeeperRegistryVBUpkeepCanceledIterator struct { - Event *KeeperRegistryVBUpkeepCanceled - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBUpkeepCanceledIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBUpkeepCanceled) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBUpkeepCanceled) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBUpkeepCanceledIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBUpkeepCanceledIterator) Close() error { - 
it.sub.Unsubscribe() - return nil -} - -type KeeperRegistryVBUpkeepCanceled struct { - Id *big.Int - AtBlockHeight uint64 - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryVBUpkeepCanceledIterator, error) { - - var idRule []interface{} - for _, idItem := range id { - idRule = append(idRule, idItem) - } - var atBlockHeightRule []interface{} - for _, atBlockHeightItem := range atBlockHeight { - atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) - if err != nil { - return nil, err - } - return &KeeperRegistryVBUpkeepCanceledIterator{contract: _KeeperRegistryVB.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { - - var idRule []interface{} - for _, idItem := range id { - idRule = append(idRule, idItem) - } - var atBlockHeightRule []interface{} - for _, atBlockHeightItem := range atBlockHeight { - atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBUpkeepCanceled) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), 
nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParseUpkeepCanceled(log types.Log) (*KeeperRegistryVBUpkeepCanceled, error) { - event := new(KeeperRegistryVBUpkeepCanceled) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type KeeperRegistryVBUpkeepPerformedIterator struct { - Event *KeeperRegistryVBUpkeepPerformed - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBUpkeepPerformedIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBUpkeepPerformed) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBUpkeepPerformed) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBUpkeepPerformedIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBUpkeepPerformedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -type KeeperRegistryVBUpkeepPerformed struct { - Id *big.Int - Success bool - From common.Address - Payment *big.Int - PerformData []byte - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool, from []common.Address) (*KeeperRegistryVBUpkeepPerformedIterator, error) { - - var idRule []interface{} - for _, idItem := range id { - idRule = append(idRule, idItem) - } - var successRule []interface{} - for _, successItem := 
range success { - successRule = append(successRule, successItem) - } - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule, fromRule) - if err != nil { - return nil, err - } - return &KeeperRegistryVBUpkeepPerformedIterator{contract: _KeeperRegistryVB.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBUpkeepPerformed, id []*big.Int, success []bool, from []common.Address) (event.Subscription, error) { - - var idRule []interface{} - for _, idItem := range id { - idRule = append(idRule, idItem) - } - var successRule []interface{} - for _, successItem := range success { - successRule = append(successRule, successItem) - } - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule, fromRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBUpkeepPerformed) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParseUpkeepPerformed(log types.Log) (*KeeperRegistryVBUpkeepPerformed, error) { - event := new(KeeperRegistryVBUpkeepPerformed) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { - return nil, err - } - 
event.Raw = log - return event, nil -} - -type KeeperRegistryVBUpkeepRegisteredIterator struct { - Event *KeeperRegistryVBUpkeepRegistered - - contract *bind.BoundContract - event string - - logs chan types.Log - sub ethereum.Subscription - done bool - fail error -} - -func (it *KeeperRegistryVBUpkeepRegisteredIterator) Next() bool { - - if it.fail != nil { - return false - } - - if it.done { - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBUpkeepRegistered) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - - select { - case log := <-it.logs: - it.Event = new(KeeperRegistryVBUpkeepRegistered) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -func (it *KeeperRegistryVBUpkeepRegisteredIterator) Error() error { - return it.fail -} - -func (it *KeeperRegistryVBUpkeepRegisteredIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -type KeeperRegistryVBUpkeepRegistered struct { - Id *big.Int - ExecuteGas uint32 - Admin common.Address - Raw types.Log -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryVBUpkeepRegisteredIterator, error) { - - var idRule []interface{} - for _, idItem := range id { - idRule = append(idRule, idItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.FilterLogs(opts, "UpkeepRegistered", idRule) - if err != nil { - return nil, err - } - return &KeeperRegistryVBUpkeepRegisteredIterator{contract: _KeeperRegistryVB.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- 
*KeeperRegistryVBUpkeepRegistered, id []*big.Int) (event.Subscription, error) { - - var idRule []interface{} - for _, idItem := range id { - idRule = append(idRule, idItem) - } - - logs, sub, err := _KeeperRegistryVB.contract.WatchLogs(opts, "UpkeepRegistered", idRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - - event := new(KeeperRegistryVBUpkeepRegistered) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -func (_KeeperRegistryVB *KeeperRegistryVBFilterer) ParseUpkeepRegistered(log types.Log) (*KeeperRegistryVBUpkeepRegistered, error) { - event := new(KeeperRegistryVBUpkeepRegistered) - if err := _KeeperRegistryVB.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -type GetConfig struct { - PaymentPremiumPPB uint32 - BlockCountPerTurn *big.Int - CheckGasLimit uint32 - StalenessSeconds *big.Int - GasCeilingMultiplier uint16 - FallbackGasPrice *big.Int - FallbackLinkPrice *big.Int -} -type GetKeeperInfo struct { - Payee common.Address - Active bool - Balance *big.Int -} -type GetUpkeep struct { - Target common.Address - ExecuteGas uint32 - CheckData []byte - Balance *big.Int - LastKeeper common.Address - Admin common.Address - MaxValidBlocknumber uint64 -} - -func (_KeeperRegistryVB *KeeperRegistryVB) ParseLog(log types.Log) (generated.AbigenLog, error) { - switch log.Topics[0] { - case _KeeperRegistryVB.abi.Events["ConfigSet"].ID: - return _KeeperRegistryVB.ParseConfigSet(log) - case _KeeperRegistryVB.abi.Events["FlatFeeSet"].ID: - return _KeeperRegistryVB.ParseFlatFeeSet(log) - case 
_KeeperRegistryVB.abi.Events["FundsAdded"].ID: - return _KeeperRegistryVB.ParseFundsAdded(log) - case _KeeperRegistryVB.abi.Events["FundsWithdrawn"].ID: - return _KeeperRegistryVB.ParseFundsWithdrawn(log) - case _KeeperRegistryVB.abi.Events["KeepersUpdated"].ID: - return _KeeperRegistryVB.ParseKeepersUpdated(log) - case _KeeperRegistryVB.abi.Events["OwnershipTransferRequested"].ID: - return _KeeperRegistryVB.ParseOwnershipTransferRequested(log) - case _KeeperRegistryVB.abi.Events["OwnershipTransferred"].ID: - return _KeeperRegistryVB.ParseOwnershipTransferred(log) - case _KeeperRegistryVB.abi.Events["Paused"].ID: - return _KeeperRegistryVB.ParsePaused(log) - case _KeeperRegistryVB.abi.Events["PayeeshipTransferRequested"].ID: - return _KeeperRegistryVB.ParsePayeeshipTransferRequested(log) - case _KeeperRegistryVB.abi.Events["PayeeshipTransferred"].ID: - return _KeeperRegistryVB.ParsePayeeshipTransferred(log) - case _KeeperRegistryVB.abi.Events["PaymentWithdrawn"].ID: - return _KeeperRegistryVB.ParsePaymentWithdrawn(log) - case _KeeperRegistryVB.abi.Events["RegistrarChanged"].ID: - return _KeeperRegistryVB.ParseRegistrarChanged(log) - case _KeeperRegistryVB.abi.Events["Unpaused"].ID: - return _KeeperRegistryVB.ParseUnpaused(log) - case _KeeperRegistryVB.abi.Events["UpkeepCanceled"].ID: - return _KeeperRegistryVB.ParseUpkeepCanceled(log) - case _KeeperRegistryVB.abi.Events["UpkeepPerformed"].ID: - return _KeeperRegistryVB.ParseUpkeepPerformed(log) - case _KeeperRegistryVB.abi.Events["UpkeepRegistered"].ID: - return _KeeperRegistryVB.ParseUpkeepRegistered(log) - - default: - return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) - } -} - -func (KeeperRegistryVBConfigSet) Topic() common.Hash { - return common.HexToHash("0x6db8cdacf21c3bbd6135926f497c6fba81fd6969684ecf85f56550d2b1f8e691") -} - -func (KeeperRegistryVBFlatFeeSet) Topic() common.Hash { - return 
common.HexToHash("0x17b46a44a823646eef686b7824df2962de896bc9a012a60b67694c5cbf184d8b") -} - -func (KeeperRegistryVBFundsAdded) Topic() common.Hash { - return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203") -} - -func (KeeperRegistryVBFundsWithdrawn) Topic() common.Hash { - return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318") -} - -func (KeeperRegistryVBKeepersUpdated) Topic() common.Hash { - return common.HexToHash("0x056264c94f28bb06c99d13f0446eb96c67c215d8d707bce2655a98ddf1c0b71f") -} - -func (KeeperRegistryVBOwnershipTransferRequested) Topic() common.Hash { - return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") -} - -func (KeeperRegistryVBOwnershipTransferred) Topic() common.Hash { - return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") -} - -func (KeeperRegistryVBPaused) Topic() common.Hash { - return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") -} - -func (KeeperRegistryVBPayeeshipTransferRequested) Topic() common.Hash { - return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") -} - -func (KeeperRegistryVBPayeeshipTransferred) Topic() common.Hash { - return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") -} - -func (KeeperRegistryVBPaymentWithdrawn) Topic() common.Hash { - return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698") -} - -func (KeeperRegistryVBRegistrarChanged) Topic() common.Hash { - return common.HexToHash("0x9bf4a5b30267728df68663e14adb47e559863967c419dc6030638883408bed2e") -} - -func (KeeperRegistryVBUnpaused) Topic() common.Hash { - return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") -} - -func (KeeperRegistryVBUpkeepCanceled) Topic() common.Hash { - return 
common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181") -} - -func (KeeperRegistryVBUpkeepPerformed) Topic() common.Hash { - return common.HexToHash("0xcaacad83e47cc45c280d487ec84184eee2fa3b54ebaa393bda7549f13da228f6") -} - -func (KeeperRegistryVBUpkeepRegistered) Topic() common.Hash { - return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012") -} - -func (_KeeperRegistryVB *KeeperRegistryVB) Address() common.Address { - return _KeeperRegistryVB.address -} - -type KeeperRegistryVBInterface interface { - FASTGASFEED(opts *bind.CallOpts) (common.Address, error) - - LINK(opts *bind.CallOpts) (common.Address, error) - - LINKETHFEED(opts *bind.CallOpts) (common.Address, error) - - GetCanceledUpkeepList(opts *bind.CallOpts) ([]*big.Int, error) - - GetConfig(opts *bind.CallOpts) (GetConfig, - - error) - - GetFlatFee(opts *bind.CallOpts) (uint32, error) - - GetKeeperInfo(opts *bind.CallOpts, query common.Address) (GetKeeperInfo, - - error) - - GetKeeperList(opts *bind.CallOpts) ([]common.Address, error) - - GetMaxPaymentForGas(opts *bind.CallOpts, gasLimit *big.Int) (*big.Int, error) - - GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) - - GetMustTakeTurns(opts *bind.CallOpts) (bool, error) - - GetRegistrar(opts *bind.CallOpts) (common.Address, error) - - GetUpkeep(opts *bind.CallOpts, id *big.Int) (GetUpkeep, - - error) - - GetUpkeepCount(opts *bind.CallOpts) (*big.Int, error) - - Owner(opts *bind.CallOpts) (common.Address, error) - - Paused(opts *bind.CallOpts) (bool, error) - - TypeAndVersion(opts *bind.CallOpts) (string, error) - - AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) - - AcceptPayeeship(opts *bind.TransactOpts, keeper common.Address) (*types.Transaction, error) - - AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) - - CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) - - 
CheckUpkeep(opts *bind.TransactOpts, id *big.Int, from common.Address) (*types.Transaction, error) - - OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) - - Pause(opts *bind.TransactOpts) (*types.Transaction, error) - - PerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) - - RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) - - RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) - - SetConfig(opts *bind.TransactOpts, paymentPremiumPPB uint32, flatFeeMicroLink uint32, blockCountPerTurn *big.Int, checkGasLimit uint32, stalenessSeconds *big.Int, gasCeilingMultiplier uint16, fallbackGasPrice *big.Int, fallbackLinkPrice *big.Int, mustTakeTurns bool) (*types.Transaction, error) - - SetKeepers(opts *bind.TransactOpts, keepers []common.Address, payees []common.Address) (*types.Transaction, error) - - SetRegistrar(opts *bind.TransactOpts, registrar common.Address) (*types.Transaction, error) - - TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) - - TransferPayeeship(opts *bind.TransactOpts, keeper common.Address, proposed common.Address) (*types.Transaction, error) - - Unpause(opts *bind.TransactOpts) (*types.Transaction, error) - - WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) - - WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) - - FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryVBConfigSetIterator, error) - - WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBConfigSet) (event.Subscription, error) - - ParseConfigSet(log types.Log) (*KeeperRegistryVBConfigSet, error) - - FilterFlatFeeSet(opts *bind.FilterOpts) (*KeeperRegistryVBFlatFeeSetIterator, error) - - 
WatchFlatFeeSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBFlatFeeSet) (event.Subscription, error) - - ParseFlatFeeSet(log types.Log) (*KeeperRegistryVBFlatFeeSet, error) - - FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryVBFundsAddedIterator, error) - - WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) - - ParseFundsAdded(log types.Log) (*KeeperRegistryVBFundsAdded, error) - - FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryVBFundsWithdrawnIterator, error) - - WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBFundsWithdrawn, id []*big.Int) (event.Subscription, error) - - ParseFundsWithdrawn(log types.Log) (*KeeperRegistryVBFundsWithdrawn, error) - - FilterKeepersUpdated(opts *bind.FilterOpts) (*KeeperRegistryVBKeepersUpdatedIterator, error) - - WatchKeepersUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBKeepersUpdated) (event.Subscription, error) - - ParseKeepersUpdated(log types.Log) (*KeeperRegistryVBKeepersUpdated, error) - - FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryVBOwnershipTransferRequestedIterator, error) - - WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) - - ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryVBOwnershipTransferRequested, error) - - FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryVBOwnershipTransferredIterator, error) - - WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) - - ParseOwnershipTransferred(log types.Log) 
(*KeeperRegistryVBOwnershipTransferred, error) - - FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryVBPausedIterator, error) - - WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBPaused) (event.Subscription, error) - - ParsePaused(log types.Log) (*KeeperRegistryVBPaused, error) - - FilterPayeeshipTransferRequested(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryVBPayeeshipTransferRequestedIterator, error) - - WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBPayeeshipTransferRequested, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) - - ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryVBPayeeshipTransferRequested, error) - - FilterPayeeshipTransferred(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryVBPayeeshipTransferredIterator, error) - - WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBPayeeshipTransferred, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) - - ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryVBPayeeshipTransferred, error) - - FilterPaymentWithdrawn(opts *bind.FilterOpts, keeper []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryVBPaymentWithdrawnIterator, error) - - WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBPaymentWithdrawn, keeper []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) - - ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryVBPaymentWithdrawn, error) - - FilterRegistrarChanged(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryVBRegistrarChangedIterator, error) - - WatchRegistrarChanged(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBRegistrarChanged, from []common.Address, to []common.Address) 
(event.Subscription, error) - - ParseRegistrarChanged(log types.Log) (*KeeperRegistryVBRegistrarChanged, error) - - FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryVBUnpausedIterator, error) - - WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBUnpaused) (event.Subscription, error) - - ParseUnpaused(log types.Log) (*KeeperRegistryVBUnpaused, error) - - FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryVBUpkeepCanceledIterator, error) - - WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) - - ParseUpkeepCanceled(log types.Log) (*KeeperRegistryVBUpkeepCanceled, error) - - FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool, from []common.Address) (*KeeperRegistryVBUpkeepPerformedIterator, error) - - WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBUpkeepPerformed, id []*big.Int, success []bool, from []common.Address) (event.Subscription, error) - - ParseUpkeepPerformed(log types.Log) (*KeeperRegistryVBUpkeepPerformed, error) - - FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryVBUpkeepRegisteredIterator, error) - - WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryVBUpkeepRegistered, id []*big.Int) (event.Subscription, error) - - ParseUpkeepRegistered(log types.Log) (*KeeperRegistryVBUpkeepRegistered, error) - - ParseLog(log types.Log) (generated.AbigenLog, error) - - Address() common.Address -} diff --git a/core/internal/gethwrappers/generated/keeper_registry_wrapper/keeper_registry_wrapper.go b/core/internal/gethwrappers/generated/keeper_registry_wrapper/keeper_registry_wrapper.go index b0bda8a5ea0..58fb94b8541 100644 --- a/core/internal/gethwrappers/generated/keeper_registry_wrapper/keeper_registry_wrapper.go +++ b/core/internal/gethwrappers/generated/keeper_registry_wrapper/keeper_registry_wrapper.go 
@@ -31,7 +31,7 @@ var ( var KeeperRegistryMetaData = &bind.MetaData{ ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkEthFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"fastGasFeed\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"}],\"name\":\"FlatFeeSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\
":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"KeepersUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":
[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"RegistrarChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\
":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"FAST_GAS_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"LINK\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"LINK_ETH_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"cancelUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"maxLinkPayment\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"g
asLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"adjustedGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkEth\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCanceledUpkeepList\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFlatFee\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getKeeperInfo\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getKeeperList\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"getMaxPaymentForGas\",\"outputs\":[{\"internalType\":\"uint96\",\"
name\":\"maxPayment\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalanceForUpkeep\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"minBalance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRegistrar\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getUpkeep\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"lastKeeper\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"maxValidBlocknumber\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getUpkeepCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\
":[],\"name\":\"paused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"setKeepers
\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"registrar\",\"type\":\"address\"}],\"name\":\"setRegistrar\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", - Bin: 
"0x60e06040523480156200001157600080fd5b50604051620053b3380380620053b383398181016040526101608110156200003857600080fd5b508051602082015160408301516060840151608085015160a086015160c087015160e08801516101008901516101208a0151610140909a0151989997989697959694959394929391929091903380600081620000db576040805162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f0000000000000000604482015290519081900360640190fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156200010e576200010e8162000165565b50506001600255506003805460ff191690556001600160601b031960608c811b82166080528b811b821660a0528a901b1660c05262000154888888888888888862000215565b505050505050505050505062000487565b6001600160a01b038116331415620001c4576040805162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6200021f62000425565b6040518060c001604052808963ffffffff1681526020018863ffffffff1681526020018762ffffff1681526020018663ffffffff1681526020018562ffffff1681526020018461ffff16815250600b60008201518160000160006101000a81548163ffffffff021916908363ffffffff16021790555060208201518160000160046101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160086101000a81548162ffffff021916908362ffffff160217905550606082015181600001600b6101000a81548163ffffffff021916908363ffffffff160217905550608082015181600001600f6101000a81548162ffffff021916908362ffffff16021790555060a08201518160000160126101000a81548161ffff021916908361ffff16021790555090505081600c8190555080600d819055507feb3c06937e6595fd80ec1add18a195026d5cf65f122cc3ffedbfb18a9ed80b3988878787878787604051808863ffffffff1681526020018762ffffff1681526020018663ffffffff1681526020018562ffffff1681526020018461ffff16815260200183815260200182815260200197505050505050505060405180910390a16040805
163ffffffff8916815290517f17b46a44a823646eef686b7824df2962de896bc9a012a60b67694c5cbf184d8b9181900360200190a15050505050505050565b6000546001600160a01b0316331462000485576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b565b60805160601c60a05160601c60c05160601c614ece620004e560003980610d0052806141725250806119eb5280614245525080610bf55280610fa652806113a2528061147752806119485280611b825280611c505250614ece6000f3fe608060405234801561001057600080fd5b50600436106102265760003560e01c8063a4c0ed361161012a578063c41b813a116100bd578063db30a3861161008c578063f2fde38b11610071578063f2fde38b14610add578063faab9d3914610b10578063fecf27c914610b4357610226565b8063db30a38614610a41578063eb5dcd6c14610aa257610226565b8063c41b813a14610784578063c7c3a19a14610851578063c804802214610970578063da5c67411461098d57610226565b8063b657bc9c116100f9578063b657bc9c14610645578063b79550be14610662578063b7fdb4361461066a578063c3f909d41461072c57610226565b8063a4c0ed361461053d578063a710b221146105cf578063ad1783611461060a578063b121e1471461061257610226565b80635c975abb116101bd5780638456cb591161018c5780638da5cb5b116101715780638da5cb5b146104c657806393f0c1fc146104ce578063948108f71461050c57610226565b80638456cb591461049d5780638a601fc8146104a557610226565b80635c975abb146103c9578063744bfe61146103e557806379ba50971461041e5780637bbaf1ea1461042657610226565b80632cb6864d116101f95780632cb6864d146103a75780633f4ba83a146103af5780634584a419146103b95780634d3f7334146103c157610226565b806315a126ea1461022b578063181f5a77146102835780631b6b6d23146103005780631e12b8a514610331575b600080fd5b610233610b4b565b60408051602080825283518183015283519192839290830191858101910280838360005b8381101561026f578181015183820152602001610257565b505050509050019250505060405180910390f35b61028b610bba565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102c55781810151838201526020016102ad565b50505050905090810190601f1680156102f25780820380516001836020036101000a031
916815260200191505b509250505060405180910390f35b610308610bf3565b6040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190f35b6103646004803603602081101561034757600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16610c17565b6040805173ffffffffffffffffffffffffffffffffffffffff909416845291151560208401526bffffffffffffffffffffffff1682820152519081900360600190f35b610233610c95565b6103b7610cec565b005b610308610cfe565b610308610d22565b6103d1610d3e565b604080519115158252519081900360200190f35b6103b7600480360360408110156103fb57600080fd5b508035906020013573ffffffffffffffffffffffffffffffffffffffff16610d47565b6103b7611067565b6103d16004803603604081101561043c57600080fd5b8135919081019060408101602082013564010000000081111561045e57600080fd5b82018360208201111561047057600080fd5b8035906020019184600183028401116401000000008311171561049257600080fd5b509092509050611169565b6103b76111bf565b6104ad6111cf565b6040805163ffffffff9092168252519081900360200190f35b6103086111e3565b6104eb600480360360208110156104e457600080fd5b50356111ff565b604080516bffffffffffffffffffffffff9092168252519081900360200190f35b6103b76004803603604081101561052257600080fd5b50803590602001356bffffffffffffffffffffffff16611235565b6103b76004803603606081101561055357600080fd5b73ffffffffffffffffffffffffffffffffffffffff8235169160208101359181019060608101604082013564010000000081111561059057600080fd5b8201836020820111156105a257600080fd5b803590602001918460018302840111640100000000831117156105c457600080fd5b50909250905061145f565b6103b7600480360360408110156105e557600080fd5b5073ffffffffffffffffffffffffffffffffffffffff813581169160200135166116fb565b6103086119e9565b6103b76004803603602081101561062857600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16611a0d565b6104eb6004803603602081101561065b57600080fd5b5035611b3a565b6103b7611b76565b6103b76004803603604081101561068057600080fd5b81019060208101813564010000000081111561069b57600080fd5b8201836020820111156106ad57600080fd5b803590602001918460208302840111640100000000831117156106cf57600080fd5b919
3909290916020810190356401000000008111156106ed57600080fd5b8201836020820111156106ff57600080fd5b8035906020019184602083028401116401000000008311171561072157600080fd5b509092509050611d06565b610734612223565b6040805163ffffffff988916815262ffffff9788166020820152959097168588015292909416606084015261ffff16608083015260a082019290925260c081019190915290519081900360e00190f35b6107bd6004803603604081101561079a57600080fd5b508035906020013573ffffffffffffffffffffffffffffffffffffffff166122cc565b6040518080602001868152602001858152602001848152602001838152602001828103825287818151815260200191508051906020019080838360005b838110156108125781810151838201526020016107fa565b50505050905090810190601f16801561083f5780820380516001836020036101000a031916815260200191505b50965050505050505060405180910390f35b61086e6004803603602081101561086757600080fd5b503561296b565b604051808873ffffffffffffffffffffffffffffffffffffffff1681526020018763ffffffff16815260200180602001866bffffffffffffffffffffffff1681526020018573ffffffffffffffffffffffffffffffffffffffff1681526020018473ffffffffffffffffffffffffffffffffffffffff1681526020018367ffffffffffffffff168152602001828103825287818151815260200191508051906020019080838360005b8381101561092f578181015183820152602001610917565b50505050905090810190601f16801561095c5780820380516001836020036101000a031916815260200191505b509850505050505050505060405180910390f35b6103b76004803603602081101561098657600080fd5b5035612b14565b610a2f600480360360808110156109a357600080fd5b73ffffffffffffffffffffffffffffffffffffffff823581169263ffffffff602082013516926040820135909216918101906080810160608201356401000000008111156109f057600080fd5b820183602082011115610a0257600080fd5b80359060200191846001830284011164010000000083111715610a2457600080fd5b509092509050612d75565b60408051918252519081900360200190f35b6103b76004803603610100811015610a5857600080fd5b5063ffffffff8135811691602081013582169162ffffff604083013581169260608101359092169160808101359091169061ffff60a0820135169060c08101359060e001356131c1565b6103b760048036036040811015610ab857600080fd5b5
073ffffffffffffffffffffffffffffffffffffffff813581169160200135166133cf565b6103b760048036036020811015610af357600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16613599565b6103b760048036036020811015610b2657600080fd5b503573ffffffffffffffffffffffffffffffffffffffff166135ad565b610a2f61375c565b60606006805480602002602001604051908101604052809291908181526020018280548015610bb057602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610b85575b5050505050905090565b6040518060400160405280601481526020017f4b6565706572526567697374727920312e312e3000000000000000000000000081525081565b7f000000000000000000000000000000000000000000000000000000000000000081565b73ffffffffffffffffffffffffffffffffffffffff90811660009081526008602090815260409182902082516060810184528154948516808252740100000000000000000000000000000000000000009095046bffffffffffffffffffffffff1692810183905260019091015460ff16151592018290529192909190565b60606005805480602002602001604051908101604052809291908181526020018280548015610bb057602002820191906000526020600020905b815481526020019060010190808311610ccf575050505050905090565b610cf4613762565b610cfc6137e8565b565b7f000000000000000000000000000000000000000000000000000000000000000081565b600f5473ffffffffffffffffffffffffffffffffffffffff1690565b60035460ff1690565b8073ffffffffffffffffffffffffffffffffffffffff8116610dca57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f63616e6e6f742073656e6420746f207a65726f20616464726573730000000000604482015290519081900360640190fd5b6000838152600760205260409020600101546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff163314610e6f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f6e6c792063616c6c61626c652062792061646d696e00000000000000000000604482015290519081900360640190fd5b6000838152600760205260409020600201544367ffffffffffffffff9091161115610efb57604080517f08c379a00000000
0000000000000000000000000000000000000000000000000815260206004820152601760248201527f75706b656570206d7573742062652063616e63656c6564000000000000000000604482015290519081900360640190fd5b600083815260076020526040902060010180547fffffffffffffffffffffffffffffffffffffffff0000000000000000000000008116909155600e546bffffffffffffffffffffffff90911690610f5290826138d6565b600e556040805182815273ffffffffffffffffffffffffffffffffffffffff85166020820152815186927ff3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318928290030190a27f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb84836040518363ffffffff1660e01b8152600401808373ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b15801561103557600080fd5b505af1158015611049573d6000803e3d6000fd5b505050506040513d602081101561105f57600080fd5b505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146110ed57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015290519081900360640190fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60006111b76111b2338686868080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506001925061394d915050565b613a0e565b949350505050565b6111c7613762565b610cfc614077565b600b54640100000000900463ffffffff1690565b60005473ffffffffffffffffffffffffffffffffffffffff1690565b600080600061120c61413f565b91509150600061121d83600061431e565b905061122a858284614364565b93505050505b919050565b60008281526007602052604090206002015467ffffffffffffffff908116146112bf57604080517f08c379a00000000000000000000000000000000000000000000000000000000081526020600482015260156
0248201527f75706b656570206d757374206265206163746976650000000000000000000000604482015290519081900360640190fd5b6000828152600760205260409020600101546112e9906bffffffffffffffffffffffff1682614513565b600083815260076020526040902060010180547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff928316179055600e5461133f91831661459f565b600e55604080517f23b872dd0000000000000000000000000000000000000000000000000000000081523360048201523060248201526bffffffffffffffffffffffff83166044820152905173ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016916323b872dd9160648083019260209291908290030181600087803b1580156113ea57600080fd5b505af11580156113fe573d6000803e3d6000fd5b505050506040513d602081101561141457600080fd5b5050604080516bffffffffffffffffffffffff831681529051339184917fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039181900360200190a35050565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461150357604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f6f6e6c792063616c6c61626c65207468726f756768204c494e4b000000000000604482015290519081900360640190fd5b6020811461157257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f64617461206d7573742062652033322062797465730000000000000000000000604482015290519081900360640190fd5b60008282602081101561158457600080fd5b503560008181526007602052604090206002015490915067ffffffffffffffff9081161461161357604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f75706b656570206d757374206265206163746976650000000000000000000000604482015290519081900360640190fd5b60008181526007602052604090206001015461163d906bffffffffffffffffffffffff1685614513565b600082815260076020526040902060010180547fffffffffffffffffffffffffffffffffffffffff00000000000000000000000
0166bffffffffffffffffffffffff92909216919091179055600e54611696908561459f565b600e55604080516bffffffffffffffffffffffff86168152905173ffffffffffffffffffffffffffffffffffffffff87169183917fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039181900360200190a35050505050565b8073ffffffffffffffffffffffffffffffffffffffff811661177e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f63616e6e6f742073656e6420746f207a65726f20616464726573730000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff83811660009081526008602090815260409182902082516060810184528154948516808252740100000000000000000000000000000000000000009095046bffffffffffffffffffffffff16928101929092526001015460ff1615159181019190915290331461186457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f6e6c792063616c6c61626c6520627920706179656500000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff80851660009081526008602090815260409091208054909216909155810151600e546118b2916bffffffffffffffffffffffff166138d6565b600e819055508273ffffffffffffffffffffffffffffffffffffffff1681602001516bffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff167f9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f4069833604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a47f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb8483602001516040518363ffffffff1660e01b8152600401808373ffffffffffffffffffffffffffffffffffffffff168152602001826bffffffffffffffffffffffff16815260200192505050602060405180830381600087803b15801561103557600080fd5b7f000000000000000000000000000000000000000000000000000000000000000081565b73ffffffffffffffffffffffffffffffffffffffff818116600090815260096020526040902054163314611aa257604080517f08c379a000000000000000000000000000000000000000000
000000000000000815260206004820152601f60248201527f6f6e6c792063616c6c61626c652062792070726f706f73656420706179656500604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff81811660008181526008602090815260408083208054337fffffffffffffffffffffffff000000000000000000000000000000000000000080831682179093556009909452828520805490921690915590519416939092849290917f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b39190a45050565b600081815260076020526040812054611b709074010000000000000000000000000000000000000000900463ffffffff166111ff565b92915050565b611b7e613762565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff166370a08231306040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611c0757600080fd5b505afa158015611c1b573d6000803e3d6000fd5b505050506040513d6020811015611c3157600080fd5b5051600e5490915073ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb903390611c849085906138d6565b6040518363ffffffff1660e01b8152600401808373ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b158015611cd757600080fd5b505af1158015611ceb573d6000803e3d6000fd5b505050506040513d6020811015611d0157600080fd5b505050565b611d0e613762565b828114611d66576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526021815260200180614ea16021913960400191505060405180910390fd5b6002831015611dd657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6e6f7420656e6f756768206b6565706572730000000000000000000000000000604482015290519081900360640190fd5b60005b600654811015611e5657600060068281548110611df257fe5b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff1682526008905260409020600190810180547ffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffff00169055919091019050611dd9565b5060005b83811015612140576000858583818110611e7057fe5b73ffffffffffffffffffffffffffffffffffffffff6020918202939093013583166000818152600890925260408220805491955093169150868686818110611eb457fe5b9050602002013573ffffffffffffffffffffffffffffffffffffffff169050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415611f59576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526024815260200180614e396024913960400191505060405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff82161580611fa757508073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16145b80611fc7575073ffffffffffffffffffffffffffffffffffffffff818116145b61203257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f63616e6e6f74206368616e676520706179656500000000000000000000000000604482015290519081900360640190fd5b600183015460ff16156120a657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f63616e6e6f7420616464206b6565706572207477696365000000000000000000604482015290519081900360640190fd5b600183810180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016909117905573ffffffffffffffffffffffffffffffffffffffff818116146121305782547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff82161783555b505060019092019150611e5a9050565b5061214d60068585614cae565b507f056264c94f28bb06c99d13f0446eb96c67c215d8d707bce2655a98ddf1c0b71f848484846040518080602001806020018381038352878782818152602001925060200280828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169091018481038352858152602090810191508690860280828437600083820152604051601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018290039850909650505050505050a150505050565b6040805160c
081018252600b5463ffffffff80821680845264010000000083048216602085015268010000000000000000830462ffffff9081169585018690526b0100000000000000000000008404909216606085018190526f01000000000000000000000000000000840490921660808501819052720100000000000000000000000000000000000090930461ffff1660a0909401849052600c54600d549196929492909190565b60606000806000806122dc610d3e565b1561234857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f5061757361626c653a2070617573656400000000000000000000000000000000604482015290519081900360640190fd5b612350614613565b6000878152600760209081526040808320815160c081018352815473ffffffffffffffffffffffffffffffffffffffff80821683527401000000000000000000000000000000000000000090910463ffffffff16828601526001808401546bffffffffffffffffffffffff8116848701526c0100000000000000000000000090048216606084015260029384015467ffffffffffffffff8116608085015268010000000000000000900490911660a08301528c8652600a8552838620935160248101958652845461010092811615929092027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190911692909204604483018190529094937f6e04ff0d0000000000000000000000000000000000000000000000000000000093929091829160640190849080156124cf5780601f106124a4576101008083540402835291602001916124cf565b820191906000526020600020905b8154815290600101906020018083116124b257829003601f168201915b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009097169690961786528751600b549151835193985060009788975073ffffffffffffffffffffffffffffffffffffffff909216955063ffffffff6b01000000000000000000000090930492909216935087928291908083835b602083106125da57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161259d565b6001836020036101000a03801982511681845116808217855250505050505090500191505060006040518083038160008787f19250505
03d806000811461263d576040519150601f19603f3d011682016040523d82523d6000602084013e612642565b606091505b5091509150816127dd57600061265782614680565b905060008160405160200180807f63616c6c20746f20636865636b20746172676574206661696c65643a20000000815250601d0182805190602001908083835b602083106126d457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101612697565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff018019909216911617905260408051929094018281037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0018352938490527f08c379a00000000000000000000000000000000000000000000000000000000084526004840181815282516024860152825192975087965094508493604401925085019080838360005b838110156127a257818101518382015260200161278a565b50505050905090810190601f1680156127cf5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b8080602001905160408110156127f257600080fd5b81516020830180516040519294929383019291908464010000000082111561281957600080fd5b90830190602082018581111561282e57600080fd5b825164010000000081118282018810171561284857600080fd5b82525081516020918201929091019080838360005b8381101561287557818101518382015260200161285d565b50505050905090810190601f1680156128a25780820380516001836020036101000a031916815260200191505b50604052505050809a5081935050508161291d57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f75706b656570206e6f74206e6565646564000000000000000000000000000000604482015290519081900360640190fd5b600061292c8b8d8c600061394d565b90506129418582600001518360600151614797565b6060810151608082015160a083015160c0909301519b9e919d509b50909998509650505050505050565b6000818152600760209081526040808320815160c081018352815473ffffffffffffffffffffffffffffffffffffffff8082168084527401000000000000000000000000000000000000000090920463ffffffff168387018190526001808601546bffffffffffffffffffffffff81168689019081526c010000000000000000000000009091048416606080880
191825260029889015467ffffffffffffffff811660808a019081526801000000000000000090910490961660a089019081528d8d52600a8c528a8d20935190519251965184548c5161010097821615979097027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01169a909a04601f81018d90048d0286018d01909b528a85528c9b919a8c9a8b9a8b9a8b9a91999098909796949591939091879190830182828015612af35780601f10612ac857610100808354040283529160200191612af3565b820191906000526020600020905b815481529060010190602001808311612ad657829003601f168201915b50505050509450975097509750975097509750975050919395979092949650565b60008181526007602052604081206002015467ffffffffffffffff9081169190821490612b3f6111e3565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161490508180612b8c5750808015612b8c5750438367ffffffffffffffff16115b612bf757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f746f6f206c61746520746f2063616e63656c2075706b65657000000000000000604482015290519081900360640190fd5b8080612c3957506000848152600760205260409020600101546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1633145b612ca457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f6f6e6c79206f776e6572206f722061646d696e00000000000000000000000000604482015290519081900360640190fd5b4381612cb857612cb581603261459f565b90505b600085815260076020526040902060020180547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff83161790558215612d3757600580546001810182556000919091527f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0018590555b60405167ffffffffffffffff82169086907f91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f79118190600090a35050505050565b6000612d7f6111e3565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161480612dcf5750600f5473ffffffffffffffffffffffffffffffffffffffff1633145b612e24576040517f08c379a000000000000
0000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180614e5d6023913960400191505060405180910390fd5b612e438673ffffffffffffffffffffffffffffffffffffffff1661494e565b612eae57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f746172676574206973206e6f74206120636f6e74726163740000000000000000604482015290519081900360640190fd5b6108fc8563ffffffff161015612f2557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600f60248201527f6d696e2067617320697320323330300000000000000000000000000000000000604482015290519081900360640190fd5b624c4b408563ffffffff161115612f9d57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6d61782067617320697320353030303030300000000000000000000000000000604482015290519081900360640190fd5b506004546040805160c08101825273ffffffffffffffffffffffffffffffffffffffff808916825263ffffffff808916602080850191825260008587018181528b86166060880190815267ffffffffffffffff6080890181815260a08a018581528c8652600787528b86209a518b54985190991674010000000000000000000000000000000000000000027fffffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffff998b167fffffffffffffffffffffffff000000000000000000000000000000000000000090991698909817989098169690961789559151600189018054925189166c01000000000000000000000000026bffffffffffffffffffffffff9283167fffffffffffffffffffffffffffffffffffffffff00000000000000000000000090941693909317909116919091179055925160029096018054945190951668010000000000000000027fffffffff0000000000000000000000000000000000000000ffffffffffffffff969093167fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000909416939093179490941617909155600a90915220613159908484614d36565b506004805460010190556040805163ffffffff8716815273ffffffffffffffffffffffffffffffffffffffff86166020820152815183927fbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012928290030190a295945050505050565b6131c9613762565
b6040518060c001604052808963ffffffff1681526020018863ffffffff1681526020018762ffffff1681526020018663ffffffff1681526020018562ffffff1681526020018461ffff16815250600b60008201518160000160006101000a81548163ffffffff021916908363ffffffff16021790555060208201518160000160046101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160086101000a81548162ffffff021916908362ffffff160217905550606082015181600001600b6101000a81548163ffffffff021916908363ffffffff160217905550608082015181600001600f6101000a81548162ffffff021916908362ffffff16021790555060a08201518160000160126101000a81548161ffff021916908361ffff16021790555090505081600c8190555080600d819055507feb3c06937e6595fd80ec1add18a195026d5cf65f122cc3ffedbfb18a9ed80b3988878787878787604051808863ffffffff1681526020018762ffffff1681526020018663ffffffff1681526020018562ffffff1681526020018461ffff16815260200183815260200182815260200197505050505050505060405180910390a16040805163ffffffff8916815290517f17b46a44a823646eef686b7824df2962de896bc9a012a60b67694c5cbf184d8b9181900360200190a15050505050505050565b73ffffffffffffffffffffffffffffffffffffffff82811660009081526008602052604090205416331461346457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f6e6c792063616c6c61626c6520627920706179656500000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff81163314156134e957604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f63616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff8281166000908152600960205260409020548116908216146135955773ffffffffffffffffffffffffffffffffffffffff82811660008181526009602052604080822080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169486169485179055513392917f84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e3836791a45b5050565b6135a1613762565b6135aa81614954565b50565b6135b56
111e3565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806136055750600f5473ffffffffffffffffffffffffffffffffffffffff1633145b61365a576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180614e5d6023913960400191505060405180910390fd5b600f5473ffffffffffffffffffffffffffffffffffffffff9081169082168114156136e657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600e60248201527f53616d6520726567697374726172000000000000000000000000000000000000604482015290519081900360640190fd5b600f80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff84811691821790925560405190918316907f9bf4a5b30267728df68663e14adb47e559863967c419dc6030638883408bed2e90600090a35050565b60045490565b60005473ffffffffffffffffffffffffffffffffffffffff163314610cfc57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b6137f0610d3e565b61385b57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f5061757361626c653a206e6f7420706175736564000000000000000000000000604482015290519081900360640190fd5b600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa6138ac614a4f565b6040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190a1565b60008282111561394757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b50900390565b613955614dd0565b60008481526007602052604081205474010000000000000000000000000000000000000000900463ffffffff16908061398c61413f565b91509150600061399c8387614
31e565b905060006139ab858385614364565b6040805160e08101825273ffffffffffffffffffffffffffffffffffffffff8d168152602081018c90529081018a90526bffffffffffffffffffffffff909116606082015260808101959095525060a084015260c0830152509050949350505050565b6000600280541415613a8157604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f5265656e7472616e637947756172643a207265656e7472616e742063616c6c00604482015290519081900360640190fd5b600280556020820151613a9381614a53565b602083810151600090815260078252604090819020815160c081018352815473ffffffffffffffffffffffffffffffffffffffff80821683527401000000000000000000000000000000000000000090910463ffffffff169482019490945260018201546bffffffffffffffffffffffff8116938201939093526c01000000000000000000000000909204831660608084019190915260029091015467ffffffffffffffff8116608084015268010000000000000000900490921660a08201528451918501519091613b6791839190614797565b60005a90506000634585e33b60e01b86604001516040516024018080602001828103825283818151815260200191508051906020019080838360005b83811015613bbb578181015183820152602001613ba3565b50505050905090810190601f168015613be85780820380516001836020036101000a031916815260200191505b50604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009096169590951790945250505060808701518451919250613c7d9183614ade565b94505a820391506000613c99838860a001518960c00151614364565b6040850151909150613cb9906bffffffffffffffffffffffff1682614b2a565b84604001906bffffffffffffffffffffffff1690816bffffffffffffffffffffffff168152505086600001518460a0019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505083600760008960200151815260200190815260200160002060008201518160000160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060208201518
160000160146101000a81548163ffffffff021916908363ffffffff16021790555060408201518160010160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550606082015181600101600c6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060808201518160020160006101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555060a08201518160020160086101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055509050506000613f1a82600860008b6000015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060000160149054906101000a90046bffffffffffffffffffffffff166bffffffffffffffffffffffff1661451390919063ffffffff16565b905080600860008a6000015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060000160146101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550876000015173ffffffffffffffffffffffffffffffffffffffff1687151589602001517fcaacad83e47cc45c280d487ec84184eee2fa3b54ebaa393bda7549f13da228f6858c6040015160405180836bffffffffffffffffffffffff16815260200180602001828103825283818151815260200191508051906020019080838360005b8381101561402c578181015183820152602001614014565b50505050905090810190601f1680156140595780820380516001836020036101000a031916815260200191505b50935050505060405180910390a45050505050506001600255919050565b61407f610d3e565b156140eb57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f5061757361626c653a2070617573656400000000000000000000000000000000604482015290519081900360640190fd5b600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790557f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a2586138ac614a4f565b6000806000600b600001600f9054906101000a900462ffffff1662ffffff1690506000808263fffffff
f161190506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b1580156141d657600080fd5b505afa1580156141ea573d6000803e3d6000fd5b505050506040513d60a081101561420057600080fd5b5060208101516060909101519250905082801561422457508142038463ffffffff16105b80614230575060008113155b1561423f57600c549550614243565b8095505b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b1580156142a957600080fd5b505afa1580156142bd573d6000803e3d6000fd5b505050506040513d60a08110156142d357600080fd5b506020810151606090910151925090508280156142f757508142038463ffffffff16105b80614303575060008113155b1561431257600d549450614316565b8094505b505050509091565b600b546000906143499084907201000000000000000000000000000000000000900461ffff16614bb7565b90508180156143575750803a105b15611b7057503a92915050565b6040805160c081018252600b5463ffffffff808216835264010000000082048116602084015262ffffff6801000000000000000083048116948401949094526b0100000000000000000000008204811660608401526f010000000000000000000000000000008204909316608083015261ffff72010000000000000000000000000000000000009091041660a082015260009182906144159061440e908890620138809061459f16565b8690614bb7565b9050600061443a836000015163ffffffff16633b9aca0061459f90919063ffffffff16565b9050600061448b61446364e8d4a51000866020015163ffffffff16614bb790919063ffffffff16565b6144858861447f8661447989633b9aca00614bb7565b90614bb7565b90614c2a565b9061459f565b90506b033b2e3c9fd0803ce800000081111561450857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f7061796d656e742067726561746572207468616e20616c6c204c494e4b000000604482015290519081900360640190fd5b979650505050505050565b60008282016bffffffffffffffffffffffff808516908216101561459857604080517f08c379a0000000000000000000000000000000000000000000000
00000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b9392505050565b60008282018381101561459857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b3215610cfc57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f6f6e6c7920666f722073696d756c61746564206261636b656e64000000000000604482015290519081900360640190fd5b60606044825110156146c6575060408051808201909152601d81527f7472616e73616374696f6e2072657665727465642073696c656e746c790000006020820152611230565b60048201805190926024019060208110156146e057600080fd5b810190808051604051939291908464010000000082111561470057600080fd5b90830190602082018581111561471557600080fd5b825164010000000081118282018810171561472f57600080fd5b82525081516020918201929091019080838360005b8381101561475c578181015183820152602001614744565b50505050905090810190601f1680156147895780820380516001836020036101000a031916815260200191505b506040525050509050919050565b73ffffffffffffffffffffffffffffffffffffffff821660009081526008602052604090206001015460ff1661482e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f6f6e6c7920616374697665206b65657065727300000000000000000000000000604482015290519081900360640190fd5b8083604001516bffffffffffffffffffffffff1610156148af57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f696e73756666696369656e742066756e64730000000000000000000000000000604482015290519081900360640190fd5b8173ffffffffffffffffffffffffffffffffffffffff168360a0015173ffffffffffffffffffffffffffffffffffffffff161415611d0157604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f6b656570657273206d7573742074616b65207475726e73000000000
000000000604482015290519081900360640190fd5b3b151590565b73ffffffffffffffffffffffffffffffffffffffff81163314156149d957604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b3390565b6000818152600760205260409020600201544367ffffffffffffffff909116116135aa57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f696e76616c69642075706b656570206964000000000000000000000000000000604482015290519081900360640190fd5b60005a611388811015614af057600080fd5b611388810390508460408204820311614b0857600080fd5b50823b614b1457600080fd5b60008083516020850160008789f1949350505050565b6000826bffffffffffffffffffffffff16826bffffffffffffffffffffffff16111561394757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b600082614bc657506000611b70565b82820282848281614bd357fe5b0414614598576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526021815260200180614e806021913960400191505060405180910390fd5b6000808211614c9a57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f536166654d6174683a206469766973696f6e206279207a65726f000000000000604482015290519081900360640190fd5b6000828481614ca557fe5b04949350505050565b828054828255906000526020600020908101928215614d26579160200282015b82811115614d265781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff84351617825560209092019160019091019
0614cce565b50614d32929150614e23565b5090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282614d6c5760008555614d26565b82601f10614da3578280017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00823516178555614d26565b82800160010185558215614d26579182015b82811115614d26578235825591602001919060010190614db5565b6040518060e00160405280600073ffffffffffffffffffffffffffffffffffffffff1681526020016000815260200160608152602001600081526020016000815260200160008152602001600081525090565b5b80821115614d325760008155600101614e2456fe63616e6e6f742073657420706179656520746f20746865207a65726f20616464726573734f6e6c792063616c6c61626c65206279206f776e6572206f7220726567697374726172536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f7761646472657373206c69737473206e6f74207468652073616d65206c656e677468a164736f6c6343000706000a", + Bin: "0x60e06040523480156200001157600080fd5b50604051620052493803806200524983398181016040526101608110156200003857600080fd5b508051602082015160408301516060840151608085015160a086015160c087015160e08801516101008901516101208a0151610140909a0151989997989697959694959394929391929091903380600081620000db576040805162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f0000000000000000604482015290519081900360640190fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156200010e576200010e8162000165565b50506001600255506003805460ff191690556001600160601b031960608c811b82166080528b811b821660a0528a901b1660c05262000154888888888888888862000215565b505050505050505050505062000487565b6001600160a01b038116331415620001c4576040805162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6200021f62000425565b6040518060c001604052808963ffffffff16815260
20018863ffffffff1681526020018762ffffff1681526020018663ffffffff1681526020018562ffffff1681526020018461ffff16815250600b60008201518160000160006101000a81548163ffffffff021916908363ffffffff16021790555060208201518160000160046101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160086101000a81548162ffffff021916908362ffffff160217905550606082015181600001600b6101000a81548163ffffffff021916908363ffffffff160217905550608082015181600001600f6101000a81548162ffffff021916908362ffffff16021790555060a08201518160000160126101000a81548161ffff021916908361ffff16021790555090505081600c8190555080600d819055507feb3c06937e6595fd80ec1add18a195026d5cf65f122cc3ffedbfb18a9ed80b3988878787878787604051808863ffffffff1681526020018762ffffff1681526020018663ffffffff1681526020018562ffffff1681526020018461ffff16815260200183815260200182815260200197505050505050505060405180910390a16040805163ffffffff8916815290517f17b46a44a823646eef686b7824df2962de896bc9a012a60b67694c5cbf184d8b9181900360200190a15050505050505050565b6000546001600160a01b0316331462000485576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b565b60805160601c60a05160601c60c05160601c614d64620004e560003980610d0052806140085250806119eb52806140db525080610bf55280610fa652806113a2528061147752806119485280611b825280611c505250614d646000f3fe608060405234801561001057600080fd5b50600436106102265760003560e01c8063a4c0ed361161012a578063c41b813a116100bd578063db30a3861161008c578063f2fde38b11610071578063f2fde38b14610add578063faab9d3914610b10578063fecf27c914610b4357610226565b8063db30a38614610a41578063eb5dcd6c14610aa257610226565b8063c41b813a14610784578063c7c3a19a14610851578063c804802214610970578063da5c67411461098d57610226565b8063b657bc9c116100f9578063b657bc9c14610645578063b79550be14610662578063b7fdb4361461066a578063c3f909d41461072c57610226565b8063a4c0ed361461053d578063a710b221146105cf578063ad1783611461060a578063b121e1471461061257610226565b80635c975abb116101bd578063
8456cb591161018c5780638da5cb5b116101715780638da5cb5b146104c657806393f0c1fc146104ce578063948108f71461050c57610226565b80638456cb591461049d5780638a601fc8146104a557610226565b80635c975abb146103c9578063744bfe61146103e557806379ba50971461041e5780637bbaf1ea1461042657610226565b80632cb6864d116101f95780632cb6864d146103a75780633f4ba83a146103af5780634584a419146103b95780634d3f7334146103c157610226565b806315a126ea1461022b578063181f5a77146102835780631b6b6d23146103005780631e12b8a514610331575b600080fd5b610233610b4b565b60408051602080825283518183015283519192839290830191858101910280838360005b8381101561026f578181015183820152602001610257565b505050509050019250505060405180910390f35b61028b610bba565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102c55781810151838201526020016102ad565b50505050905090810190601f1680156102f25780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610308610bf3565b6040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190f35b6103646004803603602081101561034757600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16610c17565b6040805173ffffffffffffffffffffffffffffffffffffffff909416845291151560208401526bffffffffffffffffffffffff1682820152519081900360600190f35b610233610c95565b6103b7610cec565b005b610308610cfe565b610308610d22565b6103d1610d3e565b604080519115158252519081900360200190f35b6103b7600480360360408110156103fb57600080fd5b508035906020013573ffffffffffffffffffffffffffffffffffffffff16610d47565b6103b7611067565b6103d16004803603604081101561043c57600080fd5b8135919081019060408101602082013564010000000081111561045e57600080fd5b82018360208201111561047057600080fd5b8035906020019184600183028401116401000000008311171561049257600080fd5b509092509050611169565b6103b76111bf565b6104ad6111cf565b6040805163ffffffff9092168252519081900360200190f35b6103086111e3565b6104eb600480360360208110156104e457600080fd5b50356111ff565b604080516bffffffffffffffffffffffff9092168252519081900360200190f35b6103b760048036036040811015610522576000
80fd5b50803590602001356bffffffffffffffffffffffff16611235565b6103b76004803603606081101561055357600080fd5b73ffffffffffffffffffffffffffffffffffffffff8235169160208101359181019060608101604082013564010000000081111561059057600080fd5b8201836020820111156105a257600080fd5b803590602001918460018302840111640100000000831117156105c457600080fd5b50909250905061145f565b6103b7600480360360408110156105e557600080fd5b5073ffffffffffffffffffffffffffffffffffffffff813581169160200135166116fb565b6103086119e9565b6103b76004803603602081101561062857600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16611a0d565b6104eb6004803603602081101561065b57600080fd5b5035611b3a565b6103b7611b76565b6103b76004803603604081101561068057600080fd5b81019060208101813564010000000081111561069b57600080fd5b8201836020820111156106ad57600080fd5b803590602001918460208302840111640100000000831117156106cf57600080fd5b9193909290916020810190356401000000008111156106ed57600080fd5b8201836020820111156106ff57600080fd5b8035906020019184602083028401116401000000008311171561072157600080fd5b509092509050611d06565b610734612223565b6040805163ffffffff988916815262ffffff9788166020820152959097168588015292909416606084015261ffff16608083015260a082019290925260c081019190915290519081900360e00190f35b6107bd6004803603604081101561079a57600080fd5b508035906020013573ffffffffffffffffffffffffffffffffffffffff166122cc565b6040518080602001868152602001858152602001848152602001838152602001828103825287818151815260200191508051906020019080838360005b838110156108125781810151838201526020016107fa565b50505050905090810190601f16801561083f5780820380516001836020036101000a031916815260200191505b50965050505050505060405180910390f35b61086e6004803603602081101561086757600080fd5b503561296b565b604051808873ffffffffffffffffffffffffffffffffffffffff1681526020018763ffffffff16815260200180602001866bffffffffffffffffffffffff1681526020018573ffffffffffffffffffffffffffffffffffffffff1681526020018473ffffffffffffffffffffffffffffffffffffffff1681526020018367ffffffffffffffff16815260200182810382528781815181526020
0191508051906020019080838360005b8381101561092f578181015183820152602001610917565b50505050905090810190601f16801561095c5780820380516001836020036101000a031916815260200191505b509850505050505050505060405180910390f35b6103b76004803603602081101561098657600080fd5b5035612b14565b610a2f600480360360808110156109a357600080fd5b73ffffffffffffffffffffffffffffffffffffffff823581169263ffffffff602082013516926040820135909216918101906080810160608201356401000000008111156109f057600080fd5b820183602082011115610a0257600080fd5b80359060200191846001830284011164010000000083111715610a2457600080fd5b509092509050612d75565b60408051918252519081900360200190f35b6103b76004803603610100811015610a5857600080fd5b5063ffffffff8135811691602081013582169162ffffff604083013581169260608101359092169160808101359091169061ffff60a0820135169060c08101359060e001356131c1565b6103b760048036036040811015610ab857600080fd5b5073ffffffffffffffffffffffffffffffffffffffff813581169160200135166133cf565b6103b760048036036020811015610af357600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16613599565b6103b760048036036020811015610b2657600080fd5b503573ffffffffffffffffffffffffffffffffffffffff166135ad565b610a2f61375c565b60606006805480602002602001604051908101604052809291908181526020018280548015610bb057602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610b85575b5050505050905090565b6040518060400160405280601481526020017f4b6565706572526567697374727920312e322e3000000000000000000000000081525081565b7f000000000000000000000000000000000000000000000000000000000000000081565b73ffffffffffffffffffffffffffffffffffffffff90811660009081526008602090815260409182902082516060810184528154948516808252740100000000000000000000000000000000000000009095046bffffffffffffffffffffffff1692810183905260019091015460ff16151592018290529192909190565b60606005805480602002602001604051908101604052809291908181526020018280548015610bb057602002820191906000526020600020905b815481526020019060010190808311610ccf575050505050905090565b610cf4
613762565b610cfc6137e8565b565b7f000000000000000000000000000000000000000000000000000000000000000081565b600f5473ffffffffffffffffffffffffffffffffffffffff1690565b60035460ff1690565b8073ffffffffffffffffffffffffffffffffffffffff8116610dca57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f63616e6e6f742073656e6420746f207a65726f20616464726573730000000000604482015290519081900360640190fd5b6000838152600760205260409020600101546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff163314610e6f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f6e6c792063616c6c61626c652062792061646d696e00000000000000000000604482015290519081900360640190fd5b6000838152600760205260409020600201544367ffffffffffffffff9091161115610efb57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f75706b656570206d7573742062652063616e63656c6564000000000000000000604482015290519081900360640190fd5b600083815260076020526040902060010180547fffffffffffffffffffffffffffffffffffffffff0000000000000000000000008116909155600e546bffffffffffffffffffffffff90911690610f5290826138d6565b600e556040805182815273ffffffffffffffffffffffffffffffffffffffff85166020820152815186927ff3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318928290030190a27f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb84836040518363ffffffff1660e01b8152600401808373ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b15801561103557600080fd5b505af1158015611049573d6000803e3d6000fd5b505050506040513d602081101561105f57600080fd5b505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146110ed57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000
0000000000604482015290519081900360640190fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60006111b76111b2338686868080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506001925061394d915050565b613a0e565b949350505050565b6111c7613762565b610cfc613f0d565b600b54640100000000900463ffffffff1690565b60005473ffffffffffffffffffffffffffffffffffffffff1690565b600080600061120c613fd5565b91509150600061121d8360006141b4565b905061122a8582846141fa565b93505050505b919050565b60008281526007602052604090206002015467ffffffffffffffff908116146112bf57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f75706b656570206d757374206265206163746976650000000000000000000000604482015290519081900360640190fd5b6000828152600760205260409020600101546112e9906bffffffffffffffffffffffff16826143a9565b600083815260076020526040902060010180547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff928316179055600e5461133f918316614435565b600e55604080517f23b872dd0000000000000000000000000000000000000000000000000000000081523360048201523060248201526bffffffffffffffffffffffff83166044820152905173ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016916323b872dd9160648083019260209291908290030181600087803b1580156113ea57600080fd5b505af11580156113fe573d6000803e3d6000fd5b505050506040513d602081101561141457600080fd5b5050604080516bffffffffffffffffffffffff831681529051339184917fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039181900360200190a35050565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461150357604080517f08c379a00000000000000000000000000000000000000000000000000000
0000815260206004820152601a60248201527f6f6e6c792063616c6c61626c65207468726f756768204c494e4b000000000000604482015290519081900360640190fd5b6020811461157257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f64617461206d7573742062652033322062797465730000000000000000000000604482015290519081900360640190fd5b60008282602081101561158457600080fd5b503560008181526007602052604090206002015490915067ffffffffffffffff9081161461161357604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f75706b656570206d757374206265206163746976650000000000000000000000604482015290519081900360640190fd5b60008181526007602052604090206001015461163d906bffffffffffffffffffffffff16856143a9565b600082815260076020526040902060010180547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff92909216919091179055600e546116969085614435565b600e55604080516bffffffffffffffffffffffff86168152905173ffffffffffffffffffffffffffffffffffffffff87169183917fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039181900360200190a35050505050565b8073ffffffffffffffffffffffffffffffffffffffff811661177e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f63616e6e6f742073656e6420746f207a65726f20616464726573730000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff83811660009081526008602090815260409182902082516060810184528154948516808252740100000000000000000000000000000000000000009095046bffffffffffffffffffffffff16928101929092526001015460ff1615159181019190915290331461186457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f6e6c792063616c6c61626c6520627920706179656500000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff80851660009081526008602090815260409091208054909216909155810151600e546118b2916bffffffffffffff
ffffffffff166138d6565b600e819055508273ffffffffffffffffffffffffffffffffffffffff1681602001516bffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff167f9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f4069833604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a47f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb8483602001516040518363ffffffff1660e01b8152600401808373ffffffffffffffffffffffffffffffffffffffff168152602001826bffffffffffffffffffffffff16815260200192505050602060405180830381600087803b15801561103557600080fd5b7f000000000000000000000000000000000000000000000000000000000000000081565b73ffffffffffffffffffffffffffffffffffffffff818116600090815260096020526040902054163314611aa257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f6f6e6c792063616c6c61626c652062792070726f706f73656420706179656500604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff81811660008181526008602090815260408083208054337fffffffffffffffffffffffff000000000000000000000000000000000000000080831682179093556009909452828520805490921690915590519416939092849290917f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b39190a45050565b600081815260076020526040812054611b709074010000000000000000000000000000000000000000900463ffffffff166111ff565b92915050565b611b7e613762565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff166370a08231306040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611c0757600080fd5b505afa158015611c1b573d6000803e3d6000fd5b505050506040513d6020811015611c3157600080fd5b5051600e5490915073ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb903390611c849085906138d6565b6040518363ffffffff1660
e01b8152600401808373ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b158015611cd757600080fd5b505af1158015611ceb573d6000803e3d6000fd5b505050506040513d6020811015611d0157600080fd5b505050565b611d0e613762565b828114611d66576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526021815260200180614d376021913960400191505060405180910390fd5b6002831015611dd657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6e6f7420656e6f756768206b6565706572730000000000000000000000000000604482015290519081900360640190fd5b60005b600654811015611e5657600060068281548110611df257fe5b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff1682526008905260409020600190810180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055919091019050611dd9565b5060005b83811015612140576000858583818110611e7057fe5b73ffffffffffffffffffffffffffffffffffffffff6020918202939093013583166000818152600890925260408220805491955093169150868686818110611eb457fe5b9050602002013573ffffffffffffffffffffffffffffffffffffffff169050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415611f59576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526024815260200180614ccf6024913960400191505060405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff82161580611fa757508073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16145b80611fc7575073ffffffffffffffffffffffffffffffffffffffff818116145b61203257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f63616e6e6f74206368616e676520706179656500000000000000000000000000604482015290519081900360640190fd5b600183015460ff16156120a657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f6361
6e6e6f7420616464206b6565706572207477696365000000000000000000604482015290519081900360640190fd5b600183810180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016909117905573ffffffffffffffffffffffffffffffffffffffff818116146121305782547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff82161783555b505060019092019150611e5a9050565b5061214d60068585614b44565b507f056264c94f28bb06c99d13f0446eb96c67c215d8d707bce2655a98ddf1c0b71f848484846040518080602001806020018381038352878782818152602001925060200280828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169091018481038352858152602090810191508690860280828437600083820152604051601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018290039850909650505050505050a150505050565b6040805160c081018252600b5463ffffffff80821680845264010000000083048216602085015268010000000000000000830462ffffff9081169585018690526b0100000000000000000000008404909216606085018190526f01000000000000000000000000000000840490921660808501819052720100000000000000000000000000000000000090930461ffff1660a0909401849052600c54600d549196929492909190565b60606000806000806122dc610d3e565b1561234857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f5061757361626c653a2070617573656400000000000000000000000000000000604482015290519081900360640190fd5b6123506144a9565b6000878152600760209081526040808320815160c081018352815473ffffffffffffffffffffffffffffffffffffffff80821683527401000000000000000000000000000000000000000090910463ffffffff16828601526001808401546bffffffffffffffffffffffff8116848701526c0100000000000000000000000090048216606084015260029384015467ffffffffffffffff8116608085015268010000000000000000900490911660a08301528c8652600a8552838620935160248101958652845461010092811615929092027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190911692909204604483018190529094937f6e04ff0d0000000000
000000000000000000000000000000000000000000000093929091829160640190849080156124cf5780601f106124a4576101008083540402835291602001916124cf565b820191906000526020600020905b8154815290600101906020018083116124b257829003601f168201915b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009097169690961786528751600b549151835193985060009788975073ffffffffffffffffffffffffffffffffffffffff909216955063ffffffff6b01000000000000000000000090930492909216935087928291908083835b602083106125da57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161259d565b6001836020036101000a03801982511681845116808217855250505050505090500191505060006040518083038160008787f1925050503d806000811461263d576040519150601f19603f3d011682016040523d82523d6000602084013e612642565b606091505b5091509150816127dd57600061265782614516565b905060008160405160200180807f63616c6c20746f20636865636b20746172676574206661696c65643a20000000815250601d0182805190602001908083835b602083106126d457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101612697565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff018019909216911617905260408051929094018281037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0018352938490527f08c379a00000000000000000000000000000000000000000000000000000000084526004840181815282516024860152825192975087965094508493604401925085019080838360005b838110156127a257818101518382015260200161278a565b50505050905090810190601f1680156127cf5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b8080602001905160408110156127f257600080fd5b81516020830180516040519294929383019291908464010000000082111561281957600080fd5b90830190602082018581111561282e57600080fd5b825164010000000081118282018810171561284857600080fd
5b82525081516020918201929091019080838360005b8381101561287557818101518382015260200161285d565b50505050905090810190601f1680156128a25780820380516001836020036101000a031916815260200191505b50604052505050809a5081935050508161291d57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f75706b656570206e6f74206e6565646564000000000000000000000000000000604482015290519081900360640190fd5b600061292c8b8d8c600061394d565b9050612941858260000151836060015161462d565b6060810151608082015160a083015160c0909301519b9e919d509b50909998509650505050505050565b6000818152600760209081526040808320815160c081018352815473ffffffffffffffffffffffffffffffffffffffff8082168084527401000000000000000000000000000000000000000090920463ffffffff168387018190526001808601546bffffffffffffffffffffffff81168689019081526c010000000000000000000000009091048416606080880191825260029889015467ffffffffffffffff811660808a019081526801000000000000000090910490961660a089019081528d8d52600a8c528a8d20935190519251965184548c5161010097821615979097027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01169a909a04601f81018d90048d0286018d01909b528a85528c9b919a8c9a8b9a8b9a8b9a91999098909796949591939091879190830182828015612af35780601f10612ac857610100808354040283529160200191612af3565b820191906000526020600020905b815481529060010190602001808311612ad657829003601f168201915b50505050509450975097509750975097509750975050919395979092949650565b60008181526007602052604081206002015467ffffffffffffffff9081169190821490612b3f6111e3565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161490508180612b8c5750808015612b8c5750438367ffffffffffffffff16115b612bf757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f746f6f206c61746520746f2063616e63656c2075706b65657000000000000000604482015290519081900360640190fd5b8080612c3957506000848152600760205260409020600101546c01000000000000000000000000900473ffffffffffffffffffffffffffff
ffffffffffff1633145b612ca457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f6f6e6c79206f776e6572206f722061646d696e00000000000000000000000000604482015290519081900360640190fd5b4381612cb857612cb5816032614435565b90505b600085815260076020526040902060020180547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff83161790558215612d3757600580546001810182556000919091527f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0018590555b60405167ffffffffffffffff82169086907f91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f79118190600090a35050505050565b6000612d7f6111e3565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161480612dcf5750600f5473ffffffffffffffffffffffffffffffffffffffff1633145b612e24576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180614cf36023913960400191505060405180910390fd5b612e438673ffffffffffffffffffffffffffffffffffffffff166147e4565b612eae57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f746172676574206973206e6f74206120636f6e74726163740000000000000000604482015290519081900360640190fd5b6108fc8563ffffffff161015612f2557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600f60248201527f6d696e2067617320697320323330300000000000000000000000000000000000604482015290519081900360640190fd5b624c4b408563ffffffff161115612f9d57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6d61782067617320697320353030303030300000000000000000000000000000604482015290519081900360640190fd5b506004546040805160c08101825273ffffffffffffffffffffffffffffffffffffffff808916825263ffffffff808916602080850191825260008587018181528b86166060880190815267ffffffffffffffff6080890181815260a08a018581528c8652600787528b86209a518b549851909916740100000000
00000000000000000000000000000000027fffffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffff998b167fffffffffffffffffffffffff000000000000000000000000000000000000000090991698909817989098169690961789559151600189018054925189166c01000000000000000000000000026bffffffffffffffffffffffff9283167fffffffffffffffffffffffffffffffffffffffff00000000000000000000000090941693909317909116919091179055925160029096018054945190951668010000000000000000027fffffffff0000000000000000000000000000000000000000ffffffffffffffff969093167fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000909416939093179490941617909155600a90915220613159908484614bcc565b506004805460010190556040805163ffffffff8716815273ffffffffffffffffffffffffffffffffffffffff86166020820152815183927fbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012928290030190a295945050505050565b6131c9613762565b6040518060c001604052808963ffffffff1681526020018863ffffffff1681526020018762ffffff1681526020018663ffffffff1681526020018562ffffff1681526020018461ffff16815250600b60008201518160000160006101000a81548163ffffffff021916908363ffffffff16021790555060208201518160000160046101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160086101000a81548162ffffff021916908362ffffff160217905550606082015181600001600b6101000a81548163ffffffff021916908363ffffffff160217905550608082015181600001600f6101000a81548162ffffff021916908362ffffff16021790555060a08201518160000160126101000a81548161ffff021916908361ffff16021790555090505081600c8190555080600d819055507feb3c06937e6595fd80ec1add18a195026d5cf65f122cc3ffedbfb18a9ed80b3988878787878787604051808863ffffffff1681526020018762ffffff1681526020018663ffffffff1681526020018562ffffff1681526020018461ffff16815260200183815260200182815260200197505050505050505060405180910390a16040805163ffffffff8916815290517f17b46a44a823646eef686b7824df2962de896bc9a012a60b67694c5cbf184d8b9181900360200190a15050505050505050565b73ffffffffffffffffffffffffffffffffffffffff828116600090815260086020526040902054163314613464576040
80517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f6e6c792063616c6c61626c6520627920706179656500000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff81163314156134e957604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f63616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff8281166000908152600960205260409020548116908216146135955773ffffffffffffffffffffffffffffffffffffffff82811660008181526009602052604080822080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169486169485179055513392917f84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e3836791a45b5050565b6135a1613762565b6135aa816147ea565b50565b6135b56111e3565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806136055750600f5473ffffffffffffffffffffffffffffffffffffffff1633145b61365a576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180614cf36023913960400191505060405180910390fd5b600f5473ffffffffffffffffffffffffffffffffffffffff9081169082168114156136e657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600e60248201527f53616d6520726567697374726172000000000000000000000000000000000000604482015290519081900360640190fd5b600f80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff84811691821790925560405190918316907f9bf4a5b30267728df68663e14adb47e559863967c419dc6030638883408bed2e90600090a35050565b60045490565b60005473ffffffffffffffffffffffffffffffffffffffff163314610cfc57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd
5b6137f0610d3e565b61385b57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f5061757361626c653a206e6f7420706175736564000000000000000000000000604482015290519081900360640190fd5b600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa6138ac6148e5565b6040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190a1565b60008282111561394757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b50900390565b613955614c66565b60008481526007602052604081205474010000000000000000000000000000000000000000900463ffffffff16908061398c613fd5565b91509150600061399c83876141b4565b905060006139ab8583856141fa565b6040805160e08101825273ffffffffffffffffffffffffffffffffffffffff8d168152602081018c90529081018a90526bffffffffffffffffffffffff909116606082015260808101959095525060a084015260c0830152509050949350505050565b6000600280541415613a8157604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f5265656e7472616e637947756172643a207265656e7472616e742063616c6c00604482015290519081900360640190fd5b600280556020820151613a93816148e9565b602083810151600090815260078252604090819020815160c081018352815473ffffffffffffffffffffffffffffffffffffffff80821683527401000000000000000000000000000000000000000090910463ffffffff169482019490945260018201546bffffffffffffffffffffffff8116938201939093526c01000000000000000000000000909204831660608084019190915260029091015467ffffffffffffffff8116608084015268010000000000000000900490921660a08201528451918501519091613b679183919061462d565b60005a90506000634585e33b60e01b86604001516040516024018080602001828103825283818151815260200191508051906020019080838360005b83811015613bbb578181015183820152602001613ba3565b50505050905090810190601f168015613be85780
820380516001836020036101000a031916815260200191505b50604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009096169590951790945250505060808701518451919250613c7d9183614974565b94505a820391506000613c99838860a001518960c001516141fa565b60208089015160009081526007909152604081206001015491925090613ccd906bffffffffffffffffffffffff16836149c0565b60208981018051600090815260078352604080822060010180547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff878116919091179091558d519351835281832060020180547fffffffff0000000000000000000000000000000000000000ffffffffffffffff166801000000000000000073ffffffffffffffffffffffffffffffffffffffff968716021790558d5190931682526008909352918220549293509091613daf917401000000000000000000000000000000000000000090910416846143a9565b905080600860008b6000015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060000160146101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550886000015173ffffffffffffffffffffffffffffffffffffffff168815158a602001517fcaacad83e47cc45c280d487ec84184eee2fa3b54ebaa393bda7549f13da228f6868d6040015160405180836bffffffffffffffffffffffff16815260200180602001828103825283818151815260200191508051906020019080838360005b83811015613ec1578181015183820152602001613ea9565b50505050905090810190601f168015613eee5780820380516001836020036101000a031916815260200191505b50935050505060405180910390a4505050505050506001600255919050565b613f15610d3e565b15613f8157604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f5061757361626c653a2070617573656400000000000000000000000000000000604482015290519081900360640190fd5b600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790557f62e78cea01bee320cd4e
420270b5ea74000d11b0c9f74754ebdbfc544b05a2586138ac6148e5565b6000806000600b600001600f9054906101000a900462ffffff1662ffffff1690506000808263ffffffff161190506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b15801561406c57600080fd5b505afa158015614080573d6000803e3d6000fd5b505050506040513d60a081101561409657600080fd5b506020810151606090910151925090508280156140ba57508142038463ffffffff16105b806140c6575060008113155b156140d557600c5495506140d9565b8095505b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b15801561413f57600080fd5b505afa158015614153573d6000803e3d6000fd5b505050506040513d60a081101561416957600080fd5b5060208101516060909101519250905082801561418d57508142038463ffffffff16105b80614199575060008113155b156141a857600d5494506141ac565b8094505b505050509091565b600b546000906141df9084907201000000000000000000000000000000000000900461ffff16614a4d565b90508180156141ed5750803a105b15611b7057503a92915050565b6040805160c081018252600b5463ffffffff808216835264010000000082048116602084015262ffffff6801000000000000000083048116948401949094526b0100000000000000000000008204811660608401526f010000000000000000000000000000008204909316608083015261ffff72010000000000000000000000000000000000009091041660a082015260009182906142ab906142a4908890620138809061443516565b8690614a4d565b905060006142d0836000015163ffffffff16633b9aca0061443590919063ffffffff16565b905060006143216142f964e8d4a51000866020015163ffffffff16614a4d90919063ffffffff16565b61431b886143158661430f89633b9aca00614a4d565b90614a4d565b90614ac0565b90614435565b90506b033b2e3c9fd0803ce800000081111561439e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f7061796d656e742067726561746572207468616e20616c6c204c494e4b000000604482015290519081900360640190fd5b97
9650505050505050565b60008282016bffffffffffffffffffffffff808516908216101561442e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b9392505050565b60008282018381101561442e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b3215610cfc57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f6f6e6c7920666f722073696d756c61746564206261636b656e64000000000000604482015290519081900360640190fd5b606060448251101561455c575060408051808201909152601d81527f7472616e73616374696f6e2072657665727465642073696c656e746c790000006020820152611230565b600482018051909260240190602081101561457657600080fd5b810190808051604051939291908464010000000082111561459657600080fd5b9083019060208201858111156145ab57600080fd5b82516401000000008111828201881017156145c557600080fd5b82525081516020918201929091019080838360005b838110156145f25781810151838201526020016145da565b50505050905090810190601f16801561461f5780820380516001836020036101000a031916815260200191505b506040525050509050919050565b73ffffffffffffffffffffffffffffffffffffffff821660009081526008602052604090206001015460ff166146c457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f6f6e6c7920616374697665206b65657065727300000000000000000000000000604482015290519081900360640190fd5b8083604001516bffffffffffffffffffffffff16101561474557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f696e73756666696369656e742066756e64730000000000000000000000000000604482015290519081900360640190fd5b8173ffffffffffffffffffffffffffffffffffffffff168360a0015173ffffffffffffffffffffffffffffffffffffffff161415611d0157604080517f08c379a000
000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f6b656570657273206d7573742074616b65207475726e73000000000000000000604482015290519081900360640190fd5b3b151590565b73ffffffffffffffffffffffffffffffffffffffff811633141561486f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b3390565b6000818152600760205260409020600201544367ffffffffffffffff909116116135aa57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f696e76616c69642075706b656570206964000000000000000000000000000000604482015290519081900360640190fd5b60005a61138881101561498657600080fd5b61138881039050846040820482031161499e57600080fd5b50823b6149aa57600080fd5b60008083516020850160008789f1949350505050565b6000826bffffffffffffffffffffffff16826bffffffffffffffffffffffff16111561394757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b600082614a5c57506000611b70565b82820282848281614a6957fe5b041461442e576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526021815260200180614d166021913960400191505060405180910390fd5b6000808211614b3057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f536166654d6174683a206469766973696f6e206279207a65726f000000000000604482015290519081900360640190fd5b6000828481614b3b57fe5b04949350505050565b828054828255906000526020600020908101928215614bbc579160200282015b82811115614bbc5781547f
ffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff843516178255602090920191600190910190614b64565b50614bc8929150614cb9565b5090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282614c025760008555614bbc565b82601f10614c39578280017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00823516178555614bbc565b82800160010185558215614bbc579182015b82811115614bbc578235825591602001919060010190614c4b565b6040518060e00160405280600073ffffffffffffffffffffffffffffffffffffffff1681526020016000815260200160608152602001600081526020016000815260200160008152602001600081525090565b5b80821115614bc85760008155600101614cba56fe63616e6e6f742073657420706179656520746f20746865207a65726f20616464726573734f6e6c792063616c6c61626c65206279206f776e6572206f7220726567697374726172536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f7761646472657373206c69737473206e6f74207468652073616d65206c656e677468a164736f6c6343000706000a", } var KeeperRegistryABI = KeeperRegistryMetaData.ABI diff --git a/core/internal/gethwrappers/generated/log_emitter/log_emitter.go b/core/internal/gethwrappers/generated/log_emitter/log_emitter.go new file mode 100644 index 00000000000..7804f4350e6 --- /dev/null +++ b/core/internal/gethwrappers/generated/log_emitter/log_emitter.go @@ -0,0 +1,628 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package log_emitter + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +var LogEmitterMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"Log1\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"Log2\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"name\":\"Log3\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"v\",\"type\":\"uint256[]\"}],\"name\":\"EmitLog1\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"v\",\"type\":\"uint256[]\"}],\"name\":\"EmitLog2\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string[]\",\"name\":\"v\",\"type\":\"string[]\"}],\"name\":\"EmitLog3\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b5061053e806100206000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c8063696933c914610046578063bc253bc01461005b578063d9c21f461461006e575b600080fd5b6100596100543660046102f5565b610081565b005b6100596100693660046102f5565b6100f5565b61005961007c3660046101c7565b610159565b60005b81518110156100f1577f46692c0e59ca9cd1ad8f984a9d11715ec83424398b7eed4e05c8ce84662415a88282815181106100c0576100c06104d3565b60200260200101516040516100d791815260200190565b60405180910390a1806100e981610473565b915050610084565b5050565b60005b81518110156100f157818181518110610113576101136104d3565b60200260200101517f624fb00c2ce79f34cb543884c3af64816dce0f4cec3d32661959e49d488a7a9360405160405180910390a28061015181610473565b9150506100f8565b60005b81518110156100f1577fb94ec34dfe32a8a7170992a093976368d1e63decf8f0bc0b38a8eb89cc9f95cf828281518110610198576101986104d3565b60200260200101516040516101ad919061038d565b60405180910390a1806101bf81610473565b91505061015c565b600060208083850312156101da57600080fd5b823567ffffffffffffffff808211156101f257600080fd5b8185019150601f868184011261020757600080fd5b823561021a6102158261044f565b610400565b8082825286820191508686018a888560051b890101111561023a57600080fd5b60005b848110156102e55781358781111561025457600080fd5b8801603f81018d1361026557600080fd5b8981013560408982111561027b5761027b610502565b6102aa8c7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08b85011601610400565b8281528f828486010111156102be57600080fd5b828285018e83013760009281018d019290925250855250928801929088019060010161023d565b50909a9950505050505050505050565b6000602080838503121561030857600080fd5b823567ffffffffffffffff81111561031f57600080fd5b8301601f8101851361033057600080fd5b803561033e6102158261044f565b80828252848201915084840188868560051b870101111561035e57600080fd5b600094505b83851015610381578035835260019490940193918501918501610363565b50979650505050505050565b600060208083528351808285015260005b818110156103ba5785810183015185820160400152820161039e565b818111156103cc576000604
083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff8111828210171561044757610447610502565b604052919050565b600067ffffffffffffffff82111561046957610469610502565b5060051b60200190565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8214156104cc577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var LogEmitterABI = LogEmitterMetaData.ABI + +var LogEmitterBin = LogEmitterMetaData.Bin + +func DeployLogEmitter(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *LogEmitter, error) { + parsed, err := LogEmitterMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(LogEmitterBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &LogEmitter{LogEmitterCaller: LogEmitterCaller{contract: contract}, LogEmitterTransactor: LogEmitterTransactor{contract: contract}, LogEmitterFilterer: LogEmitterFilterer{contract: contract}}, nil +} + +type LogEmitter struct { + address common.Address + abi abi.ABI + LogEmitterCaller + LogEmitterTransactor + LogEmitterFilterer +} + +type LogEmitterCaller struct { + contract *bind.BoundContract +} + +type LogEmitterTransactor struct { + contract *bind.BoundContract +} + +type LogEmitterFilterer struct { + contract *bind.BoundContract +} + +type LogEmitterSession struct { + Contract *LogEmitter + CallOpts 
bind.CallOpts + TransactOpts bind.TransactOpts +} + +type LogEmitterCallerSession struct { + Contract *LogEmitterCaller + CallOpts bind.CallOpts +} + +type LogEmitterTransactorSession struct { + Contract *LogEmitterTransactor + TransactOpts bind.TransactOpts +} + +type LogEmitterRaw struct { + Contract *LogEmitter +} + +type LogEmitterCallerRaw struct { + Contract *LogEmitterCaller +} + +type LogEmitterTransactorRaw struct { + Contract *LogEmitterTransactor +} + +func NewLogEmitter(address common.Address, backend bind.ContractBackend) (*LogEmitter, error) { + abi, err := abi.JSON(strings.NewReader(LogEmitterABI)) + if err != nil { + return nil, err + } + contract, err := bindLogEmitter(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &LogEmitter{address: address, abi: abi, LogEmitterCaller: LogEmitterCaller{contract: contract}, LogEmitterTransactor: LogEmitterTransactor{contract: contract}, LogEmitterFilterer: LogEmitterFilterer{contract: contract}}, nil +} + +func NewLogEmitterCaller(address common.Address, caller bind.ContractCaller) (*LogEmitterCaller, error) { + contract, err := bindLogEmitter(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LogEmitterCaller{contract: contract}, nil +} + +func NewLogEmitterTransactor(address common.Address, transactor bind.ContractTransactor) (*LogEmitterTransactor, error) { + contract, err := bindLogEmitter(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &LogEmitterTransactor{contract: contract}, nil +} + +func NewLogEmitterFilterer(address common.Address, filterer bind.ContractFilterer) (*LogEmitterFilterer, error) { + contract, err := bindLogEmitter(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LogEmitterFilterer{contract: contract}, nil +} + +func bindLogEmitter(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) 
(*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(LogEmitterABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +func (_LogEmitter *LogEmitterRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LogEmitter.Contract.LogEmitterCaller.contract.Call(opts, result, method, params...) +} + +func (_LogEmitter *LogEmitterRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LogEmitter.Contract.LogEmitterTransactor.contract.Transfer(opts) +} + +func (_LogEmitter *LogEmitterRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LogEmitter.Contract.LogEmitterTransactor.contract.Transact(opts, method, params...) +} + +func (_LogEmitter *LogEmitterCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LogEmitter.Contract.contract.Call(opts, result, method, params...) +} + +func (_LogEmitter *LogEmitterTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LogEmitter.Contract.contract.Transfer(opts) +} + +func (_LogEmitter *LogEmitterTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LogEmitter.Contract.contract.Transact(opts, method, params...) 
+} + +func (_LogEmitter *LogEmitterTransactor) EmitLog1(opts *bind.TransactOpts, v []*big.Int) (*types.Transaction, error) { + return _LogEmitter.contract.Transact(opts, "EmitLog1", v) +} + +func (_LogEmitter *LogEmitterSession) EmitLog1(v []*big.Int) (*types.Transaction, error) { + return _LogEmitter.Contract.EmitLog1(&_LogEmitter.TransactOpts, v) +} + +func (_LogEmitter *LogEmitterTransactorSession) EmitLog1(v []*big.Int) (*types.Transaction, error) { + return _LogEmitter.Contract.EmitLog1(&_LogEmitter.TransactOpts, v) +} + +func (_LogEmitter *LogEmitterTransactor) EmitLog2(opts *bind.TransactOpts, v []*big.Int) (*types.Transaction, error) { + return _LogEmitter.contract.Transact(opts, "EmitLog2", v) +} + +func (_LogEmitter *LogEmitterSession) EmitLog2(v []*big.Int) (*types.Transaction, error) { + return _LogEmitter.Contract.EmitLog2(&_LogEmitter.TransactOpts, v) +} + +func (_LogEmitter *LogEmitterTransactorSession) EmitLog2(v []*big.Int) (*types.Transaction, error) { + return _LogEmitter.Contract.EmitLog2(&_LogEmitter.TransactOpts, v) +} + +func (_LogEmitter *LogEmitterTransactor) EmitLog3(opts *bind.TransactOpts, v []string) (*types.Transaction, error) { + return _LogEmitter.contract.Transact(opts, "EmitLog3", v) +} + +func (_LogEmitter *LogEmitterSession) EmitLog3(v []string) (*types.Transaction, error) { + return _LogEmitter.Contract.EmitLog3(&_LogEmitter.TransactOpts, v) +} + +func (_LogEmitter *LogEmitterTransactorSession) EmitLog3(v []string) (*types.Transaction, error) { + return _LogEmitter.Contract.EmitLog3(&_LogEmitter.TransactOpts, v) +} + +type LogEmitterLog1Iterator struct { + Event *LogEmitterLog1 + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LogEmitterLog1Iterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogEmitterLog1) + if err := it.contract.UnpackLog(it.Event, 
it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LogEmitterLog1) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LogEmitterLog1Iterator) Error() error { + return it.fail +} + +func (it *LogEmitterLog1Iterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LogEmitterLog1 struct { + Arg0 *big.Int + Raw types.Log +} + +func (_LogEmitter *LogEmitterFilterer) FilterLog1(opts *bind.FilterOpts) (*LogEmitterLog1Iterator, error) { + + logs, sub, err := _LogEmitter.contract.FilterLogs(opts, "Log1") + if err != nil { + return nil, err + } + return &LogEmitterLog1Iterator{contract: _LogEmitter.contract, event: "Log1", logs: logs, sub: sub}, nil +} + +func (_LogEmitter *LogEmitterFilterer) WatchLog1(opts *bind.WatchOpts, sink chan<- *LogEmitterLog1) (event.Subscription, error) { + + logs, sub, err := _LogEmitter.contract.WatchLogs(opts, "Log1") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LogEmitterLog1) + if err := _LogEmitter.contract.UnpackLog(event, "Log1", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LogEmitter *LogEmitterFilterer) ParseLog1(log types.Log) (*LogEmitterLog1, error) { + event := new(LogEmitterLog1) + if err := _LogEmitter.contract.UnpackLog(event, "Log1", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
LogEmitterLog2Iterator struct { + Event *LogEmitterLog2 + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LogEmitterLog2Iterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogEmitterLog2) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LogEmitterLog2) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LogEmitterLog2Iterator) Error() error { + return it.fail +} + +func (it *LogEmitterLog2Iterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LogEmitterLog2 struct { + Arg0 *big.Int + Raw types.Log +} + +func (_LogEmitter *LogEmitterFilterer) FilterLog2(opts *bind.FilterOpts, arg0 []*big.Int) (*LogEmitterLog2Iterator, error) { + + var arg0Rule []interface{} + for _, arg0Item := range arg0 { + arg0Rule = append(arg0Rule, arg0Item) + } + + logs, sub, err := _LogEmitter.contract.FilterLogs(opts, "Log2", arg0Rule) + if err != nil { + return nil, err + } + return &LogEmitterLog2Iterator{contract: _LogEmitter.contract, event: "Log2", logs: logs, sub: sub}, nil +} + +func (_LogEmitter *LogEmitterFilterer) WatchLog2(opts *bind.WatchOpts, sink chan<- *LogEmitterLog2, arg0 []*big.Int) (event.Subscription, error) { + + var arg0Rule []interface{} + for _, arg0Item := range arg0 { + arg0Rule = append(arg0Rule, arg0Item) + } + + logs, sub, err := _LogEmitter.contract.WatchLogs(opts, "Log2", arg0Rule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer 
sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LogEmitterLog2) + if err := _LogEmitter.contract.UnpackLog(event, "Log2", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LogEmitter *LogEmitterFilterer) ParseLog2(log types.Log) (*LogEmitterLog2, error) { + event := new(LogEmitterLog2) + if err := _LogEmitter.contract.UnpackLog(event, "Log2", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LogEmitterLog3Iterator struct { + Event *LogEmitterLog3 + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LogEmitterLog3Iterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogEmitterLog3) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LogEmitterLog3) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LogEmitterLog3Iterator) Error() error { + return it.fail +} + +func (it *LogEmitterLog3Iterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LogEmitterLog3 struct { + Arg0 string + Raw types.Log +} + +func (_LogEmitter *LogEmitterFilterer) FilterLog3(opts *bind.FilterOpts) (*LogEmitterLog3Iterator, error) { + + logs, sub, err := _LogEmitter.contract.FilterLogs(opts, "Log3") + if err != nil { + return nil, err + } + return 
&LogEmitterLog3Iterator{contract: _LogEmitter.contract, event: "Log3", logs: logs, sub: sub}, nil +} + +func (_LogEmitter *LogEmitterFilterer) WatchLog3(opts *bind.WatchOpts, sink chan<- *LogEmitterLog3) (event.Subscription, error) { + + logs, sub, err := _LogEmitter.contract.WatchLogs(opts, "Log3") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LogEmitterLog3) + if err := _LogEmitter.contract.UnpackLog(event, "Log3", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LogEmitter *LogEmitterFilterer) ParseLog3(log types.Log) (*LogEmitterLog3, error) { + event := new(LogEmitterLog3) + if err := _LogEmitter.contract.UnpackLog(event, "Log3", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_LogEmitter *LogEmitter) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _LogEmitter.abi.Events["Log1"].ID: + return _LogEmitter.ParseLog1(log) + case _LogEmitter.abi.Events["Log2"].ID: + return _LogEmitter.ParseLog2(log) + case _LogEmitter.abi.Events["Log3"].ID: + return _LogEmitter.ParseLog3(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (LogEmitterLog1) Topic() common.Hash { + return common.HexToHash("0x46692c0e59ca9cd1ad8f984a9d11715ec83424398b7eed4e05c8ce84662415a8") +} + +func (LogEmitterLog2) Topic() common.Hash { + return common.HexToHash("0x624fb00c2ce79f34cb543884c3af64816dce0f4cec3d32661959e49d488a7a93") +} + +func (LogEmitterLog3) Topic() common.Hash { + return common.HexToHash("0xb94ec34dfe32a8a7170992a093976368d1e63decf8f0bc0b38a8eb89cc9f95cf") +} + +func (_LogEmitter 
*LogEmitter) Address() common.Address { + return _LogEmitter.address +} + +type LogEmitterInterface interface { + EmitLog1(opts *bind.TransactOpts, v []*big.Int) (*types.Transaction, error) + + EmitLog2(opts *bind.TransactOpts, v []*big.Int) (*types.Transaction, error) + + EmitLog3(opts *bind.TransactOpts, v []string) (*types.Transaction, error) + + FilterLog1(opts *bind.FilterOpts) (*LogEmitterLog1Iterator, error) + + WatchLog1(opts *bind.WatchOpts, sink chan<- *LogEmitterLog1) (event.Subscription, error) + + ParseLog1(log types.Log) (*LogEmitterLog1, error) + + FilterLog2(opts *bind.FilterOpts, arg0 []*big.Int) (*LogEmitterLog2Iterator, error) + + WatchLog2(opts *bind.WatchOpts, sink chan<- *LogEmitterLog2, arg0 []*big.Int) (event.Subscription, error) + + ParseLog2(log types.Log) (*LogEmitterLog2, error) + + FilterLog3(opts *bind.FilterOpts) (*LogEmitterLog3Iterator, error) + + WatchLog3(opts *bind.WatchOpts, sink chan<- *LogEmitterLog3) (event.Subscription, error) + + ParseLog3(log types.Log) (*LogEmitterLog3, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/internal/gethwrappers/generated/upkeep_perform_counter_restrictive_wrapper/upkeep_perform_counter_restrictive_wrapper.go b/core/internal/gethwrappers/generated/upkeep_perform_counter_restrictive_wrapper/upkeep_perform_counter_restrictive_wrapper.go index c803c7ac93a..21495126110 100644 --- a/core/internal/gethwrappers/generated/upkeep_perform_counter_restrictive_wrapper/upkeep_perform_counter_restrictive_wrapper.go +++ b/core/internal/gethwrappers/generated/upkeep_perform_counter_restrictive_wrapper/upkeep_perform_counter_restrictive_wrapper.go @@ -30,8 +30,8 @@ var ( ) var UpkeepPerformCounterRestrictiveMetaData = &bind.MetaData{ - ABI: 
"[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_testRange\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_averageEligibilityCadence\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"eligible\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"initialCall\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nextEligible\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"}],\"name\":\"PerformingUpkeep\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"averageEligibilityCadence\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"checkEligible\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCountPerforms\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialCall\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"nextEligible\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\
"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"reset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_newTestRange\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_newAverageEligibilityCadence\",\"type\":\"uint256\"}],\"name\":\"setSpread\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"testRange\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", - Bin: "0x6080604052600080556000600155600060045534801561001e57600080fd5b5060405161049b38038061049b8339818101604052604081101561004157600080fd5b508051602090910151600291909155600355610439806100626000396000f3fe608060405234801561001057600080fd5b50600436106100be5760003560e01c8063926f086e11610076578063c228a98e1161005b578063c228a98e1461027b578063d826f88f14610297578063e303666f1461029f576100be565b8063926f086e1461026b578063a9a4c57c14610273576100be565b80636250a13a116100a75780636250a13a1461014f5780636e04ff0d146101575780637f407edf14610248576100be565b80634585e33b146100c3578063523d9b8a14610135575b600080fd5b610133600480360360208110156100d957600080fd5b8101906020810181356401000000008111156100f457600080fd5b82018360208201111561010657600080fd5b8035906020019184600183028401116401000000008311171561012857600080fd5b5090925090506102a7565b005b61013d610350565b60408051918252519081900360200190f35b61013d610356565b6101c76004803603602081101561016d57600080fd5b81019060208101813564010000000081111561018857600080fd5b82018360208201111561019a57600080fd5b803590602001918460018302840111640100000000831117156101bc57600080fd5b50909250905061035c565b60405180831515815260200180602001828103825283818151815260200191508051906020019080838360005b8381101561020c5781810151838201526020016101f4565b50505050905090810190601f168015610239578082
0380516001836020036101000a031916815260200191505b50935050505060405180910390f35b6101336004803603604081101561025e57600080fd5b5080359060200135610383565b61013d61038e565b61013d610394565b61028361039a565b604080519115158252519081900360200190f35b6101336103a9565b61013d6103b3565b60006102b16103b9565b60005460015460408051841515815232602082015280820193909352606083019190915243608083018190529051929350917fbd6b6608a51477954e8b498c633bda87e5cd555e06ead50486398d9e3b9cebc09181900360a00190a18161031757600080fd5b6000546103245760008190555b6003546002026103326103dd565b8161033957fe5b060160019081018155600480549091019055505050565b60015481565b60025481565b600060606103686103b9565b60405180602001604052806000815250915091509250929050565b600291909155600355565b60005481565b60035481565b60006103a46103b9565b905090565b6000808055600455565b60045490565b6000805415806103a4575060025460005443031080156103a4575050600154431190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4301406020808301919091523082840152825180830384018152606090920190925280519101209056fea164736f6c6343000706000a", + ABI: 
"[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_testRange\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_averageEligibilityCadence\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"eligible\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"initialCall\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nextEligible\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"}],\"name\":\"PerformingUpkeep\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"averageEligibilityCadence\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"checkEligible\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"checkGasToBurn\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"dummyMap\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCountPerforms\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\
":\"function\"},{\"inputs\":[],\"name\":\"initialCall\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"nextEligible\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"performGasToBurn\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"reset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"setCheckGasToBurn\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"setPerformGasToBurn\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_newTestRange\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_newAverageEligibilityCadence\",\"type\":\"uint256\"}],\"name\":\"setSpread\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"testRange\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x6080604052600080556000600155600060075534801561001e57600080fd5b506040516106883803806106888339818101604052604081101561004157600080fd5b508051602090910151600291909155600355610626806100626000396000f3fe608060405234801561001057600080fd5b50600436106100f55760003560e01c80637145f11b11610097578063b30566b411610066578063b30566b414610325578063c228a98e1461032d578063d826f88f14610335578063e303666f1461033d576100f5565b80637145f11b146102c15780637f407edf146102f2578063926f086e14610315578063a9a4c57c1461031d576100f5565b80634585e33b116100d35780634585e33b14610150578063523d9b8a146101c05780636250a13a146101c85780636e04ff0d146101d0576100f5565b806313bda75b146100fa5780632555d2cf146101195780632ff3617d14610136575b600080fd5b6101176004803603602081101561011057600080fd5b5035610345565b005b6101176004803603602081101561012f57600080fd5b503561034a565b61013e61034f565b60408051918252519081900360200190f35b6101176004803603602081101561016657600080fd5b81019060208101813564010000000081111561018157600080fd5b82018360208201111561019357600080fd5b803590602001918460018302840111640100000000831117156101b557600080fd5b509092509050610355565b61013e610492565b61013e610498565b610240600480360360208110156101e657600080fd5b81019060208101813564010000000081111561020157600080fd5b82018360208201111561021357600080fd5b8035906020019184600183028401116401000000008311171561023557600080fd5b50909250905061049e565b60405180831515815260200180602001828103825283818151815260200191508051906020019080838360005b8381101561028557818101518382015260200161026d565b50505050905090810190601f1680156102b25780820380516001836020036101000a031916815260200191505b50935050505060405180910390f35b6102de600480360360208110156102d757600080fd5b5035610555565b604080519115158252519081900360200190f35b6101176004803603604081101561030857600080fd5b508035906020013561056a565b61013e610575565b61013e61057b565b61013e610581565b6102de610587565b610117610596565b61013e6105a0565b600455565b600555565b60045481565b60005a905060006103646105a6565b600054600154604080518415158152326020820152808201939093526060830
19190915243608083018190529051929350917fbd6b6608a51477954e8b498c633bda87e5cd555e06ead50486398d9e3b9cebc09181900360a00190a1816103ca57600080fd5b6000546103d75760008190555b6003546002026103e56105ca565b816103ec57fe5b068101600190810181556007805490910190557fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff015b6005545a8403101561048b578040600090815260066020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690557fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01610422565b5050505050565b60015481565b60025481565b6000606060005a90507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff430160005b6004545a84031015610522578080156104f65750814060009081526006602052604090205460ff165b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9092019190506104cd565b61052a6105a6565b6040805192151560208085019190915281518085039091018152928101905297909650945050505050565b60066020526000908152604090205460ff1681565b600291909155600355565b60005481565b60035481565b60055481565b60006105916105a6565b905090565b6000808055600755565b60075490565b60008054158061059157506002546000544303108015610591575050600154431190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4301406020808301919091523082840152825180830384018152606090920190925280519101209056fea164736f6c6343000706000a", } var UpkeepPerformCounterRestrictiveABI = UpkeepPerformCounterRestrictiveMetaData.ABI @@ -214,6 +214,28 @@ func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSes return _UpkeepPerformCounterRestrictive.Contract.CheckEligible(&_UpkeepPerformCounterRestrictive.CallOpts) } +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) CheckGasToBurn(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "checkGasToBurn") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], 
new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) CheckGasToBurn() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.CheckGasToBurn(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSession) CheckGasToBurn() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.CheckGasToBurn(&_UpkeepPerformCounterRestrictive.CallOpts) +} + func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) CheckUpkeep(opts *bind.CallOpts, data []byte) (bool, []byte, error) { var out []interface{} err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "checkUpkeep", data) @@ -237,6 +259,28 @@ func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSes return _UpkeepPerformCounterRestrictive.Contract.CheckUpkeep(&_UpkeepPerformCounterRestrictive.CallOpts, data) } +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) { + var out []interface{} + err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "dummyMap", arg0) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) DummyMap(arg0 [32]byte) (bool, error) { + return _UpkeepPerformCounterRestrictive.Contract.DummyMap(&_UpkeepPerformCounterRestrictive.CallOpts, arg0) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSession) DummyMap(arg0 [32]byte) (bool, error) { + return _UpkeepPerformCounterRestrictive.Contract.DummyMap(&_UpkeepPerformCounterRestrictive.CallOpts, arg0) +} + func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) GetCountPerforms(opts *bind.CallOpts) (*big.Int, error) { 
var out []interface{} err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "getCountPerforms") @@ -303,6 +347,28 @@ func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSes return _UpkeepPerformCounterRestrictive.Contract.NextEligible(&_UpkeepPerformCounterRestrictive.CallOpts) } +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) PerformGasToBurn(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "performGasToBurn") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) PerformGasToBurn() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.PerformGasToBurn(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSession) PerformGasToBurn() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.PerformGasToBurn(&_UpkeepPerformCounterRestrictive.CallOpts) +} + func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) TestRange(opts *bind.CallOpts) (*big.Int, error) { var out []interface{} err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "testRange") @@ -325,16 +391,16 @@ func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSes return _UpkeepPerformCounterRestrictive.Contract.TestRange(&_UpkeepPerformCounterRestrictive.CallOpts) } -func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactor) PerformUpkeep(opts *bind.TransactOpts, data []byte) (*types.Transaction, error) { - return _UpkeepPerformCounterRestrictive.contract.Transact(opts, "performUpkeep", data) +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactor) PerformUpkeep(opts 
*bind.TransactOpts, arg0 []byte) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.contract.Transact(opts, "performUpkeep", arg0) } -func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) PerformUpkeep(data []byte) (*types.Transaction, error) { - return _UpkeepPerformCounterRestrictive.Contract.PerformUpkeep(&_UpkeepPerformCounterRestrictive.TransactOpts, data) +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) PerformUpkeep(arg0 []byte) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.PerformUpkeep(&_UpkeepPerformCounterRestrictive.TransactOpts, arg0) } -func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactorSession) PerformUpkeep(data []byte) (*types.Transaction, error) { - return _UpkeepPerformCounterRestrictive.Contract.PerformUpkeep(&_UpkeepPerformCounterRestrictive.TransactOpts, data) +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactorSession) PerformUpkeep(arg0 []byte) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.PerformUpkeep(&_UpkeepPerformCounterRestrictive.TransactOpts, arg0) } func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactor) Reset(opts *bind.TransactOpts) (*types.Transaction, error) { @@ -349,6 +415,30 @@ func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransacto return _UpkeepPerformCounterRestrictive.Contract.Reset(&_UpkeepPerformCounterRestrictive.TransactOpts) } +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactor) SetCheckGasToBurn(opts *bind.TransactOpts, value *big.Int) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.contract.Transact(opts, "setCheckGasToBurn", value) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) SetCheckGasToBurn(value *big.Int) (*types.Transaction, error) { + return 
_UpkeepPerformCounterRestrictive.Contract.SetCheckGasToBurn(&_UpkeepPerformCounterRestrictive.TransactOpts, value) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactorSession) SetCheckGasToBurn(value *big.Int) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.SetCheckGasToBurn(&_UpkeepPerformCounterRestrictive.TransactOpts, value) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactor) SetPerformGasToBurn(opts *bind.TransactOpts, value *big.Int) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.contract.Transact(opts, "setPerformGasToBurn", value) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) SetPerformGasToBurn(value *big.Int) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.SetPerformGasToBurn(&_UpkeepPerformCounterRestrictive.TransactOpts, value) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactorSession) SetPerformGasToBurn(value *big.Int) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.SetPerformGasToBurn(&_UpkeepPerformCounterRestrictive.TransactOpts, value) +} + func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactor) SetSpread(opts *bind.TransactOpts, _newTestRange *big.Int, _newAverageEligibilityCadence *big.Int) (*types.Transaction, error) { return _UpkeepPerformCounterRestrictive.contract.Transact(opts, "setSpread", _newTestRange, _newAverageEligibilityCadence) } @@ -505,20 +595,30 @@ type UpkeepPerformCounterRestrictiveInterface interface { CheckEligible(opts *bind.CallOpts) (bool, error) + CheckGasToBurn(opts *bind.CallOpts) (*big.Int, error) + CheckUpkeep(opts *bind.CallOpts, data []byte) (bool, []byte, error) + DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) + GetCountPerforms(opts *bind.CallOpts) (*big.Int, error) InitialCall(opts *bind.CallOpts) 
(*big.Int, error) NextEligible(opts *bind.CallOpts) (*big.Int, error) + PerformGasToBurn(opts *bind.CallOpts) (*big.Int, error) + TestRange(opts *bind.CallOpts) (*big.Int, error) - PerformUpkeep(opts *bind.TransactOpts, data []byte) (*types.Transaction, error) + PerformUpkeep(opts *bind.TransactOpts, arg0 []byte) (*types.Transaction, error) Reset(opts *bind.TransactOpts) (*types.Transaction, error) + SetCheckGasToBurn(opts *bind.TransactOpts, value *big.Int) (*types.Transaction, error) + + SetPerformGasToBurn(opts *bind.TransactOpts, value *big.Int) (*types.Transaction, error) + SetSpread(opts *bind.TransactOpts, _newTestRange *big.Int, _newAverageEligibilityCadence *big.Int) (*types.Transaction, error) FilterPerformingUpkeep(opts *bind.FilterOpts) (*UpkeepPerformCounterRestrictivePerformingUpkeepIterator, error) diff --git a/core/internal/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/internal/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt index 01491dc3eb1..96e8f225c40 100644 --- a/core/internal/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt +++ b/core/internal/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -1,33 +1,38 @@ GETH_VERSION: 1.10.16 -aggregator_v2v3_interface: ../../../contracts/solc/v0.8/AggregatorV2V3Interface.abi ../../../contracts/solc/v0.8/AggregatorV2V3Interface.bin 95e8814b408bb05bf21742ef580d98698b7db6a9bac6a35c3de12b23aec4ee28 -aggregator_v3_interface: ../../../contracts/solc/v0.8/AggregatorV3Interface.abi ../../../contracts/solc/v0.8/AggregatorV3Interface.bin 351b55d3b0f04af67db6dfb5c92f1c64479400ca1fec77afc20bc0ce65cb49ab -batch_blockhash_store: ../../../contracts/solc/v0.8/BatchBlockhashStore.abi ../../../contracts/solc/v0.8/BatchBlockhashStore.bin 9220f1ed6c576863a2f5b34263846660e742f3513c19eb9e2562a374cb70c252 +aggregator_v2v3_interface: ../../../contracts/solc/v0.8.6/AggregatorV2V3Interface.abi 
../../../contracts/solc/v0.8.6/AggregatorV2V3Interface.bin 95e8814b408bb05bf21742ef580d98698b7db6a9bac6a35c3de12b23aec4ee28 +aggregator_v3_interface: ../../../contracts/solc/v0.8.6/AggregatorV3Interface.abi ../../../contracts/solc/v0.8.6/AggregatorV3Interface.bin 351b55d3b0f04af67db6dfb5c92f1c64479400ca1fec77afc20bc0ce65cb49ab +authorized_forwarder: ../../../contracts/solc/v0.7/AuthorizedForwarder.abi ../../../contracts/solc/v0.7/AuthorizedForwarder.bin 6d85fbc79b19342344d2a117cefd77f33dd278c2884e414c9f479e59656f8540 +authorized_receiver: ../../../contracts/solc/v0.7/AuthorizedReceiver.abi ../../../contracts/solc/v0.7/AuthorizedReceiver.bin 18e8969ba3234b027e1b16c11a783aca58d0ea5c2361010ec597f134b7bf1c4f +batch_blockhash_store: ../../../contracts/solc/v0.8.6/BatchBlockhashStore.abi ../../../contracts/solc/v0.8.6/BatchBlockhashStore.bin 9220f1ed6c576863a2f5b34263846660e742f3513c19eb9e2562a374cb70c252 +batch_vrf_coordinator_v2: ../../../contracts/solc/v0.8.13/BatchVRFCoordinatorV2.abi ../../../contracts/solc/v0.8.13/BatchVRFCoordinatorV2.bin be521b16957e7d84411e62daacb54da7ea136cd4b417330bb6773eff340ea645 blockhash_store: ../../../contracts/solc/v0.6/BlockhashStore.abi ../../../contracts/solc/v0.6/BlockhashStore.bin 6b3da771f033b3a5e53bf112396d0698debf65a8dcd81370f07d72948c8b14d9 consumer_wrapper: ../../../contracts/solc/v0.7/Consumer.abi ../../../contracts/solc/v0.7/Consumer.bin 894d1cbd920dccbd36d92918c1037c6ded34f66f417ccb18ec3f33c64ef83ec5 -derived_price_feed_wrapper: ../../../contracts/solc/v0.8/DerivedPriceFeed.abi ../../../contracts/solc/v0.8/DerivedPriceFeed.bin c8542e6c850c2d0fffb79a7f7213dc927ec64e6ddd54e1224cb2fb4a13aabdd0 +cron_upkeep_factory_wrapper: ../../../contracts/solc/v0.8.6/CronUpkeepFactory.abi - dacb0f8cdf54ae9d2781c5e720fc314b32ed5e58eddccff512c75d6067292cd7 +cron_upkeep_wrapper: ../../../contracts/solc/v0.8.6/CronUpkeep.abi - b1257293e0016a6d32f6febec5ff4cd8d7276d228c12228e6b6e143111f0c5ed +derived_price_feed_wrapper: 
../../../contracts/solc/v0.8.6/DerivedPriceFeed.abi ../../../contracts/solc/v0.8.6/DerivedPriceFeed.bin c8542e6c850c2d0fffb79a7f7213dc927ec64e6ddd54e1224cb2fb4a13aabdd0 flags_wrapper: ../../../contracts/solc/v0.6/Flags.abi ../../../contracts/solc/v0.6/Flags.bin 2034d1b562ca37a63068851915e3703980276e8d5f7db6db8a3351a49d69fc4a flux_aggregator_wrapper: ../../../contracts/solc/v0.6/FluxAggregator.abi ../../../contracts/solc/v0.6/FluxAggregator.bin a3b0a6396c4aa3b5ee39b3c4bd45efc89789d4859379a8a92caca3a0496c5794 -keeper_registry_vb_wrapper: ../../../contracts/solc/v0.7/KeeperRegistryVB.abi ../../../contracts/solc/v0.7/KeeperRegistryVB.bin af13a53e7350624fab49e259ca484098361d7b72eb8f4e4338b659d766e5bee5 -keeper_registry_wrapper: ../../../contracts/solc/v0.7/KeeperRegistry.abi ../../../contracts/solc/v0.7/KeeperRegistry.bin f5c76c6fc35e775da0a4737beea8ebcb6f37ca78db3c21811f17b6f3d5623379 +keeper_registry_wrapper: ../../../contracts/solc/v0.7/KeeperRegistry.abi ../../../contracts/solc/v0.7/KeeperRegistry.bin fd5171038649a53203e3e8315f311320a43983858c8eaa709927cb00030b7f12 +log_emitter: ../../../contracts/solc/v0.8.6/LogEmitter.abi ../../../contracts/solc/v0.8.6/LogEmitter.bin 375488d19b6ee1c180d42048be10abea9146e2a58347fd180358850d435cb854 multiwordconsumer_wrapper: ../../../contracts/solc/v0.7/MultiWordConsumer.abi ../../../contracts/solc/v0.7/MultiWordConsumer.bin 6e68abdf614e3ed0f5066c1b5f9d7c1199f1e7c5c5251fe8a471344a59afc6ba offchain_aggregator_wrapper: OffchainAggregator/OffchainAggregator.abi - 5f97dc197fd4e2b999856b9b3fa7c2aaf0c700c71d7009d7d017d233bc855877 operator_wrapper: ../../../contracts/solc/v0.7/Operator.abi ../../../contracts/solc/v0.7/Operator.bin 08965dbb26f62739c1ce4d0941f30c0dd08003648823e7d722900b48270ffc2b oracle_wrapper: ../../../contracts/solc/v0.6/Oracle.abi ../../../contracts/solc/v0.6/Oracle.bin 7af2fbac22a6e8c2847e8e685a5400cac5101d72ddf5365213beb79e4dede43a solidity_vrf_consumer_interface: ../../../contracts/solc/v0.6/VRFConsumer.abi 
../../../contracts/solc/v0.6/VRFConsumer.bin a79da241ca9525f6a96adc5f55651b1f91e1579b85fec7b02a51d06bd6018ee3 -solidity_vrf_consumer_interface_v08: ../../../contracts/solc/v0.8/VRFConsumer.abi ../../../contracts/solc/v0.8/VRFConsumer.bin 93311eeed53bc3127cfe46f906614a58dde3a95ec87b8c1caaa1be234384fefe +solidity_vrf_consumer_interface_v08: ../../../contracts/solc/v0.8.6/VRFConsumer.abi ../../../contracts/solc/v0.8.6/VRFConsumer.bin 93311eeed53bc3127cfe46f906614a58dde3a95ec87b8c1caaa1be234384fefe solidity_vrf_coordinator_interface: ../../../contracts/solc/v0.6/VRFCoordinator.abi ../../../contracts/solc/v0.6/VRFCoordinator.bin a23d3c395156804788c7f6fbda2994e8f7184304c0f0c9f2c4ddeaf073d346d2 solidity_vrf_request_id: ../../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper.abi ../../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper.bin 383b59e861732c1911ddb7b002c6158608496ce889979296527215fd0366b318 -solidity_vrf_request_id_v08: ../../../contracts/solc/v0.8/VRFRequestIDBaseTestHelper.abi ../../../contracts/solc/v0.8/VRFRequestIDBaseTestHelper.bin f2559015d6f3e5d285c57b011be9b2300632e93dd6c4524e58202d6200f09edc -solidity_vrf_v08_verifier_wrapper: ../../../contracts/solc/v0.8/VRFTestHelper.abi ../../../contracts/solc/v0.8/VRFTestHelper.bin f37f8b21a81c113085c6137835a2246db6ebda07da455c4f2b5c7ec60c725c3b +solidity_vrf_request_id_v08: ../../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper.abi ../../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper.bin f2559015d6f3e5d285c57b011be9b2300632e93dd6c4524e58202d6200f09edc +solidity_vrf_v08_verifier_wrapper: ../../../contracts/solc/v0.8.6/VRFTestHelper.abi ../../../contracts/solc/v0.8.6/VRFTestHelper.bin f37f8b21a81c113085c6137835a2246db6ebda07da455c4f2b5c7ec60c725c3b solidity_vrf_verifier_wrapper: ../../../contracts/solc/v0.6/VRFTestHelper.abi ../../../contracts/solc/v0.6/VRFTestHelper.bin 44c2b67d8d2990ab580453deb29d63508c6147a3dc49908a1db563bef06e6474 upkeep_counter_wrapper: ../../../contracts/solc/v0.7/UpkeepCounter.abi 
../../../contracts/solc/v0.7/UpkeepCounter.bin 901961ebf18906febc1c350f02da85c7ea1c2a68da70cfd94efa27c837a48663 -upkeep_perform_counter_restrictive_wrapper: ../../../contracts/solc/v0.7/UpkeepPerformCounterRestrictive.abi ../../../contracts/solc/v0.7/UpkeepPerformCounterRestrictive.bin 0e9c8a89d38491ca7f65e75201a8666c5e548846a1cd85a209e580dad06db386 -vrf_consumer_v2: ../../../contracts/solc/v0.8/VRFConsumerV2.abi ../../../contracts/solc/v0.8/VRFConsumerV2.bin ad31c7a9e17e0e5d2e334f6478b7a2435a17c12f9fc33143729d7452bfd0cd91 -vrf_coordinator_v2: ../../../contracts/solc/v0.8/VRFCoordinatorV2.abi ../../../contracts/solc/v0.8/VRFCoordinatorV2.bin 7839d54d662197ad8f987495b6461e8ea9c66746b79a744bfd74ff086c276be0 -vrf_external_sub_owner_example: ../../../contracts/solc/v0.8/VRFExternalSubOwnerExample.abi ../../../contracts/solc/v0.8/VRFExternalSubOwnerExample.bin 14f888eb313930b50233a6f01ea31eba0206b7f41a41f6311670da8bb8a26963 -vrf_load_test_external_sub_owner: ../../../contracts/solc/v0.8/VRFLoadTestExternalSubOwner.abi ../../../contracts/solc/v0.8/VRFLoadTestExternalSubOwner.bin 2097faa70265e420036cc8a3efb1f1e0836ad2d7323b295b9a26a125dbbe6c7d -vrf_load_test_ownerless_consumer: ../../../contracts/solc/v0.8/VRFLoadTestOwnerlessConsumer.abi ../../../contracts/solc/v0.8/VRFLoadTestOwnerlessConsumer.bin 74f914843cbc70b9c3079c3e1c709382ce415225e8bb40113e7ac018bfcb0f5c -vrf_malicious_consumer_v2: ../../../contracts/solc/v0.8/VRFMaliciousConsumerV2.abi ../../../contracts/solc/v0.8/VRFMaliciousConsumerV2.bin 68ecbaa5dfa616ed5d628e57772fa17569719fc0b0a2a10bcd27ba01c49bc998 -vrf_ownerless_consumer_example: ../../../contracts/solc/v0.8/VRFOwnerlessConsumerExample.abi ../../../contracts/solc/v0.8/VRFOwnerlessConsumerExample.bin 9893b3805863273917fb282eed32274e32aa3d5c2a67a911510133e1218132be -vrf_single_consumer_example: ../../../contracts/solc/v0.8/VRFSingleConsumerExample.abi ../../../contracts/solc/v0.8/VRFSingleConsumerExample.bin 
892a5ed35da2e933f7fd7835cd6f7f70ef3aa63a9c03a22c5b1fd026711b0ece -vrfv2_reverting_example: ../../../contracts/solc/v0.8/VRFV2RevertingExample.abi ../../../contracts/solc/v0.8/VRFV2RevertingExample.bin 0e4eb8e0f92fab1f74a6bef7951b511e2c2f7b7134ca1dc65928234ec73dca9a +upkeep_perform_counter_restrictive_wrapper: ../../../contracts/solc/v0.7/UpkeepPerformCounterRestrictive.abi ../../../contracts/solc/v0.7/UpkeepPerformCounterRestrictive.bin 8975a058fba528e16d8414dc6f13946d17a145fcbc66cf25a32449b6fe1ce878 +vrf_consumer_v2: ../../../contracts/solc/v0.8.6/VRFConsumerV2.abi ../../../contracts/solc/v0.8.6/VRFConsumerV2.bin ad31c7a9e17e0e5d2e334f6478b7a2435a17c12f9fc33143729d7452bfd0cd91 +vrf_coordinator_v2: ../../../contracts/solc/v0.8.6/VRFCoordinatorV2.abi ../../../contracts/solc/v0.8.6/VRFCoordinatorV2.bin 7839d54d662197ad8f987495b6461e8ea9c66746b79a744bfd74ff086c276be0 +vrf_external_sub_owner_example: ../../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample.abi ../../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample.bin 14f888eb313930b50233a6f01ea31eba0206b7f41a41f6311670da8bb8a26963 +vrf_load_test_external_sub_owner: ../../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner.abi ../../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner.bin 2097faa70265e420036cc8a3efb1f1e0836ad2d7323b295b9a26a125dbbe6c7d +vrf_load_test_ownerless_consumer: ../../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer.abi ../../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer.bin 74f914843cbc70b9c3079c3e1c709382ce415225e8bb40113e7ac018bfcb0f5c +vrf_malicious_consumer_v2: ../../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2.abi ../../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2.bin 68ecbaa5dfa616ed5d628e57772fa17569719fc0b0a2a10bcd27ba01c49bc998 +vrf_ownerless_consumer_example: ../../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample.abi ../../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample.bin 9893b3805863273917fb282eed32274e32aa3d5c2a67a911510133e1218132be 
+vrf_single_consumer_example: ../../../contracts/solc/v0.8.6/VRFSingleConsumerExample.abi ../../../contracts/solc/v0.8.6/VRFSingleConsumerExample.bin 892a5ed35da2e933f7fd7835cd6f7f70ef3aa63a9c03a22c5b1fd026711b0ece +vrfv2_reverting_example: ../../../contracts/solc/v0.8.6/VRFV2RevertingExample.abi ../../../contracts/solc/v0.8.6/VRFV2RevertingExample.bin 0e4eb8e0f92fab1f74a6bef7951b511e2c2f7b7134ca1dc65928234ec73dca9a diff --git a/core/internal/gethwrappers/go_generate.go b/core/internal/gethwrappers/go_generate.go index ba569442fcf..97eed84ff25 100644 --- a/core/internal/gethwrappers/go_generate.go +++ b/core/internal/gethwrappers/go_generate.go @@ -16,39 +16,47 @@ package gethwrappers //go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.7/Consumer.abi ../../../contracts/solc/v0.7/Consumer.bin Consumer consumer_wrapper //go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.7/MultiWordConsumer.abi ../../../contracts/solc/v0.7/MultiWordConsumer.bin MultiWordConsumer multiwordconsumer_wrapper //go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.7/Operator.abi ../../../contracts/solc/v0.7/Operator.bin Operator operator_wrapper -//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8/BatchBlockhashStore.abi ../../../contracts/solc/v0.8/BatchBlockhashStore.bin BatchBlockhashStore batch_blockhash_store +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.7/AuthorizedForwarder.abi ../../../contracts/solc/v0.7/AuthorizedForwarder.bin AuthorizedForwarder authorized_forwarder +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.7/AuthorizedReceiver.abi ../../../contracts/solc/v0.7/AuthorizedReceiver.bin AuthorizedReceiver authorized_receiver +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/BatchBlockhashStore.abi ../../../contracts/solc/v0.8.6/BatchBlockhashStore.bin BatchBlockhashStore 
batch_blockhash_store +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.13/BatchVRFCoordinatorV2.abi ../../../contracts/solc/v0.8.13/BatchVRFCoordinatorV2.bin BatchVRFCoordinatorV2 batch_vrf_coordinator_v2 //go:generate go run ./generation/generate/wrap.go OffchainAggregator/OffchainAggregator.abi - OffchainAggregator offchain_aggregator_wrapper //go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.7/KeeperRegistry.abi ../../../contracts/solc/v0.7/KeeperRegistry.bin KeeperRegistry keeper_registry_wrapper -//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.7/KeeperRegistryVB.abi ../../../contracts/solc/v0.7/KeeperRegistryVB.bin KeeperRegistryVB keeper_registry_vb_wrapper //go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.7/UpkeepPerformCounterRestrictive.abi ../../../contracts/solc/v0.7/UpkeepPerformCounterRestrictive.bin UpkeepPerformCounterRestrictive upkeep_perform_counter_restrictive_wrapper //go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.7/UpkeepCounter.abi ../../../contracts/solc/v0.7/UpkeepCounter.bin UpkeepCounter upkeep_counter_wrapper +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/CronUpkeepFactory.abi - CronUpkeepFactory cron_upkeep_factory_wrapper +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/CronUpkeep.abi - CronUpkeep cron_upkeep_wrapper -// v0.8 VRFConsumer -//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8/VRFConsumer.abi ../../../contracts/solc/v0.8/VRFConsumer.bin VRFConsumer solidity_vrf_consumer_interface_v08 -//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8/VRFRequestIDBaseTestHelper.abi ../../../contracts/solc/v0.8/VRFRequestIDBaseTestHelper.bin VRFRequestIDBaseTestHelper solidity_vrf_request_id_v08 -//go:generate go run ./generation/generate/wrap.go 
../../../contracts/solc/v0.8/VRFOwnerlessConsumerExample.abi ../../../contracts/solc/v0.8/VRFOwnerlessConsumerExample.bin VRFOwnerlessConsumerExample vrf_ownerless_consumer_example -//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8/VRFLoadTestOwnerlessConsumer.abi ../../../contracts/solc/v0.8/VRFLoadTestOwnerlessConsumer.bin VRFLoadTestOwnerlessConsumer vrf_load_test_ownerless_consumer -//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8/VRFLoadTestExternalSubOwner.abi ../../../contracts/solc/v0.8/VRFLoadTestExternalSubOwner.bin VRFLoadTestExternalSubOwner vrf_load_test_external_sub_owner +// v0.8.6 VRFConsumer +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/VRFConsumer.abi ../../../contracts/solc/v0.8.6/VRFConsumer.bin VRFConsumer solidity_vrf_consumer_interface_v08 +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper.abi ../../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper.bin VRFRequestIDBaseTestHelper solidity_vrf_request_id_v08 +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample.abi ../../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample.bin VRFOwnerlessConsumerExample vrf_ownerless_consumer_example +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer.abi ../../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer.bin VRFLoadTestOwnerlessConsumer vrf_load_test_ownerless_consumer +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner.abi ../../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner.bin VRFLoadTestExternalSubOwner vrf_load_test_external_sub_owner -//go:generate mockery --recursive --name FluxAggregatorInterface --output ../mocks/ --case=underscore --structname FluxAggregator --filename flux_aggregator.go -//go:generate 
mockery --recursive --name FlagsInterface --output ../mocks/ --case=underscore --structname Flags --filename flags.go +//go:generate mockery --srcpkg github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/flux_aggregator_wrapper --name FluxAggregatorInterface --output ../mocks/ --case=underscore --structname FluxAggregator --filename flux_aggregator.go +//go:generate mockery --srcpkg github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/flags_wrapper --name FlagsInterface --output ../mocks/ --case=underscore --structname Flags --filename flags.go +//go:generate mockery --srcpkg github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/aggregator_v3_interface --name AggregatorV3InterfaceInterface --output ../../services/vrf/mocks/ --case=underscore --structname AggregatorV3Interface --filename aggregator_v3_interface.go //go:generate go run ./generation/generate_link/wrap_link.go // VRF V2 -//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8/VRFCoordinatorV2.abi ../../../contracts/solc/v0.8/VRFCoordinatorV2.bin VRFCoordinatorV2 vrf_coordinator_v2 -//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8/VRFConsumerV2.abi ../../../contracts/solc/v0.8/VRFConsumerV2.bin VRFConsumerV2 vrf_consumer_v2 -//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8/VRFMaliciousConsumerV2.abi ../../../contracts/solc/v0.8/VRFMaliciousConsumerV2.bin VRFMaliciousConsumerV2 vrf_malicious_consumer_v2 -//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8/VRFTestHelper.abi ../../../contracts/solc/v0.8/VRFTestHelper.bin VRFV08TestHelper solidity_vrf_v08_verifier_wrapper -//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8/VRFSingleConsumerExample.abi ../../../contracts/solc/v0.8/VRFSingleConsumerExample.bin VRFSingleConsumerExample vrf_single_consumer_example -//go:generate go run 
./generation/generate/wrap.go ../../../contracts/solc/v0.8/VRFExternalSubOwnerExample.abi ../../../contracts/solc/v0.8/VRFExternalSubOwnerExample.bin VRFExternalSubOwnerExample vrf_external_sub_owner_example -//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8/VRFV2RevertingExample.abi ../../../contracts/solc/v0.8/VRFV2RevertingExample.bin VRFV2RevertingExample vrfv2_reverting_example +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/VRFCoordinatorV2.abi ../../../contracts/solc/v0.8.6/VRFCoordinatorV2.bin VRFCoordinatorV2 vrf_coordinator_v2 +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/VRFConsumerV2.abi ../../../contracts/solc/v0.8.6/VRFConsumerV2.bin VRFConsumerV2 vrf_consumer_v2 +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2.abi ../../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2.bin VRFMaliciousConsumerV2 vrf_malicious_consumer_v2 +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/VRFTestHelper.abi ../../../contracts/solc/v0.8.6/VRFTestHelper.bin VRFV08TestHelper solidity_vrf_v08_verifier_wrapper +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/VRFSingleConsumerExample.abi ../../../contracts/solc/v0.8.6/VRFSingleConsumerExample.bin VRFSingleConsumerExample vrf_single_consumer_example +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample.abi ../../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample.bin VRFExternalSubOwnerExample vrf_external_sub_owner_example +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/VRFV2RevertingExample.abi ../../../contracts/solc/v0.8.6/VRFV2RevertingExample.bin VRFV2RevertingExample vrfv2_reverting_example // Aggregators -//go:generate go run ./generation/generate/wrap.go 
../../../contracts/solc/v0.8/AggregatorV2V3Interface.abi ../../../contracts/solc/v0.8/AggregatorV2V3Interface.bin AggregatorV2V3Interface aggregator_v2v3_interface -//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8/AggregatorV3Interface.abi ../../../contracts/solc/v0.8/AggregatorV3Interface.bin AggregatorV3Interface aggregator_v3_interface -//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8/DerivedPriceFeed.abi ../../../contracts/solc/v0.8/DerivedPriceFeed.bin DerivedPriceFeed derived_price_feed_wrapper +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/AggregatorV2V3Interface.abi ../../../contracts/solc/v0.8.6/AggregatorV2V3Interface.bin AggregatorV2V3Interface aggregator_v2v3_interface +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/AggregatorV3Interface.abi ../../../contracts/solc/v0.8.6/AggregatorV3Interface.bin AggregatorV3Interface aggregator_v3_interface +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/DerivedPriceFeed.abi ../../../contracts/solc/v0.8.6/DerivedPriceFeed.bin DerivedPriceFeed derived_price_feed_wrapper + +// Log tester +//go:generate go run ./generation/generate/wrap.go ../../../contracts/solc/v0.8.6/LogEmitter.abi ../../../contracts/solc/v0.8.6/LogEmitter.bin LogEmitter log_emitter // To run these commands, you must either install docker, or the correct version // of abigen. The latter can be installed with these commands, at least on linux: diff --git a/core/internal/gethwrappers2/compile.sh b/core/internal/gethwrappers2/compile.sh old mode 100644 new mode 100755 diff --git a/core/internal/mocks/after_nower.go b/core/internal/mocks/after_nower.go index 535116ccc67..4b546a3e50a 100644 --- a/core/internal/mocks/after_nower.go +++ b/core/internal/mocks/after_nower.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/internal/mocks/application.go b/core/internal/mocks/application.go index 8aeb113c8d5..035c6ced692 100644 --- a/core/internal/mocks/application.go +++ b/core/internal/mocks/application.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/flags.go b/core/internal/mocks/flags.go index 12a5016f8c8..5663defc4dc 100644 --- a/core/internal/mocks/flags.go +++ b/core/internal/mocks/flags.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/flux_aggregator.go b/core/internal/mocks/flux_aggregator.go index 0e0ff9ac6e3..a5e1f30bd15 100644 --- a/core/internal/mocks/flux_aggregator.go +++ b/core/internal/mocks/flux_aggregator.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/prometheus_backend.go b/core/internal/mocks/prometheus_backend.go index 6d8d4978c5e..d866b9428ba 100644 --- a/core/internal/mocks/prometheus_backend.go +++ b/core/internal/mocks/prometheus_backend.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/internal/testutils/configtest/general_config.go b/core/internal/testutils/configtest/general_config.go index fe18d6946e8..3f4548a0026 100644 --- a/core/internal/testutils/configtest/general_config.go +++ b/core/internal/testutils/configtest/general_config.go @@ -88,6 +88,8 @@ type GeneralConfigOverrides struct { KeeperMaximumGracePeriod null.Int KeeperRegistrySyncInterval *time.Duration KeeperRegistrySyncUpkeepQueueSize null.Int + KeeperTurnLookBack null.Int + KeeperTurnFlagEnabled null.Bool LeaseLockDuration *time.Duration LeaseLockRefreshInterval *time.Duration LogFileDir null.String @@ -111,6 +113,7 @@ type GeneralConfigOverrides struct { EVMRPCEnabled null.Bool TerraEnabled null.Bool P2PEnabled null.Bool + SolanaEnabled null.Bool // OCR v2 OCR2DatabaseTimeout *time.Duration @@ -289,6 +292,14 @@ func (c *TestGeneralConfig) TerraEnabled() bool { return c.GeneralConfig.TerraEnabled() } +// SolanaEnabled allows Solana to be used +func (c *TestGeneralConfig) SolanaEnabled() bool { + if c.Overrides.SolanaEnabled.Valid { + return c.Overrides.SolanaEnabled.Bool + } + return c.GeneralConfig.SolanaEnabled() +} + func (c *TestGeneralConfig) EthereumURL() string { if c.Overrides.EthereumURL.Valid { return c.Overrides.EthereumURL.String @@ -459,6 +470,20 @@ func (c *TestGeneralConfig) KeeperMaximumGracePeriod() int64 { return c.GeneralConfig.KeeperMaximumGracePeriod() } +func (c *TestGeneralConfig) KeeperTurnLookBack() int64 { + if c.Overrides.KeeperTurnLookBack.Valid { + return c.Overrides.KeeperTurnLookBack.Int64 + } + return c.GeneralConfig.KeeperTurnLookBack() +} + +func (c *TestGeneralConfig) KeeperTurnFlagEnabled() bool { + if c.Overrides.KeeperTurnFlagEnabled.Valid { + return c.Overrides.KeeperTurnFlagEnabled.Bool + } + return c.GeneralConfig.KeeperTurnFlagEnabled() +} + func (c *TestGeneralConfig) BlockBackfillSkip() bool { if c.Overrides.BlockBackfillSkip.Valid { return c.Overrides.BlockBackfillSkip.Bool diff --git 
a/core/internal/testutils/evmtest/evmtest.go b/core/internal/testutils/evmtest/evmtest.go index 3dcd1295d26..050c776ec1e 100644 --- a/core/internal/testutils/evmtest/evmtest.go +++ b/core/internal/testutils/evmtest/evmtest.go @@ -1,8 +1,18 @@ package evmtest import ( + "database/sql" "math/big" + "math/rand" + "sync" "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/stretchr/testify/mock" + "go.uber.org/atomic" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" "github.com/smartcontractkit/sqlx" "github.com/stretchr/testify/require" @@ -13,9 +23,11 @@ import ( evmconfig "github.com/smartcontractkit/chainlink/core/chains/evm/config" httypes "github.com/smartcontractkit/chainlink/core/chains/evm/headtracker/types" "github.com/smartcontractkit/chainlink/core/chains/evm/log" + evmMocks "github.com/smartcontractkit/chainlink/core/chains/evm/mocks" "github.com/smartcontractkit/chainlink/core/chains/evm/txmgr" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" "github.com/smartcontractkit/chainlink/core/config" + "github.com/smartcontractkit/chainlink/core/internal/testutils" "github.com/smartcontractkit/chainlink/core/internal/testutils/configtest" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/keystore" @@ -74,18 +86,20 @@ func NewChainSet(t testing.TB, testopts TestChainOpts) evm.ChainSet { chains := []evmtypes.Chain{ { - ID: *utils.NewBigI(0), - Cfg: testopts.ChainCfg, - Nodes: []evmtypes.Node{{ - Name: "evm-test-only-0", - EVMChainID: *utils.NewBigI(0), - WSURL: null.StringFrom("ws://example.invalid"), - }}, + ID: *utils.NewBigI(0), + Cfg: testopts.ChainCfg, Enabled: true, }, } + nodes := map[string][]evmtypes.Node{ + "0": {{ + Name: "evm-test-only-0", + EVMChainID: *utils.NewBigI(0), + WSURL: null.StringFrom("ws://example.invalid"), + }}, + } - cc, err := evm.NewChainSet(opts, chains) + cc, err := evm.NewChainSet(opts, chains, nodes) require.NoError(t, err) return 
cc } @@ -105,65 +119,132 @@ INSERT INTO evm_chains (id, cfg, enabled, created_at, updated_at) VALUES (:id, : } type MockORM struct { - chains []evmtypes.Chain + mu sync.RWMutex + chains map[string]evmtypes.Chain + nodes map[string][]evmtypes.Node } var _ evmtypes.ORM = &MockORM{} -func NewMockORM(chains []evmtypes.Chain) *MockORM { +func NewMockORM(chains []evmtypes.Chain, nodes []evmtypes.Node) *MockORM { mo := &MockORM{ - chains: chains, + chains: make(map[string]evmtypes.Chain), + nodes: make(map[string][]evmtypes.Node), } + mo.PutChains(chains...) + mo.AddNodes(nodes...) return mo } -func (mo *MockORM) EnabledChainsWithNodes() ([]evmtypes.Chain, error) { - return mo.chains, nil +func (mo *MockORM) PutChains(cs ...evmtypes.Chain) { + for _, c := range cs { + mo.chains[c.ID.String()] = c + } } -func (mo *MockORM) StoreString(chainID *big.Int, key, val string) error { - return nil +func (mo *MockORM) AddNodes(ns ...evmtypes.Node) { + for _, n := range ns { + id := n.EVMChainID.String() + mo.nodes[id] = append(mo.nodes[id], n) + } } -func (mo *MockORM) Clear(chainID *big.Int, key string) error { - return nil +func (mo *MockORM) EnabledChains(qopts ...pg.QOpt) ([]evmtypes.Chain, error) { + mo.mu.RLock() + defer mo.mu.RUnlock() + return maps.Values(mo.chains), nil } -func (mo *MockORM) Chain(id utils.Big) (evmtypes.Chain, error) { +func (mo *MockORM) StoreString(chainID utils.Big, key, val string) error { panic("not implemented") } -func (mo *MockORM) CreateChain(id utils.Big, config evmtypes.ChainCfg) (evmtypes.Chain, error) { +func (mo *MockORM) Clear(chainID utils.Big, key string) error { panic("not implemented") } -func (mo *MockORM) UpdateChain(id utils.Big, enabled bool, config evmtypes.ChainCfg) (evmtypes.Chain, error) { - return evmtypes.Chain{}, nil +func (mo *MockORM) Chain(id utils.Big, qopts ...pg.QOpt) (evmtypes.Chain, error) { + mo.mu.RLock() + defer mo.mu.RUnlock() + c, ok := mo.chains[id.String()] + if !ok { + return evmtypes.Chain{}, sql.ErrNoRows + 
} + return c, nil } -func (mo *MockORM) DeleteChain(id utils.Big) error { +func (mo *MockORM) CreateChain(id utils.Big, config evmtypes.ChainCfg, qopts ...pg.QOpt) (evmtypes.Chain, error) { panic("not implemented") } -func (mo *MockORM) Chains(offset int, limit int) ([]evmtypes.Chain, int, error) { +func (mo *MockORM) UpdateChain(id utils.Big, enabled bool, config evmtypes.ChainCfg, qopts ...pg.QOpt) (evmtypes.Chain, error) { + return evmtypes.Chain{}, nil +} + +func (mo *MockORM) DeleteChain(id utils.Big, qopts ...pg.QOpt) error { panic("not implemented") } +func (mo *MockORM) Chains(offset int, limit int, qopts ...pg.QOpt) (chains []evmtypes.Chain, count int, err error) { + mo.mu.RLock() + defer mo.mu.RUnlock() + chains = maps.Values(mo.chains) + count = len(chains) + return +} + func (mo *MockORM) GetChainsByIDs(ids []utils.Big) (chains []evmtypes.Chain, err error) { - panic("not implemented") + mo.mu.RLock() + defer mo.mu.RUnlock() + for _, id := range ids { + c, ok := mo.chains[id.String()] + if ok { + chains = append(chains, c) + } + } + return } -func (mo *MockORM) CreateNode(data evmtypes.NewNode) (evmtypes.Node, error) { - panic("not implemented") +func (mo *MockORM) CreateNode(data evmtypes.Node, qopts ...pg.QOpt) (n evmtypes.Node, err error) { + mo.mu.Lock() + defer mo.mu.Unlock() + n.ID = rand.Int31() + n.Name = data.Name + n.EVMChainID = data.EVMChainID + n.WSURL = data.WSURL + n.HTTPURL = data.HTTPURL + n.SendOnly = data.SendOnly + n.CreatedAt = time.Now() + n.UpdatedAt = n.CreatedAt + mo.AddNodes(n) + return n, nil } -func (mo *MockORM) DeleteNode(id int64) error { - panic("not implemented") +func (mo *MockORM) DeleteNode(id int32, qopts ...pg.QOpt) error { + mo.mu.Lock() + defer mo.mu.Unlock() + for chainID, ns := range mo.nodes { + i := slices.IndexFunc(ns, func(n evmtypes.Node) bool { + return n.ID == id + }) + if i < 0 { + continue + } + mo.nodes[chainID] = slices.Delete(ns, i, i) + return nil + } + return sql.ErrNoRows } // Nodes implements 
evmtypes.ORM -func (mo *MockORM) Nodes(offset int, limit int, qopts ...pg.QOpt) ([]evmtypes.Node, int, error) { - panic("not implemented") +func (mo *MockORM) Nodes(offset int, limit int, qopts ...pg.QOpt) (nodes []evmtypes.Node, cnt int, err error) { + mo.mu.RLock() + defer mo.mu.RUnlock() + for _, ns := range maps.Values(mo.nodes) { + nodes = append(nodes, ns...) + } + cnt = len(nodes) + return } // Node implements evmtypes.ORM @@ -181,6 +262,11 @@ func (mo *MockORM) NodesForChain(chainID utils.Big, offset int, limit int, qopts panic("not implemented") } +// NodesForChain implements evmtypes.ORM +func (mo *MockORM) SetupNodes([]evmtypes.Node, []utils.Big) error { + panic("not implemented") +} + func ChainEthMainnet(t *testing.T) evmconfig.ChainScopedConfig { return scopedConfig(t, 1) } func ChainOptimismMainnet(t *testing.T) evmconfig.ChainScopedConfig { return scopedConfig(t, 10) } func ChainOptimismKovan(t *testing.T) evmconfig.ChainScopedConfig { return scopedConfig(t, 69) } @@ -191,3 +277,83 @@ func scopedConfig(t *testing.T, chainID int64) evmconfig.ChainScopedConfig { return evmconfig.NewChainScopedConfig(big.NewInt(chainID), evmtypes.ChainCfg{}, nil, logger.TestLogger(t), configtest.NewTestGeneralConfig(t)) } + +func NewEthClientMock(t mock.TestingT) *evmMocks.Client { + mockEth := new(evmMocks.Client) + mockEth.Test(t) + return mockEth +} + +func NewEthClientMockWithDefaultChain(t testing.TB) *evmMocks.Client { + c := NewEthClientMock(t) + c.On("ChainID").Return(testutils.FixtureChainID).Maybe() + return c +} + +type MockEth struct { + EthClient *evmMocks.Client + CheckFilterLogs func(int64, int64) + + subs []*evmMocks.Subscription + errChs []chan error + subscribeCalls atomic.Int32 + unsubscribeCalls atomic.Int32 +} + +func (m *MockEth) AssertExpectations(t *testing.T) { + m.EthClient.AssertExpectations(t) + for _, sub := range m.subs { + sub.AssertExpectations(t) + } +} + +func (m *MockEth) SubscribeCallCount() int32 { + return m.subscribeCalls.Load() 
+} + +func (m *MockEth) UnsubscribeCallCount() int32 { + return m.unsubscribeCalls.Load() +} + +func (m *MockEth) NewSub(t *testing.T) ethereum.Subscription { + m.subscribeCalls.Inc() + sub := new(evmMocks.Subscription) + sub.Test(t) + errCh := make(chan error) + sub.On("Err"). + Return(func() <-chan error { return errCh }) + sub.On("Unsubscribe"). + Run(func(mock.Arguments) { + m.unsubscribeCalls.Inc() + close(errCh) + }).Return().Maybe() + m.subs = append(m.subs, sub) + m.errChs = append(m.errChs, errCh) + return sub +} + +func (m *MockEth) SubsErr(err error) { + for _, errCh := range m.errChs { + errCh <- err + } +} + +type RawSub[T any] struct { + ch chan<- T + err <-chan error +} + +func NewRawSub[T any](ch chan<- T, err <-chan error) RawSub[T] { + return RawSub[T]{ch: ch, err: err} +} + +func (r *RawSub[T]) CloseCh() { + close(r.ch) +} + +func (r *RawSub[T]) TrySend(t T) { + select { + case <-r.err: + case r.ch <- t: + } +} diff --git a/core/internal/testutils/logger.go b/core/internal/testutils/logger.go new file mode 100644 index 00000000000..04c69f25ef3 --- /dev/null +++ b/core/internal/testutils/logger.go @@ -0,0 +1,36 @@ +package testutils + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" + + "github.com/smartcontractkit/chainlink/core/logger" +) + +// LoggerAssertMaxLevel returns a test logger which is observed on cleanup +// and asserts that no lines were logged at a higher level. 
+func LoggerAssertMaxLevel(t *testing.T, lvl zapcore.Level) logger.Logger { + if lvl >= zapcore.FatalLevel { + t.Fatalf("no levels exist after %s", zapcore.FatalLevel) + } + lggr, o := logger.TestLoggerObserved(t, lvl+1) + t.Cleanup(func() { + assert.Empty(t, o.Len(), fmt.Sprintf("logger contains entries with levels above %q:\n%s", lvl, loggedEntries(o.All()))) + }) + return lggr +} + +type loggedEntries []observer.LoggedEntry + +func (logs loggedEntries) String() string { + var sb strings.Builder + for _, l := range logs { + fmt.Fprintln(&sb, l) + } + return sb.String() +} diff --git a/core/internal/testutils/solanatest/solanatest.go b/core/internal/testutils/solanatest/solanatest.go new file mode 100644 index 00000000000..2be4b301851 --- /dev/null +++ b/core/internal/testutils/solanatest/solanatest.go @@ -0,0 +1,25 @@ +package solanatest + +import ( + "testing" + + "github.com/google/uuid" + "github.com/smartcontractkit/sqlx" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" +) + +// MustInsertChain inserts chain in to db, or fails the test. +func MustInsertChain(t testing.TB, db *sqlx.DB, chain *db.Chain) { + query, args, e := db.BindNamed(` +INSERT INTO solana_chains (id, cfg, enabled, created_at, updated_at) VALUES (:id, :cfg, :enabled, NOW(), NOW()) RETURNING *;`, chain) + require.NoError(t, e) + err := db.Get(chain, query, args...) + require.NoError(t, err) +} + +// RandomChainID returns a random uuid id for testing. Use this instead of a constant to prevent DB collisions. 
+func RandomChainID() string { + return uuid.New().String() +} diff --git a/core/internal/testutils/testutils.go b/core/internal/testutils/testutils.go index bdd08851eca..75e4b8c3859 100644 --- a/core/internal/testutils/testutils.go +++ b/core/internal/testutils/testutils.go @@ -15,8 +15,10 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" "github.com/gorilla/websocket" "github.com/tidwall/gjson" "go.uber.org/zap/zaptest/observer" @@ -32,6 +34,19 @@ import ( // "test" chain ID to be used without clashes var FixtureChainID = big.NewInt(0) +// SimulatedChainID is the chain ID for the go-ethereum simulated backend +var SimulatedChainID = big.NewInt(1337) + +// MustNewSimTransactor returns a transactor for interacting with the +// geth simulated backend. +func MustNewSimTransactor(t *testing.T) *bind.TransactOpts { + key, err := crypto.GenerateKey() + require.NoError(t, err) + transactor, err := bind.NewKeyedTransactorWithChainID(key, SimulatedChainID) + require.NoError(t, err) + return transactor +} + // NewAddress return a random new address func NewAddress() common.Address { return common.BytesToAddress(randomBytes(20)) @@ -101,6 +116,14 @@ func MustParseURL(t *testing.T, input string) *url.URL { return u } +// MustParseBigInt parses a big int value from string or fails the test +func MustParseBigInt(t *testing.T, input string) *big.Int { + i := new(big.Int) + _, err := fmt.Sscan(input, i) + require.NoError(t, err) + return i +} + // JSONRPCHandler is called with the method and request param(s). // respResult will be sent immediately. notifyResult is optional, and sent after a short delay. 
type JSONRPCHandler func(reqMethod string, reqParams gjson.Result) (respResult, notifyResult string) diff --git a/core/logger/logger.go b/core/logger/logger.go index 71d0a62c596..af5e6f5ab74 100644 --- a/core/logger/logger.go +++ b/core/logger/logger.go @@ -168,9 +168,10 @@ func verShaName(ver, sha string) string { func NewLogger() (Logger, func() error) { var c Config var parseErrs []string + var warnings []string var invalid string - c.LogLevel, invalid = envvar.LogLevel.ParseLogLevel() + c.LogLevel, invalid = envvar.LogLevel.Parse() if invalid != "" { parseErrs = append(parseErrs, invalid) } @@ -178,23 +179,30 @@ func NewLogger() (Logger, func() error) { c.Dir = os.Getenv("LOG_FILE_DIR") if c.Dir == "" { var invalid2 string - c.Dir, invalid2 = envvar.RootDir.ParseString() + c.Dir, invalid2 = envvar.RootDir.Parse() if invalid2 != "" { parseErrs = append(parseErrs, invalid2) } } - c.JsonConsole, invalid = envvar.JSONConsole.ParseBool() + c.JsonConsole, invalid = envvar.JSONConsole.Parse() if invalid != "" { parseErrs = append(parseErrs, invalid) } var fileMaxSize utils.FileSize - fileMaxSize, invalid = envvar.LogFileMaxSize.ParseFileSize() - c.FileMaxSize = int(fileMaxSize) + fileMaxSize, invalid = envvar.LogFileMaxSize.Parse() if invalid != "" { parseErrs = append(parseErrs, invalid) } + if fileMaxSize <= 0 { + c.FileMaxSizeMB = 0 // disabled + } else if fileMaxSize < utils.MB { + c.FileMaxSizeMB = 1 // 1Mb is the minimum accepted by logging backend + warnings = append(warnings, fmt.Sprintf("LogFileMaxSize %s is too small: using default %s", fileMaxSize, utils.FileSize(utils.MB))) + } else { + c.FileMaxSizeMB = int(fileMaxSize / utils.MB) + } if c.DebugLogsToDisk() { var ( @@ -202,20 +210,20 @@ func NewLogger() (Logger, func() error) { maxBackups int64 ) - fileMaxAge, invalid = envvar.LogFileMaxAge.ParseInt64() - c.FileMaxAge = int(fileMaxAge) + fileMaxAge, invalid = envvar.LogFileMaxAge.Parse() + c.FileMaxAgeDays = int(fileMaxAge) if invalid != "" { parseErrs = 
append(parseErrs, invalid) } - maxBackups, invalid = envvar.LogFileMaxBackups.ParseInt64() + maxBackups, invalid = envvar.LogFileMaxBackups.Parse() c.FileMaxBackups = int(maxBackups) if invalid != "" { parseErrs = append(parseErrs, invalid) } } - c.UnixTS, invalid = envvar.LogUnixTS.ParseBool() + c.UnixTS, invalid = envvar.LogUnixTS.Parse() if invalid != "" { parseErrs = append(parseErrs, invalid) } @@ -224,6 +232,9 @@ func NewLogger() (Logger, func() error) { for _, msg := range parseErrs { l.Error(msg) } + for _, msg := range warnings { + l.Warn(msg) + } return l.Named(verShaNameStatic()), close } @@ -232,8 +243,8 @@ type Config struct { Dir string JsonConsole bool UnixTS bool - FileMaxSize int // megabytes - FileMaxAge int // days + FileMaxSizeMB int + FileMaxAgeDays int FileMaxBackups int // files } @@ -257,12 +268,12 @@ func (c *Config) New() (Logger, func() error) { // DebugLogsToDisk returns whether debug logs should be stored in disk func (c Config) DebugLogsToDisk() bool { - return c.FileMaxSize > 0 + return c.FileMaxSizeMB > 0 } // RequiredDiskSpace returns the required disk space in order to allow debug logs to be stored in disk func (c Config) RequiredDiskSpace() utils.FileSize { - return utils.FileSize(c.FileMaxSize * (c.FileMaxBackups + 1)) + return utils.FileSize(c.FileMaxSizeMB * utils.MB * (c.FileMaxBackups + 1)) } // InitColor explicitly sets the global color.NoColor option. diff --git a/core/logger/test_logger.go b/core/logger/test_logger.go index e73c9d444e1..ec49f46bc20 100644 --- a/core/logger/test_logger.go +++ b/core/logger/test_logger.go @@ -82,7 +82,7 @@ func TestLoggerObserved(t T, lvl zapcore.Level) (Logger, *observer.ObservedLogs) func testLogger(t T, cores ...zapcore.Core) SugaredLogger { cfg := newZapConfigTest() - ll, invalid := envvar.LogLevel.ParseLogLevel() + ll, invalid := envvar.LogLevel.Parse() cfg.Level.SetLevel(ll) l, close, err := zapLoggerConfig{Config: cfg}.newLogger(cores...) 
if err != nil { diff --git a/core/logger/zap_disk_logging.go b/core/logger/zap_disk_logging.go index b2786cfa5a8..84ea526522b 100644 --- a/core/logger/zap_disk_logging.go +++ b/core/logger/zap_disk_logging.go @@ -42,8 +42,8 @@ func (cfg zapLoggerConfig) newDiskCore() (zapcore.Core, error) { encoder = zapcore.NewConsoleEncoder(makeEncoderConfig(cfg.local)) sink = zapcore.AddSync(&lumberjack.Logger{ Filename: logFileURI(cfg.local.Dir), - MaxSize: cfg.local.FileMaxSize, - MaxAge: cfg.local.FileMaxAge, + MaxSize: cfg.local.FileMaxSizeMB, + MaxAge: cfg.local.FileMaxAgeDays, MaxBackups: cfg.local.FileMaxBackups, Compress: true, }) diff --git a/core/logger/zap_test.go b/core/logger/zap_test.go index 2d6c2fc5470..74d0c1bdd12 100644 --- a/core/logger/zap_test.go +++ b/core/logger/zap_test.go @@ -21,12 +21,12 @@ import ( func TestZapLogger_OutOfDiskSpace(t *testing.T) { cfg := newZapConfigTest() - ll, invalid := envvar.LogLevel.ParseLogLevel() + ll, invalid := envvar.LogLevel.Parse() assert.Empty(t, invalid) cfg.Level.SetLevel(ll) - maxSize, invalid := envvar.LogFileMaxSize.ParseFileSize() + maxSize, invalid := envvar.LogFileMaxSize.Parse() assert.Empty(t, invalid) logsDir := t.TempDir() @@ -43,9 +43,9 @@ func TestZapLogger_OutOfDiskSpace(t *testing.T) { Config: cfg, local: Config{ Dir: logsDir, - FileMaxAge: 0, + FileMaxAgeDays: 0, FileMaxBackups: 1, - FileMaxSize: int(logFileSize), + FileMaxSizeMB: int(logFileSize / utils.MB), }, diskPollConfig: pollCfg, diskLogLevel: zap.NewAtomicLevelAt(zapcore.DebugLevel), @@ -67,7 +67,7 @@ func TestZapLogger_OutOfDiskSpace(t *testing.T) { stop: stop, pollChan: pollChan, } - zapCfg.local.FileMaxSize = int(maxSize) * 2 + zapCfg.local.FileMaxSizeMB = int(maxSize/utils.MB) * 2 lggr, close, err := zapCfg.newLogger() assert.NoError(t, err) @@ -101,7 +101,7 @@ func TestZapLogger_OutOfDiskSpace(t *testing.T) { stop: stop, pollChan: pollChan, } - zapCfg.local.FileMaxSize = int(maxSize) * 2 + zapCfg.local.FileMaxSizeMB = int(maxSize/utils.MB) * 
2 lggr, close, err := zapCfg.newLogger() assert.NoError(t, err) @@ -135,7 +135,7 @@ func TestZapLogger_OutOfDiskSpace(t *testing.T) { stop: stop, pollChan: pollChan, } - zapCfg.local.FileMaxSize = int(maxSize) * 2 + zapCfg.local.FileMaxSizeMB = int(maxSize/utils.MB) * 2 lggr, close, err := zapCfg.newLogger() assert.NoError(t, err) @@ -185,7 +185,7 @@ func TestZapLogger_OutOfDiskSpace(t *testing.T) { stop: stop, pollChan: pollChan, } - zapCfg.local.FileMaxSize = int(maxSize) * 2 + zapCfg.local.FileMaxSizeMB = int(maxSize/utils.MB) * 2 lggr, close, err := zapCfg.newLogger() assert.NoError(t, err) diff --git a/core/main_test.go b/core/main_test.go index 977e5c7a3c2..bdf7da65d74 100644 --- a/core/main_test.go +++ b/core/main_test.go @@ -434,7 +434,6 @@ func ExampleRun_node_profile() { // OPTIONS: // --seconds value, -s value duration of profile capture (default: 8) // --output_dir value, -o value output directory of the captured profile (default: "/tmp/") - } func ExampleRun_txs() { @@ -447,8 +446,9 @@ func ExampleRun_txs() { // core.test txs command [command options] [arguments...] // // COMMANDS: - // evm Commands for handling EVM transactions - // terra Commands for handling Terra transactions + // evm Commands for handling EVM transactions + // solana Commands for handling Solana transactions + // terra Commands for handling Terra transactions // // OPTIONS: // --help, -h show help @@ -472,6 +472,22 @@ func ExampleRun_txs_evm() { // --help, -h show help } +func ExampleRun_txs_solana() { + run("txs", "solana", "--help") + // Output: + // NAME: + // core.test txs solana - Commands for handling Solana transactions + // + // USAGE: + // core.test txs solana command [command options] [arguments...] + // + // COMMANDS: + // create Send lamports from node Solana account to destination . 
+ // + // OPTIONS: + // --help, -h show help +} + func ExampleRun_txs_terra() { run("txs", "terra", "--help") // Output: @@ -498,8 +514,9 @@ func ExampleRun_chains() { // core.test chains command [command options] [arguments...] // // COMMANDS: - // evm Commands for handling EVM chains - // terra Commands for handling Terra chains + // evm Commands for handling EVM chains + // solana Commands for handling Solana chains + // terra Commands for handling Terra chains // // OPTIONS: // --help, -h show help @@ -524,6 +541,25 @@ func ExampleRun_chains_evm() { // --help, -h show help } +func ExampleRun_chains_solana() { + run("chains", "solana", "--help") + // Output: + // NAME: + // core.test chains solana - Commands for handling Solana chains + // + // USAGE: + // core.test chains solana command [command options] [arguments...] + // + // COMMANDS: + // create Create a new Solana chain + // delete Delete a Solana chain + // list List all Solana chains + // configure Configure a Solana chain + // + // OPTIONS: + // --help, -h show help +} + func ExampleRun_chains_terra() { run("chains", "terra", "--help") // Output: @@ -553,8 +589,9 @@ func ExampleRun_nodes() { // core.test nodes command [command options] [arguments...] // // COMMANDS: - // evm Commands for handling EVM node configuration - // terra Commands for handling Terra node configuration + // evm Commands for handling EVM node configuration + // solana Commands for handling Solana node configuration + // terra Commands for handling Terra node configuration // // OPTIONS: // --help, -h show help @@ -578,6 +615,24 @@ func ExampleRun_nodes_evm() { // --help, -h show help } +func ExampleRun_nodes_solana() { + run("nodes", "solana", "--help") + // Output: + // NAME: + // core.test nodes solana - Commands for handling Solana node configuration + // + // USAGE: + // core.test nodes solana command [command options] [arguments...] 
+ // + // COMMANDS: + // create Create a new Solana node + // delete Delete a Solana node + // list List all Solana nodes + // + // OPTIONS: + // --help, -h show help +} + func ExampleRun_nodes_terra() { run("nodes", "terra", "--help") // Output: diff --git a/core/scripts/chaincli/.env.example b/core/scripts/chaincli/.env.example index 36c15a7e740..698db0b88ca 100644 --- a/core/scripts/chaincli/.env.example +++ b/core/scripts/chaincli/.env.example @@ -22,7 +22,6 @@ STALENESS_SECONDS= GAS_CEILING_MULTIPLIER= FALLBACK_GAS_PRICE= FALLBACK_LINK_PRICE= -MUST_TAKE_TURNS= # Optional Keepers config KEEPER_REGISTRY_ADDRESS= diff --git a/core/scripts/chaincli/config/config.go b/core/scripts/chaincli/config/config.go index 8251a3149cc..f88a75228e6 100644 --- a/core/scripts/chaincli/config/config.go +++ b/core/scripts/chaincli/config/config.go @@ -18,8 +18,7 @@ type Config struct { KeeperPasswords []string `mapstructure:"KEEPER_PASSWORDS"` ApproveAmount string `mapstructure:"APPROVE_AMOUNT"` GasLimit uint64 `mapstructure:"GAS_LIMIT"` - FundNodeAmount int `mapstructure:"FUND_CHAINLINK_NODE"` - MustTakeTurns bool `mapstructure:"MUST_TAKE_TURNS"` + FundNodeAmount string `mapstructure:"FUND_CHAINLINK_NODE"` // Keeper config LinkETHFeedAddr string `mapstructure:"LINK_ETH_FEED"` diff --git a/core/scripts/chaincli/handler/handler.go b/core/scripts/chaincli/handler/handler.go index f65f7b38f73..04282f553a9 100644 --- a/core/scripts/chaincli/handler/handler.go +++ b/core/scripts/chaincli/handler/handler.go @@ -104,15 +104,14 @@ func (h *baseHandler) buildTxOpts(ctx context.Context) *bind.TransactOpts { } // Send eth from prefunded account. -// Amount is number of ETH not wei. -func (k *Keeper) sendEth(ctx context.Context, to common.Address, amount int) error { +// Amount is number of wei. 
+func (k *Keeper) sendEth(ctx context.Context, to common.Address, amount *big.Int) error { txOpts := k.buildTxOpts(ctx) - txOpts.Value = big.NewInt(0).Mul(big.NewInt(int64(amount)), big.NewInt(1000000000000000000)) tx := types.NewTx(&types.LegacyTx{ Nonce: txOpts.Nonce.Uint64(), To: &to, - Value: txOpts.Value, + Value: amount, Gas: txOpts.GasLimit, GasPrice: txOpts.GasPrice, Data: nil, diff --git a/core/scripts/chaincli/handler/keeper.go b/core/scripts/chaincli/handler/keeper.go index 1d0271fd164..09ea194f4e3 100644 --- a/core/scripts/chaincli/handler/keeper.go +++ b/core/scripts/chaincli/handler/keeper.go @@ -11,7 +11,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/smartcontractkit/chainlink/core/cmd" - keeper "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/keeper_registry_vb_wrapper" + keeper "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/keeper_registry_wrapper" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/upkeep_counter_wrapper" upkeep "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/upkeep_perform_counter_restrictive_wrapper" "github.com/smartcontractkit/chainlink/core/logger" @@ -45,7 +45,7 @@ func (k *Keeper) DeployKeepers(ctx context.Context) { } func (k *Keeper) deployKeepers(ctx context.Context, keepers []common.Address, owners []common.Address) common.Address { - var registry *keeper.KeeperRegistryVB + var registry *keeper.KeeperRegistry var registryAddr common.Address var upkeepCount int64 if k.cfg.RegistryAddress != "" { @@ -71,7 +71,13 @@ func (k *Keeper) deployKeepers(ctx context.Context, keepers []common.Address, ow for i, keeperAddr := range k.cfg.Keepers { url := k.cfg.KeeperURLs[i] email := k.cfg.KeeperEmails[i] + if len(email) == 0 { + email = defaultChainlinkNodeLogin + } pwd := k.cfg.KeeperPasswords[i] + if len(pwd) == 0 { + pwd = defaultChainlinkNodePassword + } err := k.createKeeperJobOnExistingNode(url, 
email, pwd, registryAddr.Hex(), keeperAddr) if err != nil { log.Printf("Keeper Job not created for keeper %d: %s %s\n", i, url, keeperAddr) @@ -102,8 +108,8 @@ func (k *Keeper) deployKeepers(ctx context.Context, keepers []common.Address, ow return registryAddr } -func (k *Keeper) deployRegistry(ctx context.Context) (common.Address, *keeper.KeeperRegistryVB) { - registryAddr, deployKeeperRegistryTx, registryInstance, err := keeper.DeployKeeperRegistryVB(k.buildTxOpts(ctx), k.client, +func (k *Keeper) deployRegistry(ctx context.Context) (common.Address, *keeper.KeeperRegistry) { + registryAddr, deployKeeperRegistryTx, registryInstance, err := keeper.DeployKeeperRegistry(k.buildTxOpts(ctx), k.client, common.HexToAddress(k.cfg.LinkTokenAddr), common.HexToAddress(k.cfg.LinkETHFeedAddr), common.HexToAddress(k.cfg.FastGasFeedAddr), @@ -115,7 +121,6 @@ func (k *Keeper) deployRegistry(ctx context.Context) (common.Address, *keeper.Ke k.cfg.GasCeilingMultiplier, big.NewInt(k.cfg.FallbackGasPrice), big.NewInt(k.cfg.FallbackLinkPrice), - k.cfg.MustTakeTurns, ) if err != nil { log.Fatal("DeployAbi failed: ", err) @@ -126,9 +131,9 @@ func (k *Keeper) deployRegistry(ctx context.Context) (common.Address, *keeper.Ke } // GetRegistry is used to attach to an existing registry -func (k *Keeper) GetRegistry(ctx context.Context) (common.Address, *keeper.KeeperRegistryVB) { +func (k *Keeper) GetRegistry(ctx context.Context) (common.Address, *keeper.KeeperRegistry) { registryAddr := common.HexToAddress(k.cfg.RegistryAddress) - registryInstance, err := keeper.NewKeeperRegistryVB( + registryInstance, err := keeper.NewKeeperRegistry( registryAddr, k.client, ) @@ -145,8 +150,7 @@ func (k *Keeper) GetRegistry(ctx context.Context) (common.Address, *keeper.Keepe big.NewInt(k.cfg.StalenessSeconds), k.cfg.GasCeilingMultiplier, big.NewInt(k.cfg.FallbackGasPrice), - big.NewInt(k.cfg.FallbackLinkPrice), - k.cfg.MustTakeTurns) + big.NewInt(k.cfg.FallbackLinkPrice)) if err != nil { log.Fatal("Registry 
config update: ", err) } @@ -159,7 +163,7 @@ func (k *Keeper) GetRegistry(ctx context.Context) (common.Address, *keeper.Keepe } // deployUpkeeps deploys N amount of upkeeps and register them in the keeper registry deployed above -func (k *Keeper) deployUpkeeps(ctx context.Context, registryAddr common.Address, registryInstance *keeper.KeeperRegistryVB, existingCount int64) { +func (k *Keeper) deployUpkeeps(ctx context.Context, registryAddr common.Address, registryInstance *keeper.KeeperRegistry, existingCount int64) { fmt.Println() log.Println("Deploying upkeeps...") for i := existingCount; i < k.cfg.UpkeepCount+existingCount; i++ { diff --git a/core/scripts/chaincli/handler/keeper_launch.go b/core/scripts/chaincli/handler/keeper_launch.go index 888ca0639fd..12f905f413f 100644 --- a/core/scripts/chaincli/handler/keeper_launch.go +++ b/core/scripts/chaincli/handler/keeper_launch.go @@ -27,7 +27,7 @@ import ( "github.com/manyminds/api2go/jsonapi" "github.com/smartcontractkit/chainlink/core/cmd" - keeper "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/keeper_registry_vb_wrapper" + keeper "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/keeper_registry_wrapper" "github.com/smartcontractkit/chainlink/core/logger" helpers "github.com/smartcontractkit/chainlink/core/scripts/common" "github.com/smartcontractkit/chainlink/core/sessions" @@ -36,11 +36,11 @@ import ( ) const ( - defaultChainlinkNodeImage = "smartcontract/chainlink:1.1.0" + defaultChainlinkNodeImage = "smartcontract/chainlink:latest" defaultPOSTGRESImage = "postgres:13" - defaultChainlinkNodeLogin = "test@smartcontract.com" - defaultChainlinkNodePassword = "!PASsword000!" 
+ defaultChainlinkNodeLogin = "notreal@fakeemail.ch" + defaultChainlinkNodePassword = "twochains" ) type cfg struct { @@ -86,7 +86,7 @@ func (k *Keeper) LaunchAndTest(ctx context.Context, withdraw bool) { wg.Wait() // Deploy keeper registry or get an existing one - var registry *keeper.KeeperRegistryVB + var registry *keeper.KeeperRegistry var registryAddr common.Address var upkeepCount int64 if k.cfg.RegistryAddress != "" { @@ -158,8 +158,13 @@ func (k *Keeper) LaunchAndTest(ctx context.Context, withdraw bool) { log.Println("Keeper job has been successfully created in the Chainlink node with address ", startedNode.url) // Fund node if needed - if k.cfg.FundNodeAmount > 0 { - if err = k.sendEth(ctx, nodeAddr, k.cfg.FundNodeAmount); err != nil { + fundAmt, ok := (&big.Int{}).SetString(k.cfg.FundNodeAmount, 10) + if !ok { + log.Printf("failed to parse FUND_CHAINLINK_NODE: %s", k.cfg.FundNodeAmount) + continue + } + if fundAmt.Cmp(big.NewInt(0)) != 0 { + if err = k.sendEth(ctx, nodeAddr, fundAmt); err != nil { log.Println("Failed to fund chainlink node: ", err) continue } @@ -201,7 +206,7 @@ func (k *Keeper) LaunchAndTest(ctx context.Context, withdraw bool) { } // cancelAndWithdrawUpkeeps cancels all upkeeps of the registry and withdraws funds -func (k *Keeper) cancelAndWithdrawUpkeeps(ctx context.Context, registryInstance *keeper.KeeperRegistryVB) error { +func (k *Keeper) cancelAndWithdrawUpkeeps(ctx context.Context, registryInstance *keeper.KeeperRegistry) error { count, err := registryInstance.GetUpkeepCount(&bind.CallOpts{Context: ctx}) if err != nil { return fmt.Errorf("failed to get upkeeps count: %s", err) @@ -370,6 +375,7 @@ func (k *Keeper) launchChainlinkNode(ctx context.Context, port int) (string, fun "GAS_ESTIMATOR_MODE=BlockHistory", "ALLOW_ORIGINS=*", "DATABASE_TIMEOUT=0", + "KEEPER_CHECK_UPKEEP_GAS_PRICE_FEATURE_ENABLED=true", }, ExposedPorts: map[nat.Port]struct{}{ nat.Port(portStr): {}, diff --git a/core/scripts/chaincli/handler/keeper_withdraw.go 
b/core/scripts/chaincli/handler/keeper_withdraw.go index 87582cd5568..74a4e39690b 100644 --- a/core/scripts/chaincli/handler/keeper_withdraw.go +++ b/core/scripts/chaincli/handler/keeper_withdraw.go @@ -6,13 +6,13 @@ import ( "github.com/ethereum/go-ethereum/common" - keeper "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/keeper_registry_vb_wrapper" + keeper "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/keeper_registry_wrapper" ) // Withdraw takes a keeper registry address cancels all upkeeps and withdraws the funds func (k *Keeper) Withdraw(ctx context.Context, hexAddr string) { registryAddr := common.HexToAddress(hexAddr) - registryInstance, err := keeper.NewKeeperRegistryVB( + registryInstance, err := keeper.NewKeeperRegistry( registryAddr, k.client, ) diff --git a/core/scripts/common/helpers.go b/core/scripts/common/helpers.go index 9b3136c015a..df7a691b788 100644 --- a/core/scripts/common/helpers.go +++ b/core/scripts/common/helpers.go @@ -41,16 +41,27 @@ func ExplorerLink(chainID int64, txHash common.Hash) string { fmtURL = "https://rinkeby.etherscan.io/tx/%s" case 42: // Kovan fmtURL = "https://kovan.etherscan.io/tx/%s" + case 56: // BSC mainnet fmtURL = "https://bscscan.com/tx/%s" case 97: // BSC testnet fmtURL = "https://testnet.bscscan.com/tx/%s" + case 137: // Polygon mainnet fmtURL = "https://polygonscan.com/tx/%s" - case 4002: // Fantom testnet - fmtURL = "https://testnet.ftmscan.com/tx/%s" case 80001: // Polygon Mumbai testnet fmtURL = "https://mumbai.polygonscan.com/tx/%s" + + case 250: // Fantom mainnet + fmtURL = "https://ftmscan.com/tx/%s" + case 4002: // Fantom testnet + fmtURL = "https://testnet.ftmscan.com/tx/%s" + + case 43114: // Avalanche mainnet + fmtURL = "https://snowtrace.io/tx/%s" + case 43113: // Avalanche testnet + fmtURL = "https://testnet.snowtrace.io/tx/%s" + default: // Unknown chain, return TX as-is fmtURL = "%s" } diff --git a/core/scripts/vrfv2/testnet/main.go 
b/core/scripts/vrfv2/testnet/main.go index a63aa39bc1a..2b7400c1867 100644 --- a/core/scripts/vrfv2/testnet/main.go +++ b/core/scripts/vrfv2/testnet/main.go @@ -11,6 +11,7 @@ import ( "strconv" "strings" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -18,17 +19,34 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/shopspring/decimal" + "github.com/smartcontractkit/sqlx" + + evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/batch_blockhash_store" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/batch_vrf_coordinator_v2" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/blockhash_store" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/link_token_interface" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/vrf_coordinator_v2" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/vrf_external_sub_owner_example" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/vrf_load_test_external_sub_owner" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/vrf_single_consumer_example" + "github.com/smartcontractkit/chainlink/core/logger" helpers "github.com/smartcontractkit/chainlink/core/scripts/common" + "github.com/smartcontractkit/chainlink/core/services/keystore" + "github.com/smartcontractkit/chainlink/core/services/vrf/proof" "github.com/smartcontractkit/chainlink/core/utils" ) +var ( + batchCoordinatorV2ABI = evmtypes.MustGetABI(batch_vrf_coordinator_v2.BatchVRFCoordinatorV2ABI) +) + +type logconfig struct{} + +func (c logconfig) LogSQL() bool { + return false +} + func main() { ethURL, set := os.LookupEnv("ETH_URL") if !set { @@ -88,6 +106,182 @@ func main() { 
//owner.GasPrice = gp.Mul(gp, big.NewInt(2)) switch os.Args[1] { + case "batch-coordinatorv2-deploy": + cmd := flag.NewFlagSet("batch-coordinatorv2-deploy", flag.ExitOnError) + coordinatorAddr := cmd.String("coordinator-address", "", "address of the vrf coordinator v2 contract") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address") + batchCoordinatorAddress, tx, _, err := batch_vrf_coordinator_v2.DeployBatchVRFCoordinatorV2(owner, ec, common.HexToAddress(*coordinatorAddr)) + helpers.PanicErr(err) + fmt.Println("BatchVRFCoordinatorV2:", batchCoordinatorAddress.Hex(), "tx:", helpers.ExplorerLink(chainID, tx.Hash())) + case "batch-coordinatorv2-fulfill": + cmd := flag.NewFlagSet("batch-coordinatorv2-fulfill", flag.ExitOnError) + batchCoordinatorAddr := cmd.String("batch-coordinator-address", "", "address of the batch vrf coordinator v2 contract") + pubKeyHex := cmd.String("pubkeyhex", "", "compressed pubkey hex") + dbURL := cmd.String("db-url", "", "postgres database url") + keystorePassword := cmd.String("keystore-pw", "", "password to the keystore") + submit := cmd.Bool("submit", false, "whether to submit the fulfillments or not") + estimateGas := cmd.Bool("estimate-gas", false, "whether to estimate gas or not") + + // NOTE: it is assumed all of these are of the same length and that + // elements correspond to each other index-wise. this property is not checked. 
+ preSeeds := cmd.String("preseeds", "", "comma-separated request preSeeds") + blockHashes := cmd.String("blockhashes", "", "comma-separated request blockhashes") + blockNums := cmd.String("blocknums", "", "comma-separated request blocknumbers") + subIDs := cmd.String("subids", "", "comma-separated request subids") + cbGasLimits := cmd.String("cbgaslimits", "", "comma-separated request callback gas limits") + numWordses := cmd.String("numwordses", "", "comma-separated request num words") + senders := cmd.String("senders", "", "comma-separated request senders") + + helpers.ParseArgs(cmd, os.Args[2:], + "batch-coordinator-address", "pubkeyhex", "db-url", + "keystore-pw", "preseeds", "blockhashes", "blocknums", + "subids", "cbgaslimits", "numwordses", "senders", "submit", + ) + + preSeedSlice := parseIntSlice(*preSeeds) + bhSlice := parseHashSlice(*blockHashes) + blockNumSlice := parseIntSlice(*blockNums) + subIDSlice := parseIntSlice(*subIDs) + cbLimitsSlice := parseIntSlice(*cbGasLimits) + numWordsSlice := parseIntSlice(*numWordses) + senderSlice := parseAddressSlice(*senders) + + batchCoordinator, err := batch_vrf_coordinator_v2.NewBatchVRFCoordinatorV2(common.HexToAddress(*batchCoordinatorAddr), ec) + helpers.PanicErr(err) + + db := sqlx.MustOpen("postgres", *dbURL) + lggr, _ := logger.NewLogger() + + keyStore := keystore.New(db, utils.DefaultScryptParams, lggr, logconfig{}) + err = keyStore.Unlock(*keystorePassword) + helpers.PanicErr(err) + + k, err := keyStore.VRF().Get(*pubKeyHex) + helpers.PanicErr(err) + + fmt.Println("vrf key found:", k) + + proofs := []batch_vrf_coordinator_v2.VRFTypesProof{} + reqCommits := []batch_vrf_coordinator_v2.VRFTypesRequestCommitment{} + for i := range preSeedSlice { + ps, err := proof.BigToSeed(preSeedSlice[i]) + helpers.PanicErr(err) + preSeedData := proof.PreSeedDataV2{ + PreSeed: ps, + BlockHash: bhSlice[i], + BlockNum: blockNumSlice[i].Uint64(), + SubId: subIDSlice[i].Uint64(), + CallbackGasLimit: 
uint32(cbLimitsSlice[i].Uint64()), + NumWords: uint32(numWordsSlice[i].Uint64()), + Sender: senderSlice[i], + } + fmt.Printf("preseed data iteration %d: %+v\n", i, preSeedData) + finalSeed := proof.FinalSeedV2(preSeedData) + + p, err := keyStore.VRF().GenerateProof(*pubKeyHex, finalSeed) + helpers.PanicErr(err) + + onChainProof, rc, err := proof.GenerateProofResponseFromProofV2(p, preSeedData) + helpers.PanicErr(err) + + proofs = append(proofs, batch_vrf_coordinator_v2.VRFTypesProof(onChainProof)) + reqCommits = append(reqCommits, batch_vrf_coordinator_v2.VRFTypesRequestCommitment(rc)) + } + + fmt.Printf("proofs: %+v\n\n", proofs) + fmt.Printf("request commitments: %+v\n\n", reqCommits) + + if *submit { + fmt.Println("submitting fulfillments...") + tx, err := batchCoordinator.FulfillRandomWords(owner, proofs, reqCommits) + helpers.PanicErr(err) + + fmt.Println("waiting for it to mine:", helpers.ExplorerLink(chainID, tx.Hash())) + _, err = bind.WaitMined(context.Background(), ec, tx) + helpers.PanicErr(err) + fmt.Println("done") + } + + if *estimateGas { + fmt.Println("estimating gas") + payload, err := batchCoordinatorV2ABI.Pack("fulfillRandomWords", proofs, reqCommits) + helpers.PanicErr(err) + + a := batchCoordinator.Address() + gasEstimate, err := ec.EstimateGas(context.Background(), ethereum.CallMsg{ + From: owner.From, + To: &a, + Data: payload, + }) + helpers.PanicErr(err) + + fmt.Println("gas estimate:", gasEstimate) + } + case "coordinatorv2-fulfill": + cmd := flag.NewFlagSet("coordinatorv2-fulfill", flag.ExitOnError) + coordinatorAddr := cmd.String("coordinator-address", "", "address of the vrf coordinator v2 contract") + pubKeyHex := cmd.String("pubkeyhex", "", "compressed pubkey hex") + dbURL := cmd.String("db-url", "", "postgres database url") + keystorePassword := cmd.String("keystore-pw", "", "password to the keystore") + + preSeed := cmd.String("preseed", "", "request preSeed") + blockHash := cmd.String("blockhash", "", "request blockhash") + 
blockNum := cmd.Uint64("blocknum", 0, "request blocknumber") + subID := cmd.Uint64("subid", 0, "request subid") + cbGasLimit := cmd.Uint("cbgaslimit", 0, "request callback gas limit") + numWords := cmd.Uint("numwords", 0, "request num words") + sender := cmd.String("sender", "", "request sender") + + helpers.ParseArgs(cmd, os.Args[2:], + "coordinator-address", "pubkeyhex", "db-url", + "keystore-pw", "preseed", "blockhash", "blocknum", + "subid", "cbgaslimit", "numwords", "sender", + ) + + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddr), ec) + helpers.PanicErr(err) + + db := sqlx.MustOpen("postgres", *dbURL) + lggr, _ := logger.NewLogger() + + keyStore := keystore.New(db, utils.DefaultScryptParams, lggr, logconfig{}) + err = keyStore.Unlock(*keystorePassword) + helpers.PanicErr(err) + + k, err := keyStore.VRF().Get(*pubKeyHex) + helpers.PanicErr(err) + + fmt.Println("vrf key found:", k) + + ps, err := proof.BigToSeed(decimal.RequireFromString(*preSeed).BigInt()) + helpers.PanicErr(err) + preSeedData := proof.PreSeedDataV2{ + PreSeed: ps, + BlockHash: common.HexToHash(*blockHash), + BlockNum: *blockNum, + SubId: *subID, + CallbackGasLimit: uint32(*cbGasLimit), + NumWords: uint32(*numWords), + Sender: common.HexToAddress(*sender), + } + fmt.Printf("preseed data: %+v\n", preSeedData) + finalSeed := proof.FinalSeedV2(preSeedData) + + p, err := keyStore.VRF().GenerateProof(*pubKeyHex, finalSeed) + helpers.PanicErr(err) + + onChainProof, rc, err := proof.GenerateProofResponseFromProofV2(p, preSeedData) + helpers.PanicErr(err) + + fmt.Printf("Proof: %+v, commitment: %+v\nSending fulfillment!", onChainProof, rc) + + tx, err := coordinator.FulfillRandomWords(owner, onChainProof, rc) + helpers.PanicErr(err) + + fmt.Println("waiting for it to mine:", helpers.ExplorerLink(chainID, tx.Hash())) + _, err = bind.WaitMined(context.Background(), ec, tx) + helpers.PanicErr(err) + fmt.Println("done") case "batch-bhs-deploy": cmd := 
flag.NewFlagSet("batch-bhs-deploy", flag.ExitOnError) bhsAddr := cmd.String("bhs-address", "", "address of the blockhash store contract") @@ -102,7 +296,7 @@ func main() { helpers.ParseArgs(cmd, os.Args[2:], "batch-bhs-address", "block-numbers") batchBHS, err := batch_blockhash_store.NewBatchBlockhashStore(common.HexToAddress(*batchAddr), ec) helpers.PanicErr(err) - blockNumbers, err := parseIntSlice(*blockNumbersArg) + blockNumbers := parseIntSlice(*blockNumbersArg) helpers.PanicErr(err) tx, err := batchBHS.Store(owner, blockNumbers) helpers.PanicErr(err) @@ -114,7 +308,7 @@ func main() { helpers.ParseArgs(cmd, os.Args[2:], "batch-bhs-address", "block-numbers") batchBHS, err := batch_blockhash_store.NewBatchBlockhashStore(common.HexToAddress(*batchAddr), ec) helpers.PanicErr(err) - blockNumbers, err := parseIntSlice(*blockNumbersArg) + blockNumbers := parseIntSlice(*blockNumbersArg) helpers.PanicErr(err) blockhashes, err := batchBHS.GetBlockhashes(nil, blockNumbers) helpers.PanicErr(err) @@ -515,23 +709,29 @@ func main() { tx, err := consumer.RequestRandomWords(owner, *subID, uint32(*cbGasLimit), uint16(*requestConfirmations), uint32(*numWords), keyHashBytes) helpers.PanicErr(err) fmt.Println("TX", helpers.ExplorerLink(chainID, tx.Hash())) + r, err := bind.WaitMined(context.Background(), ec, tx) + helpers.PanicErr(err) + fmt.Println("Receipt blocknumber:", r.BlockNumber) case "eoa-load-test-request": request := flag.NewFlagSet("eoa-load-test-request", flag.ExitOnError) consumerAddress := request.String("consumer-address", "", "consumer address") subID := request.Uint64("sub-id", 0, "subscription ID") requestConfirmations := request.Uint("request-confirmations", 3, "minimum request confirmations") keyHash := request.String("key-hash", "", "key hash") - requests := request.Uint("requests", 10, "number of randomness requests to make") + requests := request.Uint("requests", 10, "number of randomness requests to make per run") + runs := request.Uint("runs", 1, "number 
of runs to do. total randomness requests will be (requests * runs).") helpers.ParseArgs(request, os.Args[2:], "consumer-address", "sub-id", "key-hash") keyHashBytes := common.HexToHash(*keyHash) consumer, err := vrf_load_test_external_sub_owner.NewVRFLoadTestExternalSubOwner( common.HexToAddress(*consumerAddress), ec) helpers.PanicErr(err) - tx, err := consumer.RequestRandomWords(owner, *subID, uint16(*requestConfirmations), - keyHashBytes, uint16(*requests)) - helpers.PanicErr(err) - fmt.Println("TX", helpers.ExplorerLink(chainID, tx.Hash())) + for i := 0; i < int(*runs); i++ { + tx, err := consumer.RequestRandomWords(owner, *subID, uint16(*requestConfirmations), + keyHashBytes, uint16(*requests)) + helpers.PanicErr(err) + fmt.Printf("TX %d: %s\n", i+1, helpers.ExplorerLink(chainID, tx.Hash())) + } case "eoa-transfer-sub": trans := flag.NewFlagSet("eoa-transfer-sub", flag.ExitOnError) coordinatorAddress := trans.String("coordinator-address", "", "coordinator address") @@ -626,17 +826,31 @@ func main() { } } -func parseIntSlice(arg string) (ret []*big.Int, err error) { +func parseIntSlice(arg string) (ret []*big.Int) { parts := strings.Split(arg, ",") ret = []*big.Int{} for _, part := range parts { - i, err := strconv.ParseInt(part, 10, 64) - if err != nil { - return nil, err - } - ret = append(ret, big.NewInt(i)) + ret = append(ret, decimal.RequireFromString(part).BigInt()) } - return ret, nil + return ret +} + +func parseAddressSlice(arg string) (ret []common.Address) { + parts := strings.Split(arg, ",") + ret = []common.Address{} + for _, part := range parts { + ret = append(ret, common.HexToAddress(part)) + } + return +} + +func parseHashSlice(arg string) (ret []common.Hash) { + parts := strings.Split(arg, ",") + ret = []common.Hash{} + for _, part := range parts { + ret = append(ret, common.HexToHash(part)) + } + return } // decreasingBlockRange creates a continugous block range starting with diff --git a/core/services/blockhashstore/delegate.go 
b/core/services/blockhashstore/delegate.go index 12898619d42..9fee46f6a17 100644 --- a/core/services/blockhashstore/delegate.go +++ b/core/services/blockhashstore/delegate.go @@ -63,7 +63,7 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { chain.Config().EvmFinalityDepth(), jb.BlockhashStoreSpec.WaitBlocks) } - keys, err := d.ks.SendingKeys() + keys, err := d.ks.SendingKeys(chain.ID()) if err != nil { return nil, errors.Wrap(err, "getting sending keys") } diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go index 92d6de70278..2b27a304f48 100644 --- a/core/services/chainlink/application.go +++ b/core/services/chainlink/application.go @@ -15,7 +15,7 @@ import ( "go.uber.org/multierr" "go.uber.org/zap/zapcore" - "github.com/smartcontractkit/chainlink-solana/pkg/solana" + pkgsolana "github.com/smartcontractkit/chainlink-solana/pkg/solana" pkgterra "github.com/smartcontractkit/chainlink-terra/pkg/terra" "github.com/smartcontractkit/sqlx" @@ -23,6 +23,7 @@ import ( "github.com/smartcontractkit/chainlink/core/chains/evm" "github.com/smartcontractkit/chainlink/core/chains/evm/txmgr" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/core/chains/solana" "github.com/smartcontractkit/chainlink/core/chains/terra" "github.com/smartcontractkit/chainlink/core/config" "github.com/smartcontractkit/chainlink/core/logger" @@ -146,14 +147,18 @@ type ApplicationOpts struct { // Chains holds a ChainSet for each type of chain. 
type Chains struct { - EVM evm.ChainSet - Terra terra.ChainSet // nil if disabled + EVM evm.ChainSet + Solana solana.ChainSet // nil if disabled + Terra terra.ChainSet // nil if disabled } func (c *Chains) services() (s []services.ServiceCtx) { if c.EVM != nil { s = append(s, c.EVM) } + if c.Solana != nil { + s = append(s, c.Solana) + } if c.Terra != nil { s = append(s, c.Terra) } @@ -321,6 +326,7 @@ func NewApplication(opts ApplicationOpts) (Application, error) { monitoringEndpointGen, chains.EVM, globalLogger, + cfg, ) } else { globalLogger.Debug("Off-chain reporting disabled") @@ -334,7 +340,7 @@ func NewApplication(opts ApplicationOpts) (Application, error) { relay.AddRelayer(relaytypes.EVM, evmRelayer) } if cfg.SolanaEnabled() { - solanaRelayer := solana.NewRelayer(globalLogger.Named("Solana.Relayer")) + solanaRelayer := pkgsolana.NewRelayer(globalLogger.Named("Solana.Relayer"), chains.Solana) solanaRelayerCtx := solanaRelayer relay.AddRelayer(relaytypes.Solana, solanaRelayerCtx) } @@ -375,6 +381,14 @@ func NewApplication(opts ApplicationOpts) (Application, error) { jobSpawner := job.NewSpawner(jobORM, cfg, delegates, db, globalLogger, lbs) subservices = append(subservices, jobSpawner, pipelineRunner) + // We start the log poller after the job spawner + // so jobs have a chance to apply their initial log filters. 
+ if cfg.FeatureLogPoller() { + for _, c := range chains.EVM.Chains() { + subservices = append(subservices, c.LogPoller()) + } + } + // TODO: Make feeds manager compatible with multiple chains // See: https://app.clubhouse.io/chainlinklabs/story/14615/add-ability-to-set-chain-id-in-all-pipeline-tasks-that-interact-with-evm var feedsService feeds.Service diff --git a/core/services/directrequest/delegate.go b/core/services/directrequest/delegate.go index 2b9132fb4c8..6fb097b33c8 100644 --- a/core/services/directrequest/delegate.go +++ b/core/services/directrequest/delegate.go @@ -93,8 +93,8 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { pipelineRunner: d.pipelineRunner, pipelineORM: d.pipelineORM, job: jb, - mbOracleRequests: utils.NewHighCapacityMailbox(), - mbOracleCancelRequests: utils.NewHighCapacityMailbox(), + mbOracleRequests: utils.NewHighCapacityMailbox[log.Broadcast](), + mbOracleCancelRequests: utils.NewHighCapacityMailbox[log.Broadcast](), minIncomingConfirmations: concreteSpec.MinIncomingConfirmations.Uint32, requesters: concreteSpec.Requesters, minContractPayment: concreteSpec.MinContractPayment, @@ -121,8 +121,8 @@ type listener struct { job job.Job runs sync.Map shutdownWaitGroup sync.WaitGroup - mbOracleRequests *utils.Mailbox - mbOracleCancelRequests *utils.Mailbox + mbOracleRequests *utils.Mailbox[log.Broadcast] + mbOracleCancelRequests *utils.Mailbox[log.Broadcast] minIncomingConfirmations uint32 requesters models.AddressCollection minContractPayment *assets.Link @@ -220,16 +220,12 @@ func (l *listener) processCancelOracleRequests() { } } -func (l *listener) handleReceivedLogs(mailbox *utils.Mailbox) { +func (l *listener) handleReceivedLogs(mailbox *utils.Mailbox[log.Broadcast]) { for { - i, exists := mailbox.Retrieve() + lb, exists := mailbox.Retrieve() if !exists { return } - lb, ok := i.(log.Broadcast) - if !ok { - panic(errors.Errorf("DirectRequest: invariant violation, expected log.Broadcast but got %T", lb)) - 
} was, err := l.logBroadcaster.WasAlreadyConsumed(lb) if err != nil { l.logger.Errorw("Could not determine if log was already consumed", "error", err) diff --git a/core/services/feeds/mocks/config.go b/core/services/feeds/mocks/config.go index a51ee8d82e4..23c515b4c6f 100644 --- a/core/services/feeds/mocks/config.go +++ b/core/services/feeds/mocks/config.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/feeds/mocks/connections_manager.go b/core/services/feeds/mocks/connections_manager.go index 58681083fed..fa29c22fcb8 100644 --- a/core/services/feeds/mocks/connections_manager.go +++ b/core/services/feeds/mocks/connections_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/feeds/mocks/feeds_manager_client.go b/core/services/feeds/mocks/feeds_manager_client.go index 7ef73a76ab9..9b3c0f34ba3 100644 --- a/core/services/feeds/mocks/feeds_manager_client.go +++ b/core/services/feeds/mocks/feeds_manager_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/feeds/mocks/orm.go b/core/services/feeds/mocks/orm.go index 8e53fb1fe61..ee19dedc16b 100644 --- a/core/services/feeds/mocks/orm.go +++ b/core/services/feeds/mocks/orm.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/feeds/mocks/service.go b/core/services/feeds/mocks/service.go index 7a55d783098..fb615e6a10e 100644 --- a/core/services/feeds/mocks/service.go +++ b/core/services/feeds/mocks/service.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/services/feeds/proto/feeds_manager.pb.go b/core/services/feeds/proto/feeds_manager.pb.go index 87764fc14eb..34ebe741705 100644 --- a/core/services/feeds/proto/feeds_manager.pb.go +++ b/core/services/feeds/proto/feeds_manager.pb.go @@ -7,10 +7,11 @@ package proto import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) const ( diff --git a/core/services/feeds/service.go b/core/services/feeds/service.go index 226386fb73a..aa9375941e3 100644 --- a/core/services/feeds/service.go +++ b/core/services/feeds/service.go @@ -179,7 +179,7 @@ func (s *service) SyncNodeInfo(id int64) error { } // Assemble EVM keys - evmKeys, err := s.ethKeyStore.SendingKeys() + evmKeys, err := s.ethKeyStore.SendingKeys(nil) if err != nil { return err } diff --git a/core/services/feeds/service_test.go b/core/services/feeds/service_test.go index 0578bb2137f..7788499aaeb 100644 --- a/core/services/feeds/service_test.go +++ b/core/services/feeds/service_test.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "encoding/hex" + "math/big" "testing" "time" @@ -450,7 +451,7 @@ func Test_Service_SyncNodeInfo(t *testing.T) { // Mock fetching the information to send svc.orm.On("GetManager", feedsMgr.ID).Return(feedsMgr, nil) - svc.ethKeystore.On("SendingKeys").Return(evmKeys, nil) + svc.ethKeystore.On("SendingKeys", (*big.Int)(nil)).Return(evmKeys, nil) svc.ethKeystore. On("GetStatesForKeys", evmKeys). 
Return([]ethkey.State{{Address: sendingKey.Address, EVMChainID: *utils.NewBigI(42)}}, nil) diff --git a/core/services/fluxmonitorv2/contract_submitter.go b/core/services/fluxmonitorv2/contract_submitter.go index cdf220acb0a..d8eb8ed795e 100644 --- a/core/services/fluxmonitorv2/contract_submitter.go +++ b/core/services/fluxmonitorv2/contract_submitter.go @@ -46,7 +46,7 @@ func NewFluxAggregatorContractSubmitter( // Submit submits the answer by writing a EthTx for the txmgr to // pick up func (c *FluxAggregatorContractSubmitter) Submit(roundID *big.Int, submission *big.Int, qopts ...pg.QOpt) error { - fromAddress, err := c.keyStore.GetRoundRobinAddress() + fromAddress, err := c.keyStore.GetRoundRobinAddress(nil) // FIXME: FluxMonitor probably not compatible with multichain here: https://app.shortcut.com/chainlinklabs/story/34394/fluxmonitor-is-probably-not-compatible-with-multichain if err != nil { return err } diff --git a/core/services/fluxmonitorv2/flux_monitor.go b/core/services/fluxmonitorv2/flux_monitor.go index 18502942b94..ca8377ca831 100644 --- a/core/services/fluxmonitorv2/flux_monitor.go +++ b/core/services/fluxmonitorv2/flux_monitor.go @@ -76,7 +76,7 @@ type FluxMonitor struct { logger logger.Logger - backlog *utils.BoundedPriorityQueue + backlog *utils.BoundedPriorityQueue[log.Broadcast] chProcessLogs chan struct{} utils.StartStopOnce @@ -124,7 +124,7 @@ func NewFluxMonitor( logBroadcaster: logBroadcaster, fluxAggregator: fluxAggregator, logger: fmLogger, - backlog: utils.NewBoundedPriorityQueue(map[uint]uint{ + backlog: utils.NewBoundedPriorityQueue[log.Broadcast](map[uint]int{ // We want reconnecting nodes to be able to submit to a round // that hasn't hit maxAnswers yet, as well as the newest round. 
PriorityNewRoundLog: 2, @@ -452,7 +452,7 @@ func (fm *FluxMonitor) SetOracleAddress() error { fm.logger.Error("failed to get list of oracles from FluxAggregator contract") return errors.Wrap(err, "failed to get list of oracles from FluxAggregator contract") } - keys, err := fm.keyStore.SendingKeys() + keys, err := fm.keyStore.SendingKeys(nil) // FIXME: FluxMonitor is probably not compatible with multichain here if err != nil { return errors.Wrap(err, "failed to load keys") } @@ -486,11 +486,7 @@ func (fm *FluxMonitor) SetOracleAddress() error { func (fm *FluxMonitor) processLogs() { for !fm.backlog.Empty() { - maybeBroadcast := fm.backlog.Take() - broadcast, ok := maybeBroadcast.(log.Broadcast) - if !ok { - fm.logger.Errorf("Failed to convert backlog into LogBroadcast. Type is %T", maybeBroadcast) - } + broadcast := fm.backlog.Take() fm.processBroadcast(broadcast) } } diff --git a/core/services/fluxmonitorv2/flux_monitor_test.go b/core/services/fluxmonitorv2/flux_monitor_test.go index 44dbe594b65..c365616b9aa 100644 --- a/core/services/fluxmonitorv2/flux_monitor_test.go +++ b/core/services/fluxmonitorv2/flux_monitor_test.go @@ -305,7 +305,7 @@ func setupStoreWithKey(t *testing.T) (*sqlx.DB, common.Address) { // setupStoreWithKey setups a new store and adds a key to the keystore func setupFullDBWithKey(t *testing.T, name string) (*sqlx.DB, common.Address) { - cfg, db := heavyweight.FullTestDB(t, name, true, true) + cfg, db := heavyweight.FullTestDB(t, name) ethKeyStore := cltest.NewKeyStore(t, db, cfg).Eth() _, nodeAddr := cltest.MustAddRandomKeyToKeystore(t, ethKeyStore) @@ -379,7 +379,7 @@ func TestFluxMonitor_PollIfEligible(t *testing.T) { fm, tm := setup(t, db) - tm.keyStore.On("SendingKeys").Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() + tm.keyStore.On("SendingKeys", (*big.Int)(nil)).Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() 
tm.logBroadcaster.On("IsConnected").Return(tc.connected).Once() // Setup Answers @@ -529,7 +529,7 @@ func TestFluxMonitor_PollIfEligible_Creates_JobErr(t *testing.T) { fm, tm := setup(t, db) - tm.keyStore.On("SendingKeys").Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() + tm.keyStore.On("SendingKeys", (*big.Int)(nil)).Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() tm.logBroadcaster.On("IsConnected").Return(true).Once() tm.jobORM. @@ -581,7 +581,7 @@ func TestPollingDeviationChecker_BuffersLogs(t *testing.T) { readyToFillQueue := cltest.NewAwaiter() logsAwaiter := cltest.NewAwaiter() - tm.keyStore.On("SendingKeys").Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() + tm.keyStore.On("SendingKeys", (*big.Int)(nil)).Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() tm.fluxAggregator.On("Address").Return(common.Address{}) tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(freshContractRoundDataResponse()).Maybe() @@ -777,7 +777,7 @@ func TestFluxMonitor_TriggerIdleTimeThreshold(t *testing.T) { fm, tm := setup(t, db, disablePollTicker(true), disableIdleTimer(tc.idleTimerDisabled), setIdleTimerPeriod(tc.idleDuration), withORM(orm)) - tm.keyStore.On("SendingKeys").Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() + tm.keyStore.On("SendingKeys", (*big.Int)(nil)).Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() const fetchedAnswer = 100 answerBigInt := big.NewInt(fetchedAnswer) @@ -857,7 +857,7 @@ func TestFluxMonitor_HibernationTickerFiresMultipleTimes(t *testing.T) { setHibernationState(true), ) - tm.keyStore.On("SendingKeys").Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() + tm.keyStore.On("SendingKeys", (*big.Int)(nil)).Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, 
nil).Once() const fetchedAnswer = 100 answerBigInt := big.NewInt(fetchedAnswer) @@ -960,7 +960,7 @@ func TestFluxMonitor_HibernationIsEnteredAndRetryTickerStopped(t *testing.T) { setFlags(flags), ) - tm.keyStore.On("SendingKeys").Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() + tm.keyStore.On("SendingKeys", (*big.Int)(nil)).Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() const fetchedAnswer = 100 answerBigInt := big.NewInt(fetchedAnswer) @@ -993,7 +993,7 @@ func TestFluxMonitor_HibernationIsEnteredAndRetryTickerStopped(t *testing.T) { select { case <-pollOccured: - case <-time.After(3 * time.Second): + case <-time.After(testutils.WaitTimeout(t)): t.Fatal("Poll did not occur!") } @@ -1019,7 +1019,7 @@ func TestFluxMonitor_HibernationIsEnteredAndRetryTickerStopped(t *testing.T) { select { case <-pollOccured: - case <-time.After(3 * time.Second): + case <-time.After(testutils.WaitTimeout(t)): t.Fatal("Poll did not occur!") } @@ -1050,7 +1050,7 @@ func TestFluxMonitor_HibernationIsEnteredAndRetryTickerStopped(t *testing.T) { select { case <-pollOccured: - case <-time.After(3 * time.Second): + case <-time.After(testutils.WaitTimeout(t)): t.Fatal("Poll did not occur, though it should have via hibernation ticker") } @@ -1071,7 +1071,7 @@ func TestFluxMonitor_IdleTimerResetsOnNewRound(t *testing.T) { setIdleTimerPeriod(2*time.Second), ) - tm.keyStore.On("SendingKeys").Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() + tm.keyStore.On("SendingKeys", (*big.Int)(nil)).Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() const fetchedAnswer = 100 answerBigInt := big.NewInt(fetchedAnswer) @@ -1179,7 +1179,7 @@ func TestFluxMonitor_RoundTimeoutCausesPoll_timesOutAtZero(t *testing.T) { fm, tm := setup(t, db, disablePollTicker(true), disableIdleTimer(true), withORM(orm)) tm.keyStore. - On("SendingKeys"). 
+ On("SendingKeys", (*big.Int)(nil)). Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil). Twice() // Once called from the test, once during start @@ -1243,7 +1243,7 @@ func TestFluxMonitor_UsesPreviousRoundStateOnStartup_RoundTimeout(t *testing.T) fm, tm := setup(t, db, disablePollTicker(true), disableIdleTimer(true), withORM(orm)) - tm.keyStore.On("SendingKeys").Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() + tm.keyStore.On("SendingKeys", (*big.Int)(nil)).Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() tm.logBroadcaster.On("Register", mock.Anything, mock.Anything).Return(func() {}) tm.logBroadcaster.On("IsConnected").Return(true).Maybe() @@ -1319,7 +1319,7 @@ func TestFluxMonitor_UsesPreviousRoundStateOnStartup_IdleTimer(t *testing.T) { ) initialPollOccurred := make(chan struct{}, 1) - tm.keyStore.On("SendingKeys").Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() + tm.keyStore.On("SendingKeys", (*big.Int)(nil)).Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() tm.logBroadcaster.On("Register", mock.Anything, mock.Anything).Return(func() {}) tm.logBroadcaster.On("IsConnected").Return(true).Maybe() tm.fluxAggregator.On("Address").Return(common.Address{}) @@ -1379,7 +1379,7 @@ func TestFluxMonitor_RoundTimeoutCausesPoll_timesOutNotZero(t *testing.T) { fm, tm := setup(t, db, disablePollTicker(true), disableIdleTimer(true), withORM(orm)) - tm.keyStore.On("SendingKeys").Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() + tm.keyStore.On("SendingKeys", (*big.Int)(nil)).Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() const fetchedAnswer = 100 answerBigInt := big.NewInt(fetchedAnswer) @@ -1517,7 +1517,7 @@ func TestFluxMonitor_DoesNotDoubleSubmit(t *testing.T) { answer = 100 ) - 
tm.keyStore.On("SendingKeys").Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() + tm.keyStore.On("SendingKeys", (*big.Int)(nil)).Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() tm.logBroadcaster.On("IsConnected").Return(true).Maybe() // Mocks initiated by the New Round log @@ -1631,7 +1631,7 @@ func TestFluxMonitor_DoesNotDoubleSubmit(t *testing.T) { roundID = 3 answer = 100 ) - tm.keyStore.On("SendingKeys").Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() + tm.keyStore.On("SendingKeys", (*big.Int)(nil)).Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() tm.logBroadcaster.On("IsConnected").Return(true).Maybe() // First, force the node to try to poll, which should result in a submission @@ -1727,7 +1727,7 @@ func TestFluxMonitor_DoesNotDoubleSubmit(t *testing.T) { roundID = 3 answer = 100 ) - tm.keyStore.On("SendingKeys").Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() + tm.keyStore.On("SendingKeys", (*big.Int)(nil)).Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil).Once() tm.logBroadcaster.On("IsConnected").Return(true).Maybe() // First, force the node to try to poll, which should result in a submission @@ -1875,7 +1875,7 @@ func TestFluxMonitor_DrumbeatTicker(t *testing.T) { fm, tm := setup(t, db, disablePollTicker(true), disableIdleTimer(true), enableDrumbeatTicker("@every 3s", 2*time.Second)) - tm.keyStore.On("SendingKeys").Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil) + tm.keyStore.On("SendingKeys", (*big.Int)(nil)).Return([]ethkey.KeyV2{{Address: ethkey.EIP55AddressFromAddress(nodeAddr)}}, nil) const fetchedAnswer = 100 answerBigInt := big.NewInt(fetchedAnswer) diff --git a/core/services/fluxmonitorv2/helpers_test.go b/core/services/fluxmonitorv2/helpers_test.go index fcbc85c581d..095cb39d07a 100644 --- 
a/core/services/fluxmonitorv2/helpers_test.go +++ b/core/services/fluxmonitorv2/helpers_test.go @@ -23,7 +23,7 @@ func (fm *FluxMonitor) ExportedProcessLogs() { fm.processLogs() } -func (fm *FluxMonitor) ExportedBacklog() *utils.BoundedPriorityQueue { +func (fm *FluxMonitor) ExportedBacklog() *utils.BoundedPriorityQueue[log.Broadcast] { return fm.backlog } diff --git a/core/services/fluxmonitorv2/integrations_test.go b/core/services/fluxmonitorv2/integrations_test.go index 8f08df3a811..8e6c6763062 100644 --- a/core/services/fluxmonitorv2/integrations_test.go +++ b/core/services/fluxmonitorv2/integrations_test.go @@ -215,7 +215,7 @@ func startApplication( fa fluxAggregatorUniverse, setConfig func(cfg *configtest.TestGeneralConfig), ) *cltest.TestApplication { - config, _ := heavyweight.FullTestDB(t, dbName(t.Name()), true, true) + config, _ := heavyweight.FullTestDB(t, dbName(t.Name())) setConfig(config) app := cltest.NewApplicationWithConfigAndKeyOnSimulatedBlockchain(t, config, fa.backend, fa.key) require.NoError(t, app.Start(testutils.Context(t))) @@ -464,7 +464,7 @@ func TestFluxMonitor_Deviation(t *testing.T) { type v struct { count int - updatedAt string + updatedAt int64 } expectedMeta := map[string]v{} var expMetaMu sync.Mutex @@ -481,7 +481,8 @@ func TestFluxMonitor_Deviation(t *testing.T) { k := m.Meta.LatestAnswer.String() expMetaMu.Lock() curr := expectedMeta[k] - expectedMeta[k] = v{curr.count + 1, m.Meta.UpdatedAt.String()} + assert.True(t, m.Meta.UpdatedAt.IsInt64()) // sanity check unix ts + expectedMeta[k] = v{curr.count + 1, m.Meta.UpdatedAt.Int64()} expMetaMu.Unlock() } }, diff --git a/core/services/fluxmonitorv2/key_store.go b/core/services/fluxmonitorv2/key_store.go index 8d577100ed0..67c5b8e4003 100644 --- a/core/services/fluxmonitorv2/key_store.go +++ b/core/services/fluxmonitorv2/key_store.go @@ -1,6 +1,8 @@ package fluxmonitorv2 import ( + "math/big" + "github.com/ethereum/go-ethereum/common" 
"github.com/smartcontractkit/chainlink/core/services/keystore" "github.com/smartcontractkit/chainlink/core/services/keystore/keys/ethkey" @@ -10,8 +12,8 @@ import ( // KeyStoreInterface defines an interface to interact with the keystore type KeyStoreInterface interface { - SendingKeys() ([]ethkey.KeyV2, error) - GetRoundRobinAddress(...common.Address) (common.Address, error) + SendingKeys(chainID *big.Int) ([]ethkey.KeyV2, error) + GetRoundRobinAddress(chainID *big.Int, addrs ...common.Address) (common.Address, error) } // KeyStore implements KeyStoreInterface diff --git a/core/services/fluxmonitorv2/key_store_test.go b/core/services/fluxmonitorv2/key_store_test.go index 536bd5b70c0..9391789da13 100644 --- a/core/services/fluxmonitorv2/key_store_test.go +++ b/core/services/fluxmonitorv2/key_store_test.go @@ -1,9 +1,11 @@ package fluxmonitorv2_test import ( + "math/big" "testing" "github.com/smartcontractkit/chainlink/core/internal/cltest" + "github.com/smartcontractkit/chainlink/core/internal/testutils" "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/core/services/fluxmonitorv2" "github.com/stretchr/testify/require" @@ -18,13 +20,24 @@ func TestKeyStore_SendingKeys(t *testing.T) { ks := fluxmonitorv2.NewKeyStore(ethKeyStore) - key, err := ethKeyStore.Create(&cltest.FixtureChainID) + key, err := ethKeyStore.Create(testutils.FixtureChainID) + require.NoError(t, err) + key2, err := ethKeyStore.Create(big.NewInt(1337)) require.NoError(t, err) - keys, err := ks.SendingKeys() + keys, err := ks.SendingKeys(testutils.FixtureChainID) require.NoError(t, err) require.Len(t, keys, 1) require.Equal(t, key, keys[0]) + + keys, err = ks.SendingKeys(big.NewInt(1337)) + require.NoError(t, err) + require.Len(t, keys, 1) + require.Equal(t, key2, keys[0]) + + keys, err = ks.SendingKeys(nil) + require.NoError(t, err) + require.Len(t, keys, 2) } func TestKeyStore_GetRoundRobinAddress(t *testing.T) { @@ -39,7 +52,7 @@ func 
TestKeyStore_GetRoundRobinAddress(t *testing.T) { ks := fluxmonitorv2.NewKeyStore(ethKeyStore) // Gets the only address in the keystore - addr, err := ks.GetRoundRobinAddress() + addr, err := ks.GetRoundRobinAddress(nil) require.NoError(t, err) require.Equal(t, k0Address, addr) } diff --git a/core/services/fluxmonitorv2/mocks/contract_submitter.go b/core/services/fluxmonitorv2/mocks/contract_submitter.go index adf5e3a9dc3..03a0681ba7a 100644 --- a/core/services/fluxmonitorv2/mocks/contract_submitter.go +++ b/core/services/fluxmonitorv2/mocks/contract_submitter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/fluxmonitorv2/mocks/flags.go b/core/services/fluxmonitorv2/mocks/flags.go index 3a1796fd0ac..beedd862924 100644 --- a/core/services/fluxmonitorv2/mocks/flags.go +++ b/core/services/fluxmonitorv2/mocks/flags.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/fluxmonitorv2/mocks/key_store_interface.go b/core/services/fluxmonitorv2/mocks/key_store_interface.go index 2757e00a2dd..124ddf3405b 100644 --- a/core/services/fluxmonitorv2/mocks/key_store_interface.go +++ b/core/services/fluxmonitorv2/mocks/key_store_interface.go @@ -1,8 +1,10 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks import ( + big "math/big" + common "github.com/ethereum/go-ethereum/common" ethkey "github.com/smartcontractkit/chainlink/core/services/keystore/keys/ethkey" @@ -14,19 +16,20 @@ type KeyStoreInterface struct { mock.Mock } -// GetRoundRobinAddress provides a mock function with given fields: _a0 -func (_m *KeyStoreInterface) GetRoundRobinAddress(_a0 ...common.Address) (common.Address, error) { - _va := make([]interface{}, len(_a0)) - for _i := range _a0 { - _va[_i] = _a0[_i] +// GetRoundRobinAddress provides a mock function with given fields: chainID, addrs +func (_m *KeyStoreInterface) GetRoundRobinAddress(chainID *big.Int, addrs ...common.Address) (common.Address, error) { + _va := make([]interface{}, len(addrs)) + for _i := range addrs { + _va[_i] = addrs[_i] } var _ca []interface{} + _ca = append(_ca, chainID) _ca = append(_ca, _va...) ret := _m.Called(_ca...) var r0 common.Address - if rf, ok := ret.Get(0).(func(...common.Address) common.Address); ok { - r0 = rf(_a0...) + if rf, ok := ret.Get(0).(func(*big.Int, ...common.Address) common.Address); ok { + r0 = rf(chainID, addrs...) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(common.Address) @@ -34,8 +37,8 @@ func (_m *KeyStoreInterface) GetRoundRobinAddress(_a0 ...common.Address) (common } var r1 error - if rf, ok := ret.Get(1).(func(...common.Address) error); ok { - r1 = rf(_a0...) + if rf, ok := ret.Get(1).(func(*big.Int, ...common.Address) error); ok { + r1 = rf(chainID, addrs...) 
} else { r1 = ret.Error(1) } @@ -43,13 +46,13 @@ func (_m *KeyStoreInterface) GetRoundRobinAddress(_a0 ...common.Address) (common return r0, r1 } -// SendingKeys provides a mock function with given fields: -func (_m *KeyStoreInterface) SendingKeys() ([]ethkey.KeyV2, error) { - ret := _m.Called() +// SendingKeys provides a mock function with given fields: chainID +func (_m *KeyStoreInterface) SendingKeys(chainID *big.Int) ([]ethkey.KeyV2, error) { + ret := _m.Called(chainID) var r0 []ethkey.KeyV2 - if rf, ok := ret.Get(0).(func() []ethkey.KeyV2); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(*big.Int) []ethkey.KeyV2); ok { + r0 = rf(chainID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]ethkey.KeyV2) @@ -57,8 +60,8 @@ func (_m *KeyStoreInterface) SendingKeys() ([]ethkey.KeyV2, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(*big.Int) error); ok { + r1 = rf(chainID) } else { r1 = ret.Error(1) } diff --git a/core/services/fluxmonitorv2/mocks/orm.go b/core/services/fluxmonitorv2/mocks/orm.go index d509c97003e..7374d6282d1 100644 --- a/core/services/fluxmonitorv2/mocks/orm.go +++ b/core/services/fluxmonitorv2/mocks/orm.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/services/fluxmonitorv2/poll_manager_test.go b/core/services/fluxmonitorv2/poll_manager_test.go index b1d879b8b55..ce2d6b0b8ea 100644 --- a/core/services/fluxmonitorv2/poll_manager_test.go +++ b/core/services/fluxmonitorv2/poll_manager_test.go @@ -83,8 +83,6 @@ func watchTicks(t *testing.T, pm *fluxmonitorv2.PollManager, waitDuration time.D } func TestPollManager_PollTicker(t *testing.T) { - t.Parallel() - pm, err := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ PollTickerInterval: pollTickerDefaultDuration, PollTickerDisabled: false, @@ -105,8 +103,6 @@ func TestPollManager_PollTicker(t *testing.T) { } func TestPollManager_IdleTimer(t *testing.T) { - t.Parallel() - pm, err := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ PollTickerInterval: 100 * time.Millisecond, PollTickerDisabled: true, @@ -129,8 +125,6 @@ func TestPollManager_IdleTimer(t *testing.T) { } func TestPollManager_RoundTimer(t *testing.T) { - t.Parallel() - pm, err := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ PollTickerInterval: pollTickerDefaultDuration, PollTickerDisabled: true, @@ -198,8 +192,6 @@ func TestPollManager_InitialPoll(t *testing.T) { } func TestPollManager_HibernationTimer(t *testing.T) { - t.Parallel() - pm, err := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ PollTickerInterval: pollTickerDefaultDuration, PollTickerDisabled: true, @@ -221,8 +213,6 @@ func TestPollManager_HibernationTimer(t *testing.T) { } func TestPollManager_HibernationOnStartThenAwaken(t *testing.T) { - t.Parallel() - pm, err := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ PollTickerInterval: pollTickerDefaultDuration, PollTickerDisabled: false, @@ -257,8 +247,6 @@ func TestPollManager_HibernationOnStartThenAwaken(t *testing.T) { } func TestPollManager_AwakeOnStartThenHibernate(t *testing.T) { - t.Parallel() - pm := newPollManager(t) pm.Start(false, flux_aggregator_wrapper.OracleRoundState{ @@ -350,8 
+338,6 @@ func TestPollManager_ShouldPerformInitialPoll(t *testing.T) { } func TestPollManager_Stop(t *testing.T) { - t.Parallel() - pm := newPollManager(t) pm.Start(false, flux_aggregator_wrapper.OracleRoundState{ @@ -375,8 +361,6 @@ func TestPollManager_Stop(t *testing.T) { } func TestPollManager_ResetIdleTimer(t *testing.T) { - t.Parallel() - pm := newPollManager(t) // Start again in awake mode @@ -397,8 +381,6 @@ func TestPollManager_ResetIdleTimer(t *testing.T) { } func TestPollManager_ResetIdleTimerWhenHibernating(t *testing.T) { - t.Parallel() - pm := newPollManager(t) // Start in hibernation @@ -419,8 +401,6 @@ func TestPollManager_ResetIdleTimerWhenHibernating(t *testing.T) { } func TestPollManager_Reset(t *testing.T) { - t.Parallel() - pm := newPollManager(t) // Start again in awake mode @@ -448,8 +428,6 @@ func TestPollManager_Reset(t *testing.T) { } func TestPollManager_ResetWhenHibernating(t *testing.T) { - t.Parallel() - pm := newPollManager(t) // Start in hibernation diff --git a/core/services/job/job_orm_test.go b/core/services/job/job_orm_test.go index c6c8a6392f6..e230ffb6bda 100644 --- a/core/services/job/job_orm_test.go +++ b/core/services/job/job_orm_test.go @@ -3,6 +3,7 @@ package job_test import ( "context" "database/sql" + "fmt" "testing" "time" @@ -268,7 +269,7 @@ func TestORM_DeleteJob_DeletesAssociatedRecords(t *testing.T) { }) t.Run("it deletes records for keeper jobs", func(t *testing.T) { - registry, keeperJob := cltest.MustInsertKeeperRegistry(t, db, korm, keyStore.Eth()) + registry, keeperJob := cltest.MustInsertKeeperRegistry(t, db, korm, keyStore.Eth(), 0, 1, 20) cltest.MustInsertUpkeepForRegistry(t, db, config, registry) cltest.AssertCount(t, db, "keeper_specs", 1) @@ -340,7 +341,10 @@ func TestORM_CreateJob_VRFV2(t *testing.T) { jb, err := vrf.ValidatedVRFSpec(testspecs.GenerateVRFSpec( testspecs.VRFSpecParams{ RequestedConfsDelay: 10, - FromAddresses: fromAddresses}). 
+ FromAddresses: fromAddresses, + ChunkSize: 25, + BackoffInitialDelay: time.Minute, + BackoffMaxDelay: time.Hour}). Toml()) require.NoError(t, err) @@ -350,9 +354,24 @@ func TestORM_CreateJob_VRFV2(t *testing.T) { var requestedConfsDelay int64 require.NoError(t, db.Get(&requestedConfsDelay, `SELECT requested_confs_delay FROM vrf_specs LIMIT 1`)) require.Equal(t, int64(10), requestedConfsDelay) + var batchFulfillmentEnabled bool + require.NoError(t, db.Get(&batchFulfillmentEnabled, `SELECT batch_fulfillment_enabled FROM vrf_specs LIMIT 1`)) + require.False(t, batchFulfillmentEnabled) + var batchFulfillmentGasMultiplier float64 + require.NoError(t, db.Get(&batchFulfillmentGasMultiplier, `SELECT batch_fulfillment_gas_multiplier FROM vrf_specs LIMIT 1`)) + require.Equal(t, float64(1.0), batchFulfillmentGasMultiplier) var requestTimeout time.Duration require.NoError(t, db.Get(&requestTimeout, `SELECT request_timeout FROM vrf_specs LIMIT 1`)) require.Equal(t, 24*time.Hour, requestTimeout) + var backoffInitialDelay time.Duration + require.NoError(t, db.Get(&backoffInitialDelay, `SELECT backoff_initial_delay FROM vrf_specs LIMIT 1`)) + require.Equal(t, time.Minute, backoffInitialDelay) + var backoffMaxDelay time.Duration + require.NoError(t, db.Get(&backoffMaxDelay, `SELECT backoff_max_delay FROM vrf_specs LIMIT 1`)) + require.Equal(t, time.Hour, backoffMaxDelay) + var chunkSize int + require.NoError(t, db.Get(&chunkSize, `SELECT chunk_size FROM vrf_specs LIMIT 1`)) + require.Equal(t, 25, chunkSize) var fa pq.ByteaArray require.NoError(t, db.Get(&fa, `SELECT from_addresses FROM vrf_specs LIMIT 1`)) var actual []string @@ -404,6 +423,89 @@ func TestORM_CreateJob_OCRBootstrap(t *testing.T) { cltest.AssertCount(t, db, "jobs", 0) } +func TestORM_CreateJob_OCR_DuplicatedContractAddress(t *testing.T) { + config := evmtest.NewChainScopedConfig(t, cltest.NewTestGeneralConfig(t)) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config) + 
keyStore.OCR().Add(cltest.DefaultOCRKey) + + pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config) + cc := evmtest.NewChainSet(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config}) + jobORM := job.NewTestORM(t, db, cc, pipelineORM, keyStore, config) + + chain, err := cc.Default() + require.NoError(t, err) + + _, address := cltest.MustInsertRandomKey(t, keyStore.Eth()) + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config) + _, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config) + + t.Run("with the default chain id", func(t *testing.T) { + spec := testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{ + DS1BridgeName: bridge.Name.String(), + DS2BridgeName: bridge2.Name.String(), + TransmitterAddress: address.Hex(), + }) + jb, err := ocr.ValidatedOracleSpecToml(cc, spec.Toml()) + require.NoError(t, err) + + err = jobORM.CreateJob(&jb) + require.NoError(t, err) + cltest.AssertCount(t, db, "ocr_oracle_specs", 1) + cltest.AssertCount(t, db, "jobs", 1) + + spec2 := testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{ + EVMChainID: chain.ID().String(), + DS1BridgeName: bridge.Name.String(), + DS2BridgeName: bridge2.Name.String(), + TransmitterAddress: address.Hex(), + }) + jb2, err := ocr.ValidatedOracleSpecToml(cc, spec2.Toml()) + require.NoError(t, err) + + err = jobORM.CreateJob(&jb2) + require.Error(t, err) + assert.Equal(t, err.Error(), fmt.Sprintf("CreateJobFailed: a job with contract address %s already exists for chain ID %d", jb2.OCROracleSpec.ContractAddress, jb2.OCROracleSpec.EVMChainID.ToInt())) + }) + + t.Run("with a set chain id", func(t *testing.T) { + externalJobID := uuid.NullUUID{UUID: uuid.NewV4(), Valid: true} + _, contractAddress := cltest.MustInsertRandomKey(t, keyStore.Eth()) + + spec := testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{ + EVMChainID: chain.ID().String(), + DS1BridgeName: bridge.Name.String(), + DS2BridgeName: bridge2.Name.String(), + TransmitterAddress: address.Hex(), + ContractAddress: 
contractAddress.Hex(), + JobID: externalJobID.UUID.String(), + Name: "with a chain id", + }) + + jb, err := ocr.ValidatedOracleSpecToml(cc, spec.Toml()) + require.NoError(t, err) + + err = jobORM.CreateJob(&jb) + require.NoError(t, err) + + spec2 := testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{ + EVMChainID: chain.ID().String(), + DS1BridgeName: bridge.Name.String(), + DS2BridgeName: bridge2.Name.String(), + TransmitterAddress: address.Hex(), + ContractAddress: contractAddress.Hex(), + JobID: externalJobID.UUID.String(), + Name: "with a chain id 2", + }) + jb2, err := ocr.ValidatedOracleSpecToml(cc, spec2.Toml()) + require.NoError(t, err) + + err = jobORM.CreateJob(&jb2) + require.Error(t, err) + assert.Equal(t, err.Error(), fmt.Sprintf("CreateJobFailed: a job with contract address %s already exists for chain ID %d", jb2.OCROracleSpec.ContractAddress, chain.ID())) + }) +} + func Test_FindJobs(t *testing.T) { t.Parallel() diff --git a/core/services/job/job_pipeline_orm_integration_test.go b/core/services/job/job_pipeline_orm_integration_test.go index 4ec11773439..d000ae6857a 100644 --- a/core/services/job/job_pipeline_orm_integration_test.go +++ b/core/services/job/job_pipeline_orm_integration_test.go @@ -5,6 +5,10 @@ import ( "testing" "time" + "github.com/smartcontractkit/sqlx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/internal/testutils/evmtest" "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" @@ -12,9 +16,6 @@ import ( "github.com/smartcontractkit/chainlink/core/services/job" "github.com/smartcontractkit/chainlink/core/services/pipeline" "github.com/smartcontractkit/chainlink/core/store/models" - "github.com/smartcontractkit/sqlx" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func clearJobsDb(t *testing.T, db *sqlx.DB) { @@ -30,7 +31,7 @@ func 
TestPipelineORM_Integration(t *testing.T) { // data source 2 ds2 [type=http method=GET url="https://chain.link/voter_turnout/USA-2020" requestData=<{"hi": "hello"}>]; - ds2_parse [type=jsonparse path="three,four"]; + ds2_parse [type=jsonparse path="three.four" separator="."]; ds2_multiply [type=multiply times=4.56]; ds1 -> ds1_parse -> ds1_multiply -> answer1; diff --git a/core/services/job/mocks/delegate.go b/core/services/job/mocks/delegate.go index e6a6765ed2e..799a5b97303 100644 --- a/core/services/job/mocks/delegate.go +++ b/core/services/job/mocks/delegate.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/job/mocks/orm.go b/core/services/job/mocks/orm.go index 239e39f8fe9..7d7e6f16774 100644 --- a/core/services/job/mocks/orm.go +++ b/core/services/job/mocks/orm.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/job/mocks/service.go b/core/services/job/mocks/service.go index 2916994dfcc..5d0a73efee9 100644 --- a/core/services/job/mocks/service.go +++ b/core/services/job/mocks/service.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/job/mocks/service_ctx.go b/core/services/job/mocks/service_ctx.go index 40db8ccf3c7..5f3837a13f8 100644 --- a/core/services/job/mocks/service_ctx.go +++ b/core/services/job/mocks/service_ctx.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/job/mocks/spawner.go b/core/services/job/mocks/spawner.go index 8aa7b525f24..831a30c321b 100644 --- a/core/services/job/mocks/spawner.go +++ b/core/services/job/mocks/spawner.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. 
+// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/job/models.go b/core/services/job/models.go index b32f721043c..821d10b0a60 100644 --- a/core/services/job/models.go +++ b/core/services/job/models.go @@ -424,7 +424,19 @@ type KeeperSpec struct { } type VRFSpec struct { - ID int32 + ID int32 + + // BatchCoordinatorAddress is the address of the batch vrf coordinator to use. + // This is required if batchFulfillmentEnabled is set to true in the job spec. + BatchCoordinatorAddress *ethkey.EIP55Address `toml:"batchCoordinatorAddress"` + // BatchFulfillmentEnabled indicates to the vrf job to use the batch vrf coordinator + // for fulfilling requests. If set to true, batchCoordinatorAddress must be set in + // the job spec. + BatchFulfillmentEnabled bool `toml:"batchFulfillmentEnabled"` + // BatchFulfillmentGasMultiplier is used to determine the final gas estimate for the batch + // fulfillment. + BatchFulfillmentGasMultiplier float64 `toml:"batchFulfillmentGasMultiplier"` + CoordinatorAddress ethkey.EIP55Address `toml:"coordinatorAddress"` PublicKey secp256k1.PublicKey `toml:"publicKey"` MinIncomingConfirmations uint32 `toml:"minIncomingConfirmations"` @@ -435,8 +447,21 @@ type VRFSpec struct { PollPeriodEnv bool RequestedConfsDelay int64 `toml:"requestedConfsDelay"` // For v2 jobs. Optional, defaults to 0 if not provided. RequestTimeout time.Duration `toml:"requestTimeout"` // Optional, defaults to 24hr if not provided. - CreatedAt time.Time `toml:"-"` - UpdatedAt time.Time `toml:"-"` + + // ChunkSize is the number of pending VRF V2 requests to process in parallel. Optional, defaults + // to 20 if not provided. + ChunkSize uint32 `toml:"chunkSize"` + + // BackoffInitialDelay is the amount of time to wait before retrying a failed request after the + // first failure. V2 only. 
+ BackoffInitialDelay time.Duration `toml:"backoffInitialDelay"` + + // BackoffMaxDelay is the maximum amount of time to wait before retrying a failed request. V2 + // only. + BackoffMaxDelay time.Duration `toml:"backoffMaxDelay"` + + CreatedAt time.Time `toml:"-"` + UpdatedAt time.Time `toml:"-"` } // BlockhashStoreSpec defines the job spec for the blockhash store feeder. diff --git a/core/services/job/orm.go b/core/services/job/orm.go index 489c60b4b69..1ce4978b340 100644 --- a/core/services/job/orm.go +++ b/core/services/job/orm.go @@ -15,6 +15,8 @@ import ( uuid "github.com/satori/go.uuid" "go.uber.org/multierr" + "github.com/smartcontractkit/sqlx" + "github.com/smartcontractkit/chainlink/core/bridges" "github.com/smartcontractkit/chainlink/core/chains/evm" "github.com/smartcontractkit/chainlink/core/config" @@ -28,7 +30,7 @@ import ( "github.com/smartcontractkit/chainlink/core/services/pipeline" relaytypes "github.com/smartcontractkit/chainlink/core/services/relay/types" "github.com/smartcontractkit/chainlink/core/store/models" - "github.com/smartcontractkit/sqlx" + "github.com/smartcontractkit/chainlink/core/utils" ) var ( @@ -179,6 +181,29 @@ func (o *orm) CreateJob(jb *Job, qopts ...pg.QOpt) error { } } + existingSpec := new(OCROracleSpec) + err := tx.Get(existingSpec, `SELECT * FROM ocr_oracle_specs WHERE contract_address = $1 and (evm_chain_id = $2 or evm_chain_id IS NULL) LIMIT 1;`, + jb.OCROracleSpec.ContractAddress, jb.OCROracleSpec.EVMChainID, + ) + if !errors.Is(err, sql.ErrNoRows) { + if err != nil { + return errors.Wrap(err, "failed to validate OffchainreportingOracleSpec on creation") + } + + matchErr := errors.Errorf("a job with contract address %s already exists for chain ID %d", jb.OCROracleSpec.ContractAddress, jb.OCROracleSpec.EVMChainID.ToInt()) + if existingSpec.EVMChainID == nil { + chain, err2 := o.chainSet.Default() + if err2 != nil { + return errors.Wrap(err2, "failed to validate OffchainreportingOracleSpec on creation") + } + if 
jb.OCROracleSpec.EVMChainID.Equal((*utils.Big)(chain.ID())) { + return matchErr + } + } else { + return matchErr + } + } + sql := `INSERT INTO ocr_oracle_specs (contract_address, p2p_bootstrap_peers, is_bootstrap_peer, encrypted_ocr_key_bundle_id, transmitter_address, observation_timeout, blockchain_timeout, contract_config_tracker_subscribe_interval, contract_config_tracker_poll_interval, contract_config_confirmations, evm_chain_id, created_at, updated_at, database_timeout, observation_grace_period, contract_transmitter_transmit_timeout) @@ -186,7 +211,7 @@ func (o *orm) CreateJob(jb *Job, qopts ...pg.QOpt) error { :observation_timeout, :blockchain_timeout, :contract_config_tracker_subscribe_interval, :contract_config_tracker_poll_interval, :contract_config_confirmations, :evm_chain_id, NOW(), NOW(), :database_timeout, :observation_grace_period, :contract_transmitter_transmit_timeout) RETURNING id;` - err := pg.PrepareQueryRowx(tx, sql, &specID, jb.OCROracleSpec) + err = pg.PrepareQueryRowx(tx, sql, &specID, jb.OCROracleSpec) if err != nil { return errors.Wrap(err, "failed to create OffchainreportingOracleSpec") } @@ -266,8 +291,18 @@ func (o *orm) CreateJob(jb *Job, qopts ...pg.QOpt) error { jb.CronSpecID = &specID case VRF: var specID int32 - sql := `INSERT INTO vrf_specs (coordinator_address, public_key, min_incoming_confirmations, evm_chain_id, from_addresses, poll_period, requested_confs_delay, request_timeout, created_at, updated_at) - VALUES (:coordinator_address, :public_key, :min_incoming_confirmations, :evm_chain_id, :from_addresses, :poll_period, :requested_confs_delay, :request_timeout, NOW(), NOW()) + sql := `INSERT INTO vrf_specs ( + coordinator_address, public_key, min_incoming_confirmations, + evm_chain_id, from_addresses, poll_period, requested_confs_delay, + request_timeout, chunk_size, batch_coordinator_address, batch_fulfillment_enabled, + batch_fulfillment_gas_multiplier, backoff_initial_delay, backoff_max_delay, + created_at, updated_at) + 
VALUES ( + :coordinator_address, :public_key, :min_incoming_confirmations, + :evm_chain_id, :from_addresses, :poll_period, :requested_confs_delay, + :request_timeout, :chunk_size, :batch_coordinator_address, :batch_fulfillment_enabled, + :batch_fulfillment_gas_multiplier, :backoff_initial_delay, :backoff_max_delay, + NOW(), NOW()) RETURNING id;` err := pg.PrepareQueryRowx(tx, sql, &specID, toVRFSpecRow(jb.VRFSpec)) diff --git a/core/services/job/runner_integration_test.go b/core/services/job/runner_integration_test.go index 85c394296ca..5949dab95f0 100644 --- a/core/services/job/runner_integration_test.go +++ b/core/services/job/runner_integration_test.go @@ -14,19 +14,19 @@ import ( "testing" "time" - "github.com/smartcontractkit/chainlink/core/chains" evmconfigmocks "github.com/smartcontractkit/chainlink/core/chains/evm/config/mocks" evmmocks "github.com/smartcontractkit/chainlink/core/chains/evm/mocks" + "github.com/smartcontractkit/chainlink/core/logger" ocr2mocks "github.com/smartcontractkit/chainlink/core/services/ocr2/mocks" "github.com/smartcontractkit/chainlink/core/services/ocr2/validate" "github.com/smartcontractkit/chainlink/core/auth" "github.com/smartcontractkit/chainlink/core/bridges" + pkgconfig "github.com/smartcontractkit/chainlink/core/config" "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/internal/testutils" "github.com/smartcontractkit/chainlink/core/internal/testutils/evmtest" "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" - "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/job" "github.com/smartcontractkit/chainlink/core/services/keystore/keys/ethkey" "github.com/smartcontractkit/chainlink/core/services/ocr" @@ -57,7 +57,7 @@ func TestRunner(t *testing.T) { keyStore := cltest.NewKeyStore(t, db, config) ethKeyStore := keyStore.Eth() - ethClient, _ := cltest.NewEthMocksWithDefaultChain(t) + ethClient := 
cltest.NewEthMocksWithDefaultChain(t) ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(cltest.Head(10), nil) ethClient.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(nil, nil) @@ -174,7 +174,7 @@ func TestRunner(t *testing.T) { // Reference a different one cfg := new(evmconfigmocks.ChainScopedConfig) cfg.On("Dev").Return(true) - cfg.On("ChainType").Return(chains.ChainType("")) + cfg.On("ChainType").Return(pkgconfig.ChainType("")) c := new(evmmocks.Chain) c.On("Config").Return(cfg) cs := new(evmmocks.ChainSet) @@ -452,6 +452,7 @@ ds1 -> ds1_parse; nil, cc, logger.TestLogger(t), + config, ) _, err = sd.ServicesForSpec(jb) // We expect this to fail as neither the required vars are not set either via the env nor the job itself. @@ -490,6 +491,7 @@ ds1 -> ds1_parse; monitoringEndpoint, cc, lggr, + config, ) _, err = sd.ServicesForSpec(jb) require.NoError(t, err) @@ -542,6 +544,7 @@ ds1 -> ds1_parse; monitoringEndpoint, cc, lggr, + config, ) _, err = sd.ServicesForSpec(jb) require.NoError(t, err) @@ -576,6 +579,7 @@ ds1 -> ds1_parse; monitoringEndpoint, cc, lggr, + config, ) _, err = sd.ServicesForSpec(jb) require.NoError(t, err) @@ -604,6 +608,7 @@ ds1 -> ds1_parse; monitoringEndpoint, cc, lggr, + config, ) _, err = sd.ServicesForSpec(jb) require.NoError(t, err) @@ -635,6 +640,7 @@ ds1 -> ds1_parse; monitoringEndpoint, cc, lggr, + config, ) services, err := sd.ServicesForSpec(*jb) require.NoError(t, err) diff --git a/core/services/job/spawner_test.go b/core/services/job/spawner_test.go index b16ed866c19..accd3cc317e 100644 --- a/core/services/job/spawner_test.go +++ b/core/services/job/spawner_test.go @@ -60,7 +60,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config) _, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config) - ethClient, _ := cltest.NewEthMocksWithDefaultChain(t) + ethClient := 
cltest.NewEthMocksWithDefaultChain(t) ethClient.On("CallContext", mock.Anything, mock.Anything, "eth_getBlockByNumber", mock.Anything, false). Run(func(args mock.Arguments) { head := args.Get(1).(**evmtypes.Head) @@ -100,7 +100,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { serviceA2 := new(mocks.ServiceCtx) serviceA1.On("Start", mock.Anything).Return(nil).Once() serviceA2.On("Start", mock.Anything).Return(nil).Once().Run(func(mock.Arguments) { eventuallyA.ItHappened() }) - dA := ocr.NewDelegate(nil, orm, nil, nil, nil, monitoringEndpoint, cc, logger.TestLogger(t)) + dA := ocr.NewDelegate(nil, orm, nil, nil, nil, monitoringEndpoint, cc, logger.TestLogger(t), config) delegateA := &delegate{jobA.Type, []job.ServiceCtx{serviceA1, serviceA2}, 0, make(chan struct{}), dA} eventuallyB := cltest.NewAwaiter() serviceB1 := new(mocks.ServiceCtx) @@ -108,7 +108,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { serviceB1.On("Start", mock.Anything).Return(nil).Once() serviceB2.On("Start", mock.Anything).Return(nil).Once().Run(func(mock.Arguments) { eventuallyB.ItHappened() }) - dB := ocr.NewDelegate(nil, orm, nil, nil, nil, monitoringEndpoint, cc, logger.TestLogger(t)) + dB := ocr.NewDelegate(nil, orm, nil, nil, nil, monitoringEndpoint, cc, logger.TestLogger(t), config) delegateB := &delegate{jobB.Type, []job.ServiceCtx{serviceB1, serviceB2}, 0, make(chan struct{}), dB} spawner := job.NewSpawner(orm, config, map[job.Type]job.Delegate{ jobA.Type: delegateA, @@ -163,7 +163,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { lggr := logger.TestLogger(t) orm := job.NewTestORM(t, db, cc, pipeline.NewORM(db, lggr, config), keyStore, config) - d := ocr.NewDelegate(nil, orm, nil, nil, nil, monitoringEndpoint, cc, logger.TestLogger(t)) + d := ocr.NewDelegate(nil, orm, nil, nil, nil, monitoringEndpoint, cc, logger.TestLogger(t), config) delegateA := &delegate{jobA.Type, []job.ServiceCtx{serviceA1, serviceA2}, 0, nil, d} spawner := job.NewSpawner(orm, config, 
map[job.Type]job.Delegate{ jobA.Type: delegateA, @@ -199,7 +199,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { lggr := logger.TestLogger(t) orm := job.NewTestORM(t, db, cc, pipeline.NewORM(db, lggr, config), keyStore, config) - d := ocr.NewDelegate(nil, orm, nil, nil, nil, monitoringEndpoint, cc, logger.TestLogger(t)) + d := ocr.NewDelegate(nil, orm, nil, nil, nil, monitoringEndpoint, cc, logger.TestLogger(t), config) delegateA := &delegate{jobA.Type, []job.ServiceCtx{serviceA1, serviceA2}, 0, nil, d} spawner := job.NewSpawner(orm, config, map[job.Type]job.Delegate{ jobA.Type: delegateA, diff --git a/core/services/keeper/common.go b/core/services/keeper/common.go index 834b2569608..3d2d27a14a0 100644 --- a/core/services/keeper/common.go +++ b/core/services/keeper/common.go @@ -21,5 +21,7 @@ type Config interface { KeeperRegistrySyncInterval() time.Duration KeeperRegistrySyncUpkeepQueueSize() uint32 KeeperCheckUpkeepGasPriceFeatureEnabled() bool + KeeperTurnLookBack() int64 + KeeperTurnFlagEnabled() bool LogSQL() bool } diff --git a/core/services/keeper/delegate.go b/core/services/keeper/delegate.go index 797ac415ae5..4d8d931ea08 100644 --- a/core/services/keeper/delegate.go +++ b/core/services/keeper/delegate.go @@ -95,6 +95,7 @@ func (d *Delegate) ServicesForSpec(spec job.Job) (services []job.ServiceCtx, err MinIncomingConfirmations: minIncomingConfirmations, Logger: svcLogger, SyncUpkeepQueueSize: chain.Config().KeeperRegistrySyncUpkeepQueueSize(), + newTurnEnabled: chain.Config().KeeperTurnFlagEnabled(), }) upkeepExecuter := NewUpkeepExecuter( spec, diff --git a/core/services/keeper/integration_test.go b/core/services/keeper/integration_test.go index e2b940ad373..b2889b27218 100644 --- a/core/services/keeper/integration_test.go +++ b/core/services/keeper/integration_test.go @@ -105,7 +105,7 @@ func TestKeeperEthIntegration(t *testing.T) { backend.Commit() // setup app - config, db := heavyweight.FullTestDB(t, fmt.Sprintf("keeper_eth_integration_%v", 
test.eip1559), true, true) + config, db := heavyweight.FullTestDB(t, fmt.Sprintf("keeper_eth_integration_%v", test.eip1559)) korm := keeper.NewORM(db, logger.TestLogger(t), nil, nil) config.Overrides.GlobalEvmEIP1559DynamicFees = null.BoolFrom(test.eip1559) d := 24 * time.Hour @@ -119,6 +119,10 @@ func TestKeeperEthIntegration(t *testing.T) { config.Overrides.KeeperMaximumGracePeriod = null.IntFrom(0) // test with gas price feature enabled config.Overrides.KeeperCheckUpkeepGasPriceFeatureEnabled = null.BoolFrom(true) + // testing doesn't need to do far look back + config.Overrides.KeeperTurnLookBack = null.IntFrom(0) + // testing new turn taking + config.Overrides.KeeperTurnFlagEnabled = null.BoolFrom(true) // helps prevent missed heads config.Overrides.GlobalEvmHeadTrackerMaxBufferSize = null.IntFrom(100) diff --git a/core/services/keeper/models.go b/core/services/keeper/models.go index 9843efb6a79..71b2841e7b4 100644 --- a/core/services/keeper/models.go +++ b/core/services/keeper/models.go @@ -1,6 +1,15 @@ package keeper -import "github.com/smartcontractkit/chainlink/core/services/keystore/keys/ethkey" +import ( + "database/sql/driver" + "encoding/json" + "fmt" + + "github.com/smartcontractkit/chainlink/core/null" + "github.com/smartcontractkit/chainlink/core/services/keystore/keys/ethkey" +) + +type KeeperIndexMap map[ethkey.EIP55Address]int32 type Registry struct { ID int64 @@ -11,6 +20,7 @@ type Registry struct { JobID int32 KeeperIndex int32 NumKeepers int32 + KeeperIndexMap KeeperIndexMap } type UpkeepRegistration struct { ID int32 @@ -20,5 +30,23 @@ type UpkeepRegistration struct { RegistryID int64 Registry Registry UpkeepID int64 + LastKeeperIndex null.Int64 PositioningConstant int32 } + +func (k *KeeperIndexMap) Scan(val interface{}) error { + switch v := val.(type) { + case []byte: + err := json.Unmarshal(v, &k) + return err + case string: + err := json.Unmarshal([]byte(v), &k) + return err + default: + return fmt.Errorf("unsupported type: %T", v) + } +} 
+ +func (k *KeeperIndexMap) Value() (driver.Value, error) { + return json.Marshal(&k) +} diff --git a/core/services/keeper/orm.go b/core/services/keeper/orm.go index 0e0796a92e9..9bcfa48d163 100644 --- a/core/services/keeper/orm.go +++ b/core/services/keeper/orm.go @@ -44,6 +44,13 @@ func (korm ORM) Registries() ([]Registry, error) { return registries, errors.Wrap(err, "failed to get registries") } +// RegistryByContractAddress returns a single registry based on provided address +func (korm ORM) RegistryByContractAddress(registryAddress ethkey.EIP55Address) (Registry, error) { + var registry Registry + err := korm.q.Get(®istry, `SELECT * FROM keeper_registries WHERE keeper_registries.contract_address = $1`, registryAddress) + return registry, errors.Wrap(err, "failed to get registry") +} + // RegistryForJob returns a specific registry for a job with the given ID func (korm ORM) RegistryForJob(jobID int32) (Registry, error) { var registry Registry @@ -54,13 +61,14 @@ func (korm ORM) RegistryForJob(jobID int32) (Registry, error) { // UpsertRegistry upserts registry by the given input func (korm ORM) UpsertRegistry(registry *Registry) error { stmt := ` -INSERT INTO keeper_registries (job_id, keeper_index, contract_address, from_address, check_gas, block_count_per_turn, num_keepers) VALUES ( -:job_id, :keeper_index, :contract_address, :from_address, :check_gas, :block_count_per_turn, :num_keepers +INSERT INTO keeper_registries (job_id, keeper_index, contract_address, from_address, check_gas, block_count_per_turn, num_keepers, keeper_index_map) VALUES ( +:job_id, :keeper_index, :contract_address, :from_address, :check_gas, :block_count_per_turn, :num_keepers, :keeper_index_map ) ON CONFLICT (job_id) DO UPDATE SET keeper_index = :keeper_index, check_gas = :check_gas, block_count_per_turn = :block_count_per_turn, - num_keepers = :num_keepers + num_keepers = :num_keepers, + keeper_index_map = :keeper_index_map RETURNING * ` err := korm.q.GetNamed(stmt, registry, registry) 
@@ -99,10 +107,57 @@ DELETE FROM upkeep_registrations WHERE registry_id IN ( return rowsAffected, nil } -func (korm ORM) EligibleUpkeepsForRegistry(registryAddress ethkey.EIP55Address, blockNumber, gracePeriod int64) (upkeeps []UpkeepRegistration, err error) { +//EligibleUpkeepsForRegistry fetches eligible upkeeps for processing +//The query checks the following conditions +// - checks the registry address is correct and the registry has some keepers associated +// -- is it my turn AND my keeper was not the last perform for this upkeep OR my keeper was the last before BUT it is past the grace period +// -- OR is it my buddy's turn AND they were the last keeper to do the perform for this upkeep +func (korm ORM) NewEligibleUpkeepsForRegistry(registryAddress ethkey.EIP55Address, blockNumber int64, gracePeriod int64, binaryHash string) (upkeeps []UpkeepRegistration, err error) { stmt := ` SELECT upkeep_registrations.* FROM upkeep_registrations INNER JOIN keeper_registries ON keeper_registries.id = upkeep_registrations.registry_id +WHERE + keeper_registries.contract_address = $1 AND + keeper_registries.num_keepers > 0 AND + (( + keeper_registries.keeper_index = ((CAST(upkeep_registrations.upkeep_id AS bit(32)) # + CAST($4 AS bit(32)))::bigint % keeper_registries.num_keepers) + AND + ( + upkeep_registrations.last_keeper_index IS DISTINCT FROM keeper_registries.keeper_index + OR + (upkeep_registrations.last_keeper_index IS NOT DISTINCT FROM keeper_registries.keeper_index AND upkeep_registrations.last_run_block_height + $2 < $3) + ) + ) + OR + ( + (keeper_registries.keeper_index + 1) % keeper_registries.num_keepers = + ((CAST(upkeep_registrations.upkeep_id AS bit(32)) # + CAST($4 AS bit(32)))::bigint % keeper_registries.num_keepers) + AND + upkeep_registrations.last_keeper_index IS NOT DISTINCT FROM (keeper_registries.keeper_index + 1) % keeper_registries.num_keepers + )) +` + if err = korm.q.Select(&upkeeps, stmt, registryAddress, gracePeriod, blockNumber, binaryHash); 
err != nil { + return upkeeps, errors.Wrap(err, "EligibleUpkeepsForRegistry failed to get upkeep_registrations") + } + if err = loadUpkeepsRegistry(korm.q, upkeeps); err != nil { + return upkeeps, errors.Wrap(err, "EligibleUpkeepsForRegistry failed to load Registry on upkeeps") + } + + rand.Seed(time.Now().UnixNano()) + rand.Shuffle(len(upkeeps), func(i, j int) { + upkeeps[i], upkeeps[j] = upkeeps[j], upkeeps[i] + }) + + return upkeeps, err +} + +func (korm ORM) EligibleUpkeepsForRegistry(registryAddress ethkey.EIP55Address, blockNumber, gracePeriod int64) (upkeeps []UpkeepRegistration, err error) { + err = korm.q.Transaction(func(tx pg.Queryer) error { + stmt := ` +SELECT upkeep_registrations.* FROM upkeep_registrations +INNER JOIN keeper_registries ON keeper_registries.id = upkeep_registrations.registry_id WHERE keeper_registries.contract_address = $1 AND keeper_registries.num_keepers > 0 AND @@ -117,12 +172,14 @@ WHERE ) % keeper_registries.num_keepers ORDER BY upkeep_registrations.id ASC, upkeep_registrations.upkeep_id ASC ` - if err = korm.q.Select(&upkeeps, stmt, registryAddress, gracePeriod, blockNumber); err != nil { - return upkeeps, errors.Wrap(err, "EligibleUpkeepsForRegistry failed to get upkeep_registrations") - } - if err = loadUpkeepsRegistry(korm.q, upkeeps); err != nil { - return upkeeps, errors.Wrap(err, "EligibleUpkeepsForRegistry failed to load Registry on upkeeps") - } + if err = tx.Select(&upkeeps, stmt, registryAddress, gracePeriod, blockNumber); err != nil { + return errors.Wrap(err, "EligibleUpkeepsForRegistry failed to get upkeep_registrations") + } + if err = loadUpkeepsRegistry(tx, upkeeps); err != nil { + return errors.Wrap(err, "EligibleUpkeepsForRegistry failed to load Registry on upkeeps") + } + return nil + }, pg.OptReadOnlyTx()) rand.Seed(time.Now().UnixNano()) rand.Shuffle(len(upkeeps), func(i, j int) { @@ -166,13 +223,14 @@ WHERE registry_id = $1 return nextID, errors.Wrap(err, "LowestUnsyncedID failed") } -func (korm ORM) 
SetLastRunHeightForUpkeepOnJob(jobID int32, upkeepID, height int64, qopts ...pg.QOpt) error { +//SetLastRunInfoForUpkeepOnJob sets the last run block height and the associated keeper index only if the new block height is greater than the previous. +func (korm ORM) SetLastRunInfoForUpkeepOnJob(jobID int32, upkeepID, height int64, fromAddress ethkey.EIP55Address, qopts ...pg.QOpt) error { _, err := korm.q.WithOpts(qopts...).Exec(` -UPDATE upkeep_registrations -SET last_run_block_height = $1 -WHERE upkeep_id = $2 AND -registry_id = ( - SELECT id FROM keeper_registries WHERE job_id = $3 -)`, height, upkeepID, jobID) - return errors.Wrap(err, "SetLastRunHeightForUpkeepOnJob failed") + UPDATE upkeep_registrations + SET last_run_block_height = $1, + last_keeper_index = CAST((SELECT keeper_index_map -> $4 FROM keeper_registries WHERE job_id = $3) as int) + WHERE upkeep_id = $2 AND + registry_id = (SELECT id FROM keeper_registries WHERE job_id = $3) AND + last_run_block_height < $1`, height, upkeepID, jobID, fromAddress.Hex()) + return errors.Wrap(err, "SetLastRunInfoForUpkeepOnJob failed") } diff --git a/core/services/keeper/orm_old_turn_test.go b/core/services/keeper/orm_old_turn_test.go new file mode 100644 index 00000000000..56be073ef48 --- /dev/null +++ b/core/services/keeper/orm_old_turn_test.go @@ -0,0 +1,226 @@ +package keeper_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/core/internal/cltest" + "github.com/smartcontractkit/chainlink/core/services/keeper" +) + +func TestKeeperDB_EligibleUpkeeps_BlockCountPerTurn(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() + + blockheight := int64(63) + gracePeriod := int64(10) + + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + + upkeeps := [5]keeper.UpkeepRegistration{ + newUpkeep(registry, 0), + 
newUpkeep(registry, 1), + newUpkeep(registry, 2), + newUpkeep(registry, 3), + newUpkeep(registry, 4), + } + + upkeeps[0].LastRunBlockHeight = 0 // Never run + upkeeps[1].LastRunBlockHeight = 41 // Run last turn, outside grade period + upkeeps[2].LastRunBlockHeight = 46 // Run last turn, outside grade period + upkeeps[3].LastRunBlockHeight = 59 // Run last turn, inside grace period (EXCLUDE) + upkeeps[4].LastRunBlockHeight = 61 // Run this turn, inside grace period (EXCLUDE) + + for i := range upkeeps { + upkeeps[i].PositioningConstant = int32(i) + err := orm.UpsertUpkeep(&upkeeps[i]) + require.NoError(t, err) + } + + cltest.AssertCount(t, db, "upkeep_registrations", 5) + + eligibleUpkeeps, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, blockheight, gracePeriod) + assert.NoError(t, err) + + // 3 out of 5 are eligible, check that ids are 0,1 or 2 but order is shuffled so can not use equals + require.Len(t, eligibleUpkeeps, 3) + assert.Less(t, eligibleUpkeeps[0].UpkeepID, int64(3)) + assert.Less(t, eligibleUpkeeps[1].UpkeepID, int64(3)) + assert.Less(t, eligibleUpkeeps[2].UpkeepID, int64(3)) + + // preloads registry data + assert.Equal(t, registry.ID, eligibleUpkeeps[0].RegistryID) + assert.Equal(t, registry.ID, eligibleUpkeeps[1].RegistryID) + assert.Equal(t, registry.ID, eligibleUpkeeps[2].RegistryID) + assert.Equal(t, registry.CheckGas, eligibleUpkeeps[0].Registry.CheckGas) + assert.Equal(t, registry.CheckGas, eligibleUpkeeps[1].Registry.CheckGas) + assert.Equal(t, registry.CheckGas, eligibleUpkeeps[2].Registry.CheckGas) + assert.Equal(t, registry.ContractAddress, eligibleUpkeeps[0].Registry.ContractAddress) + assert.Equal(t, registry.ContractAddress, eligibleUpkeeps[1].Registry.ContractAddress) + assert.Equal(t, registry.ContractAddress, eligibleUpkeeps[2].Registry.ContractAddress) +} + +func TestKeeperDB_EligibleUpkeeps_GracePeriod(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, 
config).Eth() + + blockheight := int64(120) + gracePeriod := int64(100) + + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 2, 20) + upkeep1 := newUpkeep(registry, 0) + upkeep1.LastRunBlockHeight = 0 + upkeep2 := newUpkeep(registry, 1) + upkeep2.LastRunBlockHeight = 19 + upkeep3 := newUpkeep(registry, 2) + upkeep3.LastRunBlockHeight = 20 + + upkeeps := [3]keeper.UpkeepRegistration{upkeep1, upkeep2, upkeep3} + for i := range upkeeps { + err := orm.UpsertUpkeep(&upkeeps[i]) + require.NoError(t, err) + } + + cltest.AssertCount(t, db, "upkeep_registrations", 3) + + eligibleUpkeeps, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, blockheight, gracePeriod) + assert.NoError(t, err) + // 2 out of 3 are eligible, check that ids are 0 or 1 but order is shuffled so can not use equals + assert.Len(t, eligibleUpkeeps, 2) + assert.Less(t, eligibleUpkeeps[0].UpkeepID, int64(2)) + assert.Less(t, eligibleUpkeeps[1].UpkeepID, int64(2)) +} + +func TestKeeperDB_EligibleUpkeeps_KeepersRotate(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() + + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 2, 20) + registry.NumKeepers = 5 + require.NoError(t, db.Get(®istry, `UPDATE keeper_registries SET num_keepers = 5 WHERE id = $1 RETURNING *`, registry.ID)) + cltest.MustInsertUpkeepForRegistry(t, db, config, registry) + + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 1) + + // out of 5 valid block ranges, with 5 keepers, we are eligible + // to submit on exactly 1 of them + list1, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 20, 0) + require.NoError(t, err) + list2, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 41, 0) + require.NoError(t, err) + list3, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 62, 0) + require.NoError(t, err) + list4, err := 
orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 83, 0) + require.NoError(t, err) + list5, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 104, 0) + require.NoError(t, err) + + totalEligible := len(list1) + len(list2) + len(list3) + len(list4) + len(list5) + require.Equal(t, 1, totalEligible) +} + +func TestKeeperDB_EligibleUpkeeps_KeepersCycleAllUpkeeps(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() + + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 2, 20) + require.NoError(t, db.Get(®istry, `UPDATE keeper_registries SET num_keepers = 5, keeper_index = 3 WHERE id = $1 RETURNING *`, registry.ID)) + + for i := 0; i < 1000; i++ { + cltest.MustInsertUpkeepForRegistry(t, db, config, registry) + } + + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 1000) + + // in a full cycle, each node should be responsible for each upkeep exactly once + list1, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 20, 0) // someone eligible + require.NoError(t, err) + list2, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 40, 0) // someone eligible + require.NoError(t, err) + list3, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 60, 0) // someone eligible + require.NoError(t, err) + list4, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 80, 0) // someone eligible + require.NoError(t, err) + list5, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 100, 0) // someone eligible + require.NoError(t, err) + + totalEligible := len(list1) + len(list2) + len(list3) + len(list4) + len(list5) + require.Equal(t, 1000, totalEligible) +} + +func TestKeeperDB_EligibleUpkeeps_FiltersByRegistry(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() + + registry1, _ := 
cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + registry2, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + + cltest.MustInsertUpkeepForRegistry(t, db, config, registry1) + cltest.MustInsertUpkeepForRegistry(t, db, config, registry2) + + cltest.AssertCount(t, db, "keeper_registries", 2) + cltest.AssertCount(t, db, "upkeep_registrations", 2) + + list1, err := orm.EligibleUpkeepsForRegistry(registry1.ContractAddress, 20, 0) + require.NoError(t, err) + list2, err := orm.EligibleUpkeepsForRegistry(registry2.ContractAddress, 20, 0) + require.NoError(t, err) + + assert.Equal(t, 1, len(list1)) + assert.Equal(t, 1, len(list2)) +} + +func TestKeeperDB_NextUpkeepID(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() + + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 2, 20) + + nextID, err := orm.LowestUnsyncedID(registry.ID) + require.NoError(t, err) + require.Equal(t, int64(0), nextID) + + upkeep := newUpkeep(registry, 0) + err = orm.UpsertUpkeep(&upkeep) + require.NoError(t, err) + + nextID, err = orm.LowestUnsyncedID(registry.ID) + require.NoError(t, err) + require.Equal(t, int64(1), nextID) + + upkeep = newUpkeep(registry, 3) + err = orm.UpsertUpkeep(&upkeep) + require.NoError(t, err) + + nextID, err = orm.LowestUnsyncedID(registry.ID) + require.NoError(t, err) + require.Equal(t, int64(4), nextID) +} + +func TestKeeperDB_SetLastRunInfoForUpkeepOnJob(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() + + registry, j := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 2, 20) + upkeep := cltest.MustInsertUpkeepForRegistry(t, db, config, registry) + + // check normal behavior + err := orm.SetLastRunInfoForUpkeepOnJob(j.ID, upkeep.UpkeepID, 100, registry.FromAddress) + require.NoError(t, err) + assertLastRunHeight(t, db, upkeep, 100, 0) + // 
check that if we put in an unknown from address nothing breaks + err = orm.SetLastRunInfoForUpkeepOnJob(j.ID, upkeep.UpkeepID, 0, cltest.NewEIP55Address()) + require.NoError(t, err) + assertLastRunHeight(t, db, upkeep, 100, 0) +} diff --git a/core/services/keeper/orm_test.go b/core/services/keeper/orm_test.go index 43cc1680ba2..c0c15e3e19a 100644 --- a/core/services/keeper/orm_test.go +++ b/core/services/keeper/orm_test.go @@ -1,6 +1,8 @@ package keeper_test import ( + "fmt" + "sort" "testing" "time" @@ -17,6 +19,7 @@ import ( "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/keeper" + "github.com/smartcontractkit/chainlink/core/utils" ) var ( @@ -56,10 +59,11 @@ func waitLastRunHeight(t *testing.T, db *sqlx.DB, upkeep keeper.UpkeepRegistrati }, time.Second*2, time.Millisecond*100).Should(gomega.Equal(height)) } -func assertLastRunHeight(t *testing.T, db *sqlx.DB, upkeep keeper.UpkeepRegistration, height int64) { +func assertLastRunHeight(t *testing.T, db *sqlx.DB, upkeep keeper.UpkeepRegistration, lastRunBlockHeight int64, lastKeeperIndex int64) { err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations WHERE id = $1`, upkeep.ID) require.NoError(t, err) - require.Equal(t, height, upkeep.LastRunBlockHeight) + require.Equal(t, lastRunBlockHeight, upkeep.LastRunBlockHeight) + require.Equal(t, lastKeeperIndex, upkeep.LastKeeperIndex.Int64) } func TestKeeperDB_Registries(t *testing.T) { @@ -67,20 +71,33 @@ func TestKeeperDB_Registries(t *testing.T) { db, config, orm := setupKeeperDB(t) ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() - cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore) - cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore) + cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) existingRegistries, err := orm.Registries() 
require.NoError(t, err) require.Equal(t, 2, len(existingRegistries)) } +func TestKeeperDB_RegistryByContractAddress(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() + + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + + registryByContractAddress, err := orm.RegistryByContractAddress(registry.ContractAddress) + require.NoError(t, err) + require.Equal(t, registry, registryByContractAddress) +} + func TestKeeperDB_UpsertUpkeep(t *testing.T) { t.Parallel() db, config, orm := setupKeeperDB(t) ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() - registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore) + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) upkeep := keeper.UpkeepRegistration{ UpkeepID: 0, ExecuteGas: executeGas, @@ -96,7 +113,6 @@ func TestKeeperDB_UpsertUpkeep(t *testing.T) { // update upkeep upkeep.ExecuteGas = 20_000 upkeep.CheckData = common.Hex2Bytes("8888") - upkeep.PositioningConstant = 2 upkeep.LastRunBlockHeight = 2 err := orm.UpsertUpkeep(&upkeep) @@ -108,7 +124,6 @@ func TestKeeperDB_UpsertUpkeep(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(20_000), upkeepFromDB.ExecuteGas) require.Equal(t, "8888", common.Bytes2Hex(upkeepFromDB.CheckData)) - require.Equal(t, int32(2), upkeepFromDB.PositioningConstant) require.Equal(t, int64(1), upkeepFromDB.LastRunBlockHeight) // shouldn't change on upsert } @@ -117,7 +132,7 @@ func TestKeeperDB_BatchDeleteUpkeepsForJob(t *testing.T) { db, config, orm := setupKeeperDB(t) ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() - registry, job := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore) + registry, job := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) for i := int64(0); i < 3; i++ { cltest.MustInsertUpkeepForRegistry(t, db, config, 
registry) @@ -143,7 +158,7 @@ func TestKeeperDB_EligibleUpkeeps_Shuffle(t *testing.T) { blockheight := int64(63) gracePeriod := int64(10) - registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore) + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) ordered := [100]int64{} for i := 0; i < 100; i++ { @@ -152,10 +167,9 @@ func TestKeeperDB_EligibleUpkeeps_Shuffle(t *testing.T) { err := orm.UpsertUpkeep(&k) require.NoError(t, err) } - cltest.AssertCount(t, db, "upkeep_registrations", 100) - eligibleUpkeeps, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, blockheight, gracePeriod) + eligibleUpkeeps, err := orm.NewEligibleUpkeepsForRegistry(registry.ContractAddress, blockheight, gracePeriod, fmt.Sprintf("%b", utils.NewHash().Big())) assert.NoError(t, err) require.Len(t, eligibleUpkeeps, 100) @@ -166,211 +180,182 @@ func TestKeeperDB_EligibleUpkeeps_Shuffle(t *testing.T) { assert.NotEqualValues(t, ordered, shuffled) } -func TestKeeperDB_EligibleUpkeeps_BlockCountPerTurn(t *testing.T) { +func TestKeeperDB_NewEligibleUpkeeps_GracePeriod(t *testing.T) { t.Parallel() db, config, orm := setupKeeperDB(t) ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() - blockheight := int64(63) - gracePeriod := int64(10) - - registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore) - - upkeeps := [5]keeper.UpkeepRegistration{ - newUpkeep(registry, 0), - newUpkeep(registry, 1), - newUpkeep(registry, 2), - newUpkeep(registry, 3), - newUpkeep(registry, 4), - } - - upkeeps[0].LastRunBlockHeight = 0 // Never run - upkeeps[1].LastRunBlockHeight = 41 // Run last turn, outside grade period - upkeeps[2].LastRunBlockHeight = 46 // Run last turn, outside grade period - upkeeps[3].LastRunBlockHeight = 59 // Run last turn, inside grace period (EXCLUDE) - upkeeps[4].LastRunBlockHeight = 61 // Run this turn, inside grace period (EXCLUDE) + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 2, 20) - for 
_, upkeep := range upkeeps { - err := orm.UpsertUpkeep(&upkeep) - require.NoError(t, err) + for i := 0; i < 100; i++ { + cltest.MustInsertUpkeepForRegistry(t, db, config, registry) } - cltest.AssertCount(t, db, "upkeep_registrations", 5) + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 100) - eligibleUpkeeps, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, blockheight, gracePeriod) - assert.NoError(t, err) + // if current keeper index = 0 and all upkeeps last perform was done by index = 0 and still within grace period + upkeep := keeper.UpkeepRegistration{} + require.NoError(t, db.Get(&upkeep, `UPDATE upkeep_registrations SET last_keeper_index = 0, last_run_block_height = 10 RETURNING *`)) + list0, err := orm.NewEligibleUpkeepsForRegistry(registry.ContractAddress, 21, 100, fmt.Sprintf("%b", utils.NewHash().Big())) // none eligible + require.NoError(t, err) + require.Equal(t, 0, len(list0), "should be 0 as all last perform was done by current node") - // 3 out of 5 are eligible, check that ids are 0,1 or 2 but order is shuffled so can not use equals - require.Len(t, eligibleUpkeeps, 3) - assert.Less(t, eligibleUpkeeps[0].UpkeepID, int64(3)) - assert.Less(t, eligibleUpkeeps[1].UpkeepID, int64(3)) - assert.Less(t, eligibleUpkeeps[2].UpkeepID, int64(3)) - - // preloads registry data - assert.Equal(t, registry.ID, eligibleUpkeeps[0].RegistryID) - assert.Equal(t, registry.ID, eligibleUpkeeps[1].RegistryID) - assert.Equal(t, registry.ID, eligibleUpkeeps[2].RegistryID) - assert.Equal(t, registry.CheckGas, eligibleUpkeeps[0].Registry.CheckGas) - assert.Equal(t, registry.CheckGas, eligibleUpkeeps[1].Registry.CheckGas) - assert.Equal(t, registry.CheckGas, eligibleUpkeeps[2].Registry.CheckGas) - assert.Equal(t, registry.ContractAddress, eligibleUpkeeps[0].Registry.ContractAddress) - assert.Equal(t, registry.ContractAddress, eligibleUpkeeps[1].Registry.ContractAddress) - assert.Equal(t, 
registry.ContractAddress, eligibleUpkeeps[2].Registry.ContractAddress) + // once passed grace period + list1, err := orm.NewEligibleUpkeepsForRegistry(registry.ContractAddress, 121, 100, fmt.Sprintf("%b", utils.NewHash().Big())) // none eligible + require.NoError(t, err) + require.NotEqual(t, 0, len(list1), "should get some eligible upkeeps now that they are outside grace period") } -func TestKeeperDB_EligibleUpkeeps_GracePeriod(t *testing.T) { +func TestKeeperDB_EligibleUpkeeps_TurnsRandom(t *testing.T) { t.Parallel() db, config, orm := setupKeeperDB(t) ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() - blockheight := int64(120) - gracePeriod := int64(100) + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 3, 10) - registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore) - upkeep1 := newUpkeep(registry, 0) - upkeep1.LastRunBlockHeight = 0 - upkeep2 := newUpkeep(registry, 1) - upkeep2.LastRunBlockHeight = 19 - upkeep3 := newUpkeep(registry, 2) - upkeep3.LastRunBlockHeight = 20 - - for _, upkeep := range [3]keeper.UpkeepRegistration{upkeep1, upkeep2, upkeep3} { - err := orm.UpsertUpkeep(&upkeep) - require.NoError(t, err) + for i := 0; i < 1000; i++ { + cltest.MustInsertUpkeepForRegistry(t, db, config, registry) } - cltest.AssertCount(t, db, "upkeep_registrations", 3) + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 1000) - eligibleUpkeeps, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, blockheight, gracePeriod) - assert.NoError(t, err) - // 2 out of 3 are eligible, check that ids are 0 or 1 but order is shuffled so can not use equals - assert.Len(t, eligibleUpkeeps, 2) - assert.Less(t, eligibleUpkeeps[0].UpkeepID, int64(2)) - assert.Less(t, eligibleUpkeeps[1].UpkeepID, int64(2)) + // 3 keepers 10 block turns should be different every turn + list1, err := orm.NewEligibleUpkeepsForRegistry(registry.ContractAddress, 20, 100, fmt.Sprintf("%b", 
utils.NewHash().Big())) + require.NoError(t, err) + list2, err := orm.NewEligibleUpkeepsForRegistry(registry.ContractAddress, 31, 100, fmt.Sprintf("%b", utils.NewHash().Big())) + require.NoError(t, err) + list3, err := orm.NewEligibleUpkeepsForRegistry(registry.ContractAddress, 42, 100, fmt.Sprintf("%b", utils.NewHash().Big())) + require.NoError(t, err) + list4, err := orm.NewEligibleUpkeepsForRegistry(registry.ContractAddress, 53, 100, fmt.Sprintf("%b", utils.NewHash().Big())) + require.NoError(t, err) + + // sort before compare + sort.Slice(list1, func(i, j int) bool { + return list1[i].UpkeepID < list1[j].UpkeepID + }) + sort.Slice(list2, func(i, j int) bool { + return list2[i].UpkeepID < list2[j].UpkeepID + }) + sort.Slice(list3, func(i, j int) bool { + return list3[i].UpkeepID < list3[j].UpkeepID + }) + sort.Slice(list4, func(i, j int) bool { + return list4[i].UpkeepID < list4[j].UpkeepID + }) + + assert.NotEqual(t, list1, list2, "list1 vs list2") + assert.NotEqual(t, list1, list3, "list1 vs list3") + assert.NotEqual(t, list1, list4, "list1 vs list4") } -func TestKeeperDB_EligibleUpkeeps_KeepersRotate(t *testing.T) { +func TestKeeperDB_NewEligibleUpkeeps_SkipIfLastPerformedByCurrentKeeper(t *testing.T) { t.Parallel() db, config, orm := setupKeeperDB(t) ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() - registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore) - registry.NumKeepers = 5 - require.NoError(t, db.Get(®istry, `UPDATE keeper_registries SET num_keepers = 5 WHERE id = $1 RETURNING *`, registry.ID)) - cltest.MustInsertUpkeepForRegistry(t, db, config, registry) + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 2, 20) + + for i := 0; i < 100; i++ { + cltest.MustInsertUpkeepForRegistry(t, db, config, registry) + } cltest.AssertCount(t, db, "keeper_registries", 1) - cltest.AssertCount(t, db, "upkeep_registrations", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 100) - // out of 5 valid block ranges, 
with 5 keepers, we are eligible - // to submit on exactly 1 of them - list1, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 20, 0) - require.NoError(t, err) - list2, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 41, 0) - require.NoError(t, err) - list3, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 62, 0) + // if current keeper index = 0 and all upkeeps last perform was done by index = 0 then skip as it would not pass required turn taking + upkeep := keeper.UpkeepRegistration{} + require.NoError(t, db.Get(&upkeep, `UPDATE upkeep_registrations SET last_keeper_index = 0 RETURNING *`)) + list0, err := orm.NewEligibleUpkeepsForRegistry(registry.ContractAddress, 21, 100, fmt.Sprintf("%b", utils.NewHash().Big())) // none eligible require.NoError(t, err) - list4, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 83, 0) - require.NoError(t, err) - list5, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 104, 0) - require.NoError(t, err) - - totalEligible := len(list1) + len(list2) + len(list3) + len(list4) + len(list5) - require.Equal(t, 1, totalEligible) + require.Equal(t, 0, len(list0), "should be 0 as all last perform was done by current node") } -func TestKeeperDB_EligibleUpkeeps_KeepersCycleAllUpkeeps(t *testing.T) { +func TestKeeperDB_NewEligibleUpkeeps_CoverBuddy(t *testing.T) { t.Parallel() db, config, orm := setupKeeperDB(t) ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() - registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore) - require.NoError(t, db.Get(®istry, `UPDATE keeper_registries SET num_keepers = 5, keeper_index = 3 WHERE id = $1 RETURNING *`, registry.ID)) + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 1, 2, 20) - for i := 0; i < 1000; i++ { + for i := 0; i < 100; i++ { cltest.MustInsertUpkeepForRegistry(t, db, config, registry) } cltest.AssertCount(t, db, "keeper_registries", 1) - cltest.AssertCount(t, db, 
"upkeep_registrations", 1000) + cltest.AssertCount(t, db, "upkeep_registrations", 100) - // in a full cycle, each node should be responsible for each upkeep exactly once - list1, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 20, 0) // someone eligible - require.NoError(t, err) - list2, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 40, 0) // someone eligible - require.NoError(t, err) - list3, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 60, 0) // someone eligible - require.NoError(t, err) - list4, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 80, 0) // someone eligible + upkeep := keeper.UpkeepRegistration{} + binaryHash := fmt.Sprintf("%b", utils.NewHash().Big()) + listBefore, err := orm.NewEligibleUpkeepsForRegistry(registry.ContractAddress, 21, 100, binaryHash) // normal require.NoError(t, err) - list5, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 100, 0) // someone eligible + require.NoError(t, db.Get(&upkeep, `UPDATE upkeep_registrations SET last_keeper_index = 0 RETURNING *`)) + listAfter, err := orm.NewEligibleUpkeepsForRegistry(registry.ContractAddress, 21, 100, binaryHash) // covering buddy require.NoError(t, err) - - totalEligible := len(list1) + len(list2) + len(list3) + len(list4) + len(list5) - require.Equal(t, 1000, totalEligible) + require.Greater(t, len(listAfter), len(listBefore), "after our buddy runs all the performs we should have more eligible then a normal turn") } -func TestKeeperDB_EligibleUpkeeps_FiltersByRegistry(t *testing.T) { +func TestKeeperDB_NewEligibleUpkeeps_FirstTurn(t *testing.T) { t.Parallel() db, config, orm := setupKeeperDB(t) ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() - registry1, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore) - registry2, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore) + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 2, 20) - 
cltest.MustInsertUpkeepForRegistry(t, db, config, registry1) - cltest.MustInsertUpkeepForRegistry(t, db, config, registry2) + for i := 0; i < 100; i++ { + cltest.MustInsertUpkeepForRegistry(t, db, config, registry) + } - cltest.AssertCount(t, db, "keeper_registries", 2) - cltest.AssertCount(t, db, "upkeep_registrations", 2) + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 100) - list1, err := orm.EligibleUpkeepsForRegistry(registry1.ContractAddress, 20, 0) + binaryHash := fmt.Sprintf("%b", utils.NewHash().Big()) + // last keeper index is null to simulate a normal first run + listKpr0, err := orm.NewEligibleUpkeepsForRegistry(registry.ContractAddress, 21, 100, binaryHash) // someone eligible only kpr0 turn require.NoError(t, err) - list2, err := orm.EligibleUpkeepsForRegistry(registry2.ContractAddress, 20, 0) - require.NoError(t, err) - - assert.Equal(t, 1, len(list1)) - assert.Equal(t, 1, len(list2)) + require.NotEqual(t, 0, len(listKpr0), "kpr0 should have some eligible as a normal turn") } -func TestKeeperDB_NextUpkeepID(t *testing.T) { +func TestKeeperDB_NewEligibleUpkeeps_FiltersByRegistry(t *testing.T) { t.Parallel() db, config, orm := setupKeeperDB(t) ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() - registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore) + registry1, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + registry2, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) - nextID, err := orm.LowestUnsyncedID(registry.ID) - require.NoError(t, err) - require.Equal(t, int64(0), nextID) + cltest.MustInsertUpkeepForRegistry(t, db, config, registry1) + cltest.MustInsertUpkeepForRegistry(t, db, config, registry2) - upkeep := newUpkeep(registry, 0) - err = orm.UpsertUpkeep(&upkeep) - require.NoError(t, err) + cltest.AssertCount(t, db, "keeper_registries", 2) + cltest.AssertCount(t, db, "upkeep_registrations", 2) - nextID, err = 
orm.LowestUnsyncedID(registry.ID) + binaryHash := fmt.Sprintf("%b", utils.NewHash().Big()) + list1, err := orm.NewEligibleUpkeepsForRegistry(registry1.ContractAddress, 20, 100, binaryHash) require.NoError(t, err) - require.Equal(t, int64(1), nextID) - - upkeep = newUpkeep(registry, 3) - err = orm.UpsertUpkeep(&upkeep) + list2, err := orm.NewEligibleUpkeepsForRegistry(registry2.ContractAddress, 20, 100, binaryHash) require.NoError(t, err) - nextID, err = orm.LowestUnsyncedID(registry.ID) - require.NoError(t, err) - require.Equal(t, int64(4), nextID) + assert.Equal(t, 1, len(list1)) + assert.Equal(t, 1, len(list2)) } -func TestKeeperDB_SetLastRunHeightForUpkeepOnJob(t *testing.T) { +func TestKeeperDB_NewSetLastRunInfoForUpkeepOnJob(t *testing.T) { t.Parallel() db, config, orm := setupKeeperDB(t) ethKeyStore := cltest.NewKeyStore(t, db, config).Eth() - registry, j := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore) + registry, j := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) upkeep := cltest.MustInsertUpkeepForRegistry(t, db, config, registry) - orm.SetLastRunHeightForUpkeepOnJob(j.ID, upkeep.UpkeepID, 100) - assertLastRunHeight(t, db, upkeep, 100) - orm.SetLastRunHeightForUpkeepOnJob(j.ID, upkeep.UpkeepID, 0) - assertLastRunHeight(t, db, upkeep, 0) + // update + require.NoError(t, orm.SetLastRunInfoForUpkeepOnJob(j.ID, upkeep.UpkeepID, 100, registry.FromAddress)) + assertLastRunHeight(t, db, upkeep, 100, 0) + // update to lower block not allowed + require.NoError(t, orm.SetLastRunInfoForUpkeepOnJob(j.ID, upkeep.UpkeepID, 0, registry.FromAddress)) + assertLastRunHeight(t, db, upkeep, 100, 0) + // update to higher block allowed + require.NoError(t, orm.SetLastRunInfoForUpkeepOnJob(j.ID, upkeep.UpkeepID, 101, registry.FromAddress)) + assertLastRunHeight(t, db, upkeep, 101, 0) } diff --git a/core/services/keeper/registry_synchronizer_core.go b/core/services/keeper/registry_synchronizer_core.go index 6ff2f9053b4..e84ee85bce1 100644 --- 
a/core/services/keeper/registry_synchronizer_core.go +++ b/core/services/keeper/registry_synchronizer_core.go @@ -22,10 +22,10 @@ var ( // MailRoom holds the log mailboxes for all the log types that keeper cares about type MailRoom struct { - mbUpkeepCanceled *utils.Mailbox - mbSyncRegistry *utils.Mailbox - mbUpkeepPerformed *utils.Mailbox - mbUpkeepRegistered *utils.Mailbox + mbUpkeepCanceled *utils.Mailbox[log.Broadcast] + mbSyncRegistry *utils.Mailbox[log.Broadcast] + mbUpkeepPerformed *utils.Mailbox[log.Broadcast] + mbUpkeepRegistered *utils.Mailbox[log.Broadcast] } type RegistrySynchronizerOptions struct { @@ -38,10 +38,12 @@ type RegistrySynchronizerOptions struct { MinIncomingConfirmations uint32 Logger logger.Logger SyncUpkeepQueueSize uint32 + newTurnEnabled bool } type RegistrySynchronizer struct { chStop chan struct{} + newTurnEnabled bool contract *keeper_registry_wrapper.KeeperRegistry interval time.Duration job job.Job @@ -59,10 +61,10 @@ type RegistrySynchronizer struct { // NewRegistrySynchronizer is the constructor of RegistrySynchronizer func NewRegistrySynchronizer(opts RegistrySynchronizerOptions) *RegistrySynchronizer { mailRoom := MailRoom{ - mbUpkeepCanceled: utils.NewMailbox(50), - mbSyncRegistry: utils.NewMailbox(1), - mbUpkeepPerformed: utils.NewMailbox(300), - mbUpkeepRegistered: utils.NewMailbox(50), + mbUpkeepCanceled: utils.NewMailbox[log.Broadcast](50), + mbSyncRegistry: utils.NewMailbox[log.Broadcast](1), + mbUpkeepPerformed: utils.NewMailbox[log.Broadcast](300), + mbUpkeepRegistered: utils.NewMailbox[log.Broadcast](50), } return &RegistrySynchronizer{ chStop: make(chan struct{}), @@ -76,6 +78,7 @@ func NewRegistrySynchronizer(opts RegistrySynchronizerOptions) *RegistrySynchron orm: opts.ORM, logger: logger.Sugared(opts.Logger.Named("RegistrySynchronizer")), syncUpkeepQueueSize: opts.SyncUpkeepQueueSize, + newTurnEnabled: opts.newTurnEnabled, } } @@ -85,24 +88,41 @@ func (rs *RegistrySynchronizer) Start(context.Context) error { 
rs.wgDone.Add(2) go rs.run() - logListenerOpts := log.ListenerOpts{ - Contract: rs.contract.Address(), - ParseLog: rs.contract.ParseLog, - LogsWithTopics: map[common.Hash][][]log.Topic{ - keeper_registry_wrapper.KeeperRegistryKeepersUpdated{}.Topic(): nil, - keeper_registry_wrapper.KeeperRegistryConfigSet{}.Topic(): nil, - keeper_registry_wrapper.KeeperRegistryUpkeepCanceled{}.Topic(): nil, - keeper_registry_wrapper.KeeperRegistryUpkeepRegistered{}.Topic(): nil, - keeper_registry_wrapper.KeeperRegistryUpkeepPerformed{}.Topic(): { - {}, - {}, - { - log.Topic(rs.job.KeeperSpec.FromAddress.Hash()), + var logListenerOpts log.ListenerOpts + if rs.newTurnEnabled { + logListenerOpts = log.ListenerOpts{ + Contract: rs.contract.Address(), + ParseLog: rs.contract.ParseLog, + LogsWithTopics: map[common.Hash][][]log.Topic{ + keeper_registry_wrapper.KeeperRegistryKeepersUpdated{}.Topic(): nil, + keeper_registry_wrapper.KeeperRegistryConfigSet{}.Topic(): nil, + keeper_registry_wrapper.KeeperRegistryUpkeepCanceled{}.Topic(): nil, + keeper_registry_wrapper.KeeperRegistryUpkeepRegistered{}.Topic(): nil, + keeper_registry_wrapper.KeeperRegistryUpkeepPerformed{}.Topic(): nil, + }, + MinIncomingConfirmations: rs.minIncomingConfirmations, + } + } else { + logListenerOpts = log.ListenerOpts{ + Contract: rs.contract.Address(), + ParseLog: rs.contract.ParseLog, + LogsWithTopics: map[common.Hash][][]log.Topic{ + keeper_registry_wrapper.KeeperRegistryKeepersUpdated{}.Topic(): nil, + keeper_registry_wrapper.KeeperRegistryConfigSet{}.Topic(): nil, + keeper_registry_wrapper.KeeperRegistryUpkeepCanceled{}.Topic(): nil, + keeper_registry_wrapper.KeeperRegistryUpkeepRegistered{}.Topic(): nil, + keeper_registry_wrapper.KeeperRegistryUpkeepPerformed{}.Topic(): { + {}, + {}, + { + log.Topic(rs.job.KeeperSpec.FromAddress.Hash()), + }, }, }, - }, - MinIncomingConfirmations: rs.minIncomingConfirmations, + MinIncomingConfirmations: rs.minIncomingConfirmations, + } } + lbUnsubscribe := 
rs.logBroadcaster.Register(rs, logListenerOpts) go func() { diff --git a/core/services/keeper/registry_synchronizer_process_logs.go b/core/services/keeper/registry_synchronizer_process_logs.go index 324facbaa41..f064ee4731f 100644 --- a/core/services/keeper/registry_synchronizer_process_logs.go +++ b/core/services/keeper/registry_synchronizer_process_logs.go @@ -6,6 +6,7 @@ import ( "github.com/smartcontractkit/chainlink/core/chains/evm/log" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/keeper_registry_wrapper" + "github.com/smartcontractkit/chainlink/core/services/keystore/keys/ethkey" ) func (rs *RegistrySynchronizer) processLogs() { @@ -20,15 +21,10 @@ func (rs *RegistrySynchronizer) processLogs() { func (rs *RegistrySynchronizer) handleSyncRegistryLog(done func()) { defer done() - i, exists := rs.mailRoom.mbSyncRegistry.Retrieve() + broadcast, exists := rs.mailRoom.mbSyncRegistry.Retrieve() if !exists { return } - broadcast, ok := i.(log.Broadcast) - if !ok { - rs.logger.AssumptionViolationf("expected log.Broadcast but got %T", broadcast) - return - } txHash := broadcast.RawLog().TxHash.Hex() rs.logger.Debugw("processing SyncRegistry log", "txHash", txHash) was, err := rs.logBroadcaster.WasAlreadyConsumed(broadcast) @@ -52,15 +48,10 @@ func (rs *RegistrySynchronizer) handleSyncRegistryLog(done func()) { func (rs *RegistrySynchronizer) handleUpkeepCanceledLogs(done func()) { defer done() for { - i, exists := rs.mailRoom.mbUpkeepCanceled.Retrieve() + broadcast, exists := rs.mailRoom.mbUpkeepCanceled.Retrieve() if !exists { return } - broadcast, ok := i.(log.Broadcast) - if !ok { - rs.logger.AssumptionViolationf("expected log.Broadcast but got %T", broadcast) - continue - } rs.handleUpkeepCancelled(broadcast) } } @@ -101,15 +92,10 @@ func (rs *RegistrySynchronizer) handleUpkeepRegisteredLogs(done func()) { return } for { - i, exists := rs.mailRoom.mbUpkeepRegistered.Retrieve() + broadcast, exists := 
rs.mailRoom.mbUpkeepRegistered.Retrieve() if !exists { return } - broadcast, ok := i.(log.Broadcast) - if !ok { - rs.logger.AssumptionViolationf("expected log.Broadcast but got %T", broadcast) - continue - } rs.HandleUpkeepRegistered(broadcast, registry) } } @@ -143,22 +129,17 @@ func (rs *RegistrySynchronizer) HandleUpkeepRegistered(broadcast log.Broadcast, func (rs *RegistrySynchronizer) handleUpkeepPerformedLogs(done func()) { defer done() for { - i, exists := rs.mailRoom.mbUpkeepPerformed.Retrieve() + broadcast, exists := rs.mailRoom.mbUpkeepPerformed.Retrieve() if !exists { return } - broadcast, ok := i.(log.Broadcast) - if !ok { - rs.logger.AssumptionViolationf("expected log.Broadcast but got %T", broadcast) - continue - } rs.handleUpkeepPerformed(broadcast) } } func (rs *RegistrySynchronizer) handleUpkeepPerformed(broadcast log.Broadcast) { txHash := broadcast.RawLog().TxHash.Hex() - rs.logger.Debugw("processing UpkeepPerformed log", "txHash", txHash) + rs.logger.Debugw("processing UpkeepPerformed log", "jobID", rs.job.ID, "txHash", txHash) was, err := rs.logBroadcaster.WasAlreadyConsumed(broadcast) if err != nil { rs.logger.With("error", err).Warn("unable to check if log was consumed") @@ -174,13 +155,16 @@ func (rs *RegistrySynchronizer) handleUpkeepPerformed(broadcast log.Broadcast) { rs.logger.AssumptionViolationf("expected UpkeepPerformed log but got %T", log) return } - - // set last run to 0 so that keeper can resume checkUpkeep() - err = rs.orm.SetLastRunHeightForUpkeepOnJob(rs.job.ID, log.Id.Int64(), 0) + err = rs.orm.SetLastRunInfoForUpkeepOnJob(rs.job.ID, log.Id.Int64(), int64(broadcast.RawLog().BlockNumber), ethkey.EIP55AddressFromAddress(log.From)) if err != nil { rs.logger.With("error", err).Error("failed to set last run to 0") return } + rs.logger.Debugw("updated db for UpkeepPerformed log", + "jobID", rs.job.ID, + "upkeepID", log.Id.Int64(), + "blockNumber", int64(broadcast.RawLog().BlockNumber), + "fromAddr", 
ethkey.EIP55AddressFromAddress(log.From)) if err := rs.logBroadcaster.MarkConsumed(broadcast); err != nil { rs.logger.With("error", err).With("log", broadcast.String()).Error("unable to mark KeeperRegistryUpkeepPerformed log as consumed") diff --git a/core/services/keeper/registry_synchronizer_sync.go b/core/services/keeper/registry_synchronizer_sync.go index f805eae98a6..092d9ed82b6 100644 --- a/core/services/keeper/registry_synchronizer_sync.go +++ b/core/services/keeper/registry_synchronizer_sync.go @@ -147,7 +147,9 @@ func (rs *RegistrySynchronizer) newRegistryFromChain() (Registry, error) { return Registry{}, errors.Wrap(err, "failed to get keeper list") } keeperIndex := int32(-1) + keeperMap := map[ethkey.EIP55Address]int32{} for idx, address := range keeperAddresses { + keeperMap[ethkey.EIP55AddressFromAddress(address)] = int32(idx) if address == fromAddress.Address() { keeperIndex = int32(idx) } @@ -164,6 +166,7 @@ func (rs *RegistrySynchronizer) newRegistryFromChain() (Registry, error) { JobID: rs.job.ID, KeeperIndex: keeperIndex, NumKeepers: int32(len(keeperAddresses)), + KeeperIndexMap: keeperMap, }, nil } diff --git a/core/services/keeper/registry_synchronizer_test.go b/core/services/keeper/registry_synchronizer_test.go index 93741bd7caa..cf014e26689 100644 --- a/core/services/keeper/registry_synchronizer_test.go +++ b/core/services/keeper/registry_synchronizer_test.go @@ -371,8 +371,8 @@ func Test_RegistrySynchronizer_UpkeepPerformedLog(t *testing.T) { cfg := cltest.NewTestGeneralConfig(t) head := cltest.MustInsertHead(t, db, cfg, 1) - rawLog := types.Log{BlockHash: head.Hash} - log := keeper_registry_wrapper.KeeperRegistryUpkeepPerformed{Id: big.NewInt(0)} + rawLog := types.Log{BlockHash: head.Hash, BlockNumber: 200} + log := keeper_registry_wrapper.KeeperRegistryUpkeepPerformed{Id: big.NewInt(0), From: fromAddress} logBroadcast := new(logmocks.Broadcast) logBroadcast.On("DecodedLog").Return(&log) logBroadcast.On("RawLog").Return(rawLog) @@ -388,6 
+388,13 @@ func Test_RegistrySynchronizer_UpkeepPerformedLog(t *testing.T) { err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) require.NoError(t, err) return upkeep.LastRunBlockHeight + }, cltest.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(int64(200))) + + g.Eventually(func() int64 { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + require.NoError(t, err) + return upkeep.LastKeeperIndex.Int64 }, cltest.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(int64(0))) ethMock.AssertExpectations(t) diff --git a/core/services/keeper/upkeep_executer.go b/core/services/keeper/upkeep_executer.go index 9aa633d99b4..1fc72d194f3 100644 --- a/core/services/keeper/upkeep_executer.go +++ b/core/services/keeper/upkeep_executer.go @@ -2,6 +2,7 @@ package keeper import ( "context" + "fmt" "math/big" "strconv" "sync" @@ -52,7 +53,7 @@ type UpkeepExecuter struct { headBroadcaster httypes.HeadBroadcasterRegistry gasEstimator gas.Estimator job job.Job - mailbox *utils.Mailbox + mailbox *utils.Mailbox[*evmtypes.Head] orm ORM pr pipeline.Runner logger logger.Logger @@ -78,7 +79,7 @@ func NewUpkeepExecuter( headBroadcaster: headBroadcaster, gasEstimator: gasEstimator, job: job, - mailbox: utils.NewMailbox(1), + mailbox: utils.NewMailbox[*evmtypes.Head](1), config: config, orm: orm, pr: pr, @@ -133,26 +134,48 @@ func (ex *UpkeepExecuter) run() { func (ex *UpkeepExecuter) processActiveUpkeeps() { // Keepers could miss their turn in the turn taking algo if they are too overloaded // with work because processActiveUpkeeps() blocks - item, exists := ex.mailbox.Retrieve() + head, exists := ex.mailbox.Retrieve() if !exists { ex.logger.Info("no head to retrieve. 
It might have been skipped") return } - head := evmtypes.AsHead(item) - ex.logger.Debugw("checking active upkeeps", "blockheight", head.Number) - activeUpkeeps, err := ex.orm.EligibleUpkeepsForRegistry( - ex.job.KeeperSpec.ContractAddress, - head.Number, - ex.config.KeeperMaximumGracePeriod(), - ) + registry, err := ex.orm.RegistryByContractAddress(ex.job.KeeperSpec.ContractAddress) if err != nil { - ex.logger.With("error", err).Error("unable to load active registrations") + ex.logger.With("error", err).Error("unable to load registry") return } + var activeUpkeeps []UpkeepRegistration + if ex.config.KeeperTurnFlagEnabled() { + turnBinary, err2 := ex.turnBlockHashBinary(registry, head, ex.config.KeeperTurnLookBack()) + if err2 != nil { + ex.logger.With("error", err2).Error("unable to get turn block number hash") + return + } + activeUpkeeps, err2 = ex.orm.NewEligibleUpkeepsForRegistry( + ex.job.KeeperSpec.ContractAddress, + head.Number, + ex.config.KeeperMaximumGracePeriod(), + turnBinary) + if err2 != nil { + ex.logger.With("error", err2).Error("unable to load active registrations") + return + } + } else { + activeUpkeeps, err = ex.orm.EligibleUpkeepsForRegistry( + ex.job.KeeperSpec.ContractAddress, + head.Number, + ex.config.KeeperMaximumGracePeriod(), + ) + if err != nil { + ex.logger.With("error", err).Error("unable to load active registrations") + return + } + } + wg := sync.WaitGroup{} wg.Add(len(activeUpkeeps)) done := func() { @@ -172,8 +195,8 @@ func (ex *UpkeepExecuter) execute(upkeep UpkeepRegistration, head *evmtypes.Head defer done() start := time.Now() - svcLogger := ex.logger.With("blockNum", head.Number, "upkeepID", upkeep.UpkeepID) - svcLogger.Debug("checking upkeep") + svcLogger := ex.logger.With("jobID", ex.job.ID, "blockNum", head.Number, "upkeepID", upkeep.UpkeepID) + svcLogger.Debug("checking upkeep", "lastRunBlockHeight", upkeep.LastRunBlockHeight, "lastKeeperIndex", upkeep.LastKeeperIndex) ctxService, cancel := 
utils.ContextFromChanWithDeadline(ex.chStop, time.Minute) defer cancel() @@ -227,10 +250,11 @@ func (ex *UpkeepExecuter) execute(upkeep UpkeepRegistration, head *evmtypes.Head // Only after task runs where a tx was broadcast if run.State == pipeline.RunStatusCompleted { - err := ex.orm.SetLastRunHeightForUpkeepOnJob(ex.job.ID, upkeep.UpkeepID, head.Number, pg.WithParentCtx(ctxService)) + err := ex.orm.SetLastRunInfoForUpkeepOnJob(ex.job.ID, upkeep.UpkeepID, head.Number, upkeep.Registry.FromAddress, pg.WithParentCtx(ctxService)) if err != nil { svcLogger.With("error", err).Error("failed to set last run height for upkeep") } + svcLogger.Debugw("execute pipeline status completed", "fromAddr", upkeep.Registry.FromAddress) elapsed := time.Since(start) promCheckUpkeepExecutionTime. @@ -270,3 +294,14 @@ func addBuffer(val *big.Int, prct uint32) *big.Int { 100, ) } + +func (ex *UpkeepExecuter) turnBlockHashBinary(registry Registry, head *evmtypes.Head, lookback int64) (string, error) { + turnBlock := head.Number - (head.Number % int64(registry.BlockCountPerTurn)) - lookback + block, err := ex.ethClient.BlockByNumber(context.Background(), big.NewInt(turnBlock)) + if err != nil { + return "", err + } + hashAtHeight := block.Hash() + binaryString := fmt.Sprintf("%b", hashAtHeight.Big()) + return binaryString, nil +} diff --git a/core/services/keeper/upkeep_executer_test.go b/core/services/keeper/upkeep_executer_test.go index 2985ce6ba4a..fe4f05b92c9 100644 --- a/core/services/keeper/upkeep_executer_test.go +++ b/core/services/keeper/upkeep_executer_test.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/onsi/gomega" "github.com/smartcontractkit/sqlx" "github.com/stretchr/testify/assert" @@ -57,10 +58,16 @@ func setup(t *testing.T) ( ) { cfg := cltest.NewTestGeneralConfig(t) cfg.Overrides.KeeperMaximumGracePeriod = null.IntFrom(0) + cfg.Overrides.KeeperTurnLookBack = 
null.IntFrom(0) + cfg.Overrides.KeeperTurnFlagEnabled = null.BoolFrom(true) cfg.Overrides.KeeperCheckUpkeepGasPriceFeatureEnabled = null.BoolFrom(true) db := pgtest.NewSqlxDB(t) keyStore := cltest.NewKeyStore(t, db, cfg) ethClient := cltest.NewEthClientMockWithDefaultChain(t) + block := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(1), + }) + ethClient.On("BlockByNumber", mock.Anything, mock.Anything).Maybe().Return(block, nil) txm := new(txmmocks.TxManager) txm.Test(t) estimator := new(gasmocks.Estimator) @@ -75,7 +82,7 @@ func setup(t *testing.T) ( jpv2 := cltest.NewJobPipelineV2(t, cfg, cc, db, keyStore) ch := evmtest.MustGetDefaultChain(t, cc) orm := keeper.NewORM(db, logger.TestLogger(t), ch.Config(), txmgr.SendEveryStrategy{}) - registry, job := cltest.MustInsertKeeperRegistry(t, db, orm, keyStore.Eth()) + registry, job := cltest.MustInsertKeeperRegistry(t, db, orm, keyStore.Eth(), 0, 1, 20) lggr := logger.TestLogger(t) executer := keeper.NewUpkeepExecuter(job, orm, jpv2.Pr, ethClient, ch.HeadBroadcaster(), ch.TxManager().GetGasEstimator(), lggr, ch.Config()) upkeep := cltest.MustInsertUpkeepForRegistry(t, db, ch.Config(), registry) diff --git a/core/services/keeper/validate.go b/core/services/keeper/validate.go index ff5bed3738d..bb810fedf57 100644 --- a/core/services/keeper/validate.go +++ b/core/services/keeper/validate.go @@ -40,7 +40,7 @@ perform_upkeep_tx [type=ethtx evmChainID="$(jobSpec.evmChainID)" data="$(encode_perform_upkeep_tx)" gasLimit="$(jobSpec.performUpkeepGasLimit)" - txMeta="{\"jobID\":$(jobSpec.jobID)}"] + txMeta="{\"jobID\":$(jobSpec.jobID),\"upkeepID\":$(jobSpec.upkeepID)}"] encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx` ) diff --git a/core/services/keeper/validate_test.go b/core/services/keeper/validate_test.go index e26d12dac43..a7c94a90b68 100644 --- a/core/services/keeper/validate_test.go +++ b/core/services/keeper/validate_test.go @@ -87,7 +87,7 
@@ perform_upkeep_tx [type=ethtx from="[$(jobSpec.fromAddress)]" evmChainID="$(jobSpec.evmChainID)" data="$(encode_perform_upkeep_tx)" - txMeta="{\\"jobID\\":$(jobSpec.jobID)}"] + txMeta="{\\"jobID\\":$(jobSpec.jobID),\\"upkeepID\\":$(jobSpec.upkeepID)}"] encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx """ `, @@ -136,7 +136,7 @@ perform_upkeep_tx [type=ethtx minConfirmations=0 to="$(jobSpec.contractAddress)" data="$(encode_perform_upkeep_tx)" - txMeta="{\\"jobID\\":$(jobSpec.jobID)}"] + txMeta="{\\"jobID\\":$(jobSpec.jobID),\\"upkeepID\\":$(jobSpec.upkeepID)}"] encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx """ `, diff --git a/core/services/keystore/eth.go b/core/services/keystore/eth.go index ea7b3fde08f..5c0ffad0fb8 100644 --- a/core/services/keystore/eth.go +++ b/core/services/keystore/eth.go @@ -32,9 +32,9 @@ type Eth interface { SignTx(fromAddress common.Address, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) - SendingKeys() (keys []ethkey.KeyV2, err error) + SendingKeys(chainID *big.Int) (keys []ethkey.KeyV2, err error) FundingKeys() (keys []ethkey.KeyV2, err error) - GetRoundRobinAddress(addresses ...common.Address) (address common.Address, err error) + GetRoundRobinAddress(chainID *big.Int, addresses ...common.Address) (address common.Address, err error) GetState(id string) (ethkey.State, error) SetState(ethkey.State) error @@ -131,7 +131,7 @@ func (ks *eth) EnsureKeys(chainID *big.Int) (err error) { var fundingKey ethkey.KeyV2 // check & setup sending key - sendingKeys := ks.sendingKeys() + sendingKeys := ks.sendingKeys(chainID) if len(sendingKeys) > 0 { sendingKey = sendingKeys[0] sendDidExist = true @@ -258,13 +258,15 @@ func (ks *eth) SignTx(address common.Address, tx *types.Transaction, chainID *bi return types.SignTx(tx, signer, key.ToEcdsaPrivKey()) } -func (ks *eth) SendingKeys() 
(sendingKeys []ethkey.KeyV2, err error) { +// SendingKeys returns all sending keys for the given chain +// If chainID is nil, returns all sending keys for all chains +func (ks *eth) SendingKeys(chainID *big.Int) (sendingKeys []ethkey.KeyV2, err error) { ks.lock.RLock() defer ks.lock.RUnlock() if ks.isLocked() { return nil, ErrLocked } - return ks.sendingKeys(), nil + return ks.sendingKeys(chainID), nil } func (ks *eth) FundingKeys() (fundingKeys []ethkey.KeyV2, err error) { @@ -276,7 +278,7 @@ func (ks *eth) FundingKeys() (fundingKeys []ethkey.KeyV2, err error) { return ks.fundingKeys(), nil } -func (ks *eth) GetRoundRobinAddress(whitelist ...common.Address) (common.Address, error) { +func (ks *eth) GetRoundRobinAddress(chainID *big.Int, whitelist ...common.Address) (common.Address, error) { ks.lock.Lock() defer ks.lock.Unlock() if ks.isLocked() { @@ -285,9 +287,9 @@ func (ks *eth) GetRoundRobinAddress(whitelist ...common.Address) (common.Address var keys []ethkey.KeyV2 if len(whitelist) == 0 { - keys = ks.sendingKeys() + keys = ks.sendingKeys(chainID) } else if len(whitelist) > 0 { - for _, k := range ks.sendingKeys() { + for _, k := range ks.sendingKeys(chainID) { for _, addr := range whitelist { if addr == k.Address.Address() { keys = append(keys, k) @@ -297,7 +299,17 @@ func (ks *eth) GetRoundRobinAddress(whitelist ...common.Address) (common.Address } if len(keys) == 0 { - return common.Address{}, errors.New("no keys available") + var err error + if chainID == nil && len(whitelist) == 0 { + err = errors.New("no sending keys available") + } else if chainID == nil { + err = errors.Errorf("no sending keys available that match whitelist: %v", whitelist) + } else if len(whitelist) == 0 { + err = errors.Errorf("no sending keys available for chain %s", chainID.String()) + } else { + err = errors.Errorf("no sending keys available for chain %s that match whitelist: %v", chainID.String(), whitelist) + } + return common.Address{}, err } sort.SliceStable(keys, func(i, j 
int) bool { @@ -425,9 +437,11 @@ func (ks *eth) fundingKeys() (fundingKeys []ethkey.KeyV2) { } // caller must hold lock! -func (ks *eth) sendingKeys() (sendingKeys []ethkey.KeyV2) { +// if chainID is nil, returns keys for all chains +func (ks *eth) sendingKeys(chainID *big.Int) (sendingKeys []ethkey.KeyV2) { for _, k := range ks.keyRing.Eth { - if !ks.keyStates.Eth[k.ID()].IsFunding { + state := ks.keyStates.Eth[k.ID()] + if !state.IsFunding && (chainID == nil || (((*big.Int)(&state.EVMChainID)).Cmp(chainID) == 0)) { sendingKeys = append(sendingKeys, k) } } diff --git a/core/services/keystore/eth_test.go b/core/services/keystore/eth_test.go index ad69423db66..2e0730c296c 100644 --- a/core/services/keystore/eth_test.go +++ b/core/services/keystore/eth_test.go @@ -106,7 +106,7 @@ func Test_EthKeyStore(t *testing.T) { defer reset() err := ethKeyStore.EnsureKeys(&cltest.FixtureChainID) assert.NoError(t, err) - sendingKeys1, err := ethKeyStore.SendingKeys() + sendingKeys1, err := ethKeyStore.SendingKeys(nil) assert.NoError(t, err) require.Equal(t, 1, len(sendingKeys1)) @@ -114,12 +114,34 @@ func Test_EthKeyStore(t *testing.T) { err = ethKeyStore.EnsureKeys(&cltest.FixtureChainID) assert.NoError(t, err) - sendingKeys2, err := ethKeyStore.SendingKeys() + sendingKeys2, err := ethKeyStore.SendingKeys(nil) assert.NoError(t, err) require.Equal(t, 1, len(sendingKeys2)) require.Equal(t, sendingKeys1, sendingKeys2) }) + + t.Run("SendingKeys with specified chain ID", func(t *testing.T) { + defer reset() + key, err := ethKeyStore.Create(testutils.FixtureChainID) + require.NoError(t, err) + key2, err := ethKeyStore.Create(big.NewInt(1337)) + require.NoError(t, err) + + keys, err := ethKeyStore.SendingKeys(testutils.FixtureChainID) + require.NoError(t, err) + require.Len(t, keys, 1) + require.Equal(t, key, keys[0]) + + keys, err = ethKeyStore.SendingKeys(big.NewInt(1337)) + require.NoError(t, err) + require.Len(t, keys, 1) + require.Equal(t, key2, keys[0]) + + keys, err = 
ethKeyStore.SendingKeys(nil) + require.NoError(t, err) + require.Len(t, keys, 2) + }) } func Test_EthKeyStore_GetRoundRobinAddress(t *testing.T) { @@ -132,22 +154,25 @@ func Test_EthKeyStore_GetRoundRobinAddress(t *testing.T) { ethKeyStore := keyStore.Eth() t.Run("should error when no addresses", func(t *testing.T) { - _, err := ethKeyStore.GetRoundRobinAddress() + _, err := ethKeyStore.GetRoundRobinAddress(nil) require.Error(t, err) }) - // create 4 keys - 1 funding and 3 sending + // create 4 keys - 1 funding and 2 sending err := ethKeyStore.EnsureKeys(&cltest.FixtureChainID) require.NoError(t, err) - sendingKeys, err := ethKeyStore.SendingKeys() + sendingKeys, err := ethKeyStore.SendingKeys(nil) assert.NoError(t, err) k1 := sendingKeys[0] k2, _ := cltest.MustInsertRandomKey(t, ethKeyStore) - cltest.MustInsertRandomKey(t, ethKeyStore) - sendingKeys, err = ethKeyStore.SendingKeys() + // create 1 funding and 1 sending key for a different chain + err = ethKeyStore.EnsureKeys(testutils.SimulatedChainID) + require.NoError(t, err) + + sendingKeys, err = ethKeyStore.SendingKeys(nil) assert.NoError(t, err) require.Equal(t, 3, len(sendingKeys)) @@ -156,17 +181,17 @@ func Test_EthKeyStore_GetRoundRobinAddress(t *testing.T) { require.Equal(t, 1, len(fundingKeys)) t.Run("with no address filter, rotates between all sending addresses", func(t *testing.T) { - address1, err := ethKeyStore.GetRoundRobinAddress() + address1, err := ethKeyStore.GetRoundRobinAddress(nil) require.NoError(t, err) - address2, err := ethKeyStore.GetRoundRobinAddress() + address2, err := ethKeyStore.GetRoundRobinAddress(nil) require.NoError(t, err) - address3, err := ethKeyStore.GetRoundRobinAddress() + address3, err := ethKeyStore.GetRoundRobinAddress(nil) require.NoError(t, err) - address4, err := ethKeyStore.GetRoundRobinAddress() + address4, err := ethKeyStore.GetRoundRobinAddress(nil) require.NoError(t, err) - address5, err := ethKeyStore.GetRoundRobinAddress() + address5, err := 
ethKeyStore.GetRoundRobinAddress(nil) require.NoError(t, err) - address6, err := ethKeyStore.GetRoundRobinAddress() + address6, err := ethKeyStore.GetRoundRobinAddress(nil) require.NoError(t, err) require.NotEqual(t, address1, address2) @@ -181,13 +206,13 @@ func Test_EthKeyStore_GetRoundRobinAddress(t *testing.T) { // fundingKeys[0] is a funding address so even though it's whitelisted, it will be ignored addresses := []common.Address{fundingKeys[0].Address.Address(), k1.Address.Address(), k2.Address.Address(), testutils.NewAddress()} - address1, err := ethKeyStore.GetRoundRobinAddress(addresses...) + address1, err := ethKeyStore.GetRoundRobinAddress(nil, addresses...) require.NoError(t, err) - address2, err := ethKeyStore.GetRoundRobinAddress(addresses...) + address2, err := ethKeyStore.GetRoundRobinAddress(nil, addresses...) require.NoError(t, err) - address3, err := ethKeyStore.GetRoundRobinAddress(addresses...) + address3, err := ethKeyStore.GetRoundRobinAddress(nil, addresses...) require.NoError(t, err) - address4, err := ethKeyStore.GetRoundRobinAddress(addresses...) + address4, err := ethKeyStore.GetRoundRobinAddress(nil, addresses...) require.NoError(t, err) require.True(t, address1 == k1.Address.Address() || address1 == k2.Address.Address()) @@ -198,9 +223,27 @@ func Test_EthKeyStore_GetRoundRobinAddress(t *testing.T) { }) t.Run("with address filter when no address matches", func(t *testing.T) { - _, err := ethKeyStore.GetRoundRobinAddress([]common.Address{testutils.NewAddress()}...) + addr := testutils.NewAddress() + _, err := ethKeyStore.GetRoundRobinAddress(nil, []common.Address{addr}...) 
require.Error(t, err) - require.Equal(t, "no keys available", err.Error()) + require.Equal(t, fmt.Sprintf("no sending keys available that match whitelist: [%s]", addr.Hex()), err.Error()) + }) + + t.Run("with non-nil chain ID, filters by chain ID", func(t *testing.T) { + sendingKeys, err := ethKeyStore.SendingKeys(testutils.SimulatedChainID) + assert.NoError(t, err) + require.Len(t, sendingKeys, 1) + k := sendingKeys[0] + address1, err := ethKeyStore.GetRoundRobinAddress(testutils.SimulatedChainID) + require.NoError(t, err) + address2, err := ethKeyStore.GetRoundRobinAddress(testutils.SimulatedChainID) + require.NoError(t, err) + address3, err := ethKeyStore.GetRoundRobinAddress(testutils.SimulatedChainID) + require.NoError(t, err) + + require.Equal(t, k.Address.Address(), address1) + require.Equal(t, k.Address.Address(), address2) + require.Equal(t, k.Address.Address(), address3) }) } diff --git a/core/services/keystore/mocks/csa.go b/core/services/keystore/mocks/csa.go index 2e8b946a09e..4508002b9e6 100644 --- a/core/services/keystore/mocks/csa.go +++ b/core/services/keystore/mocks/csa.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/keystore/mocks/eth.go b/core/services/keystore/mocks/eth.go index f842c8c5f18..e980046f23d 100644 --- a/core/services/keystore/mocks/eth.go +++ b/core/services/keystore/mocks/eth.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks @@ -180,19 +180,20 @@ func (_m *Eth) GetAll() ([]ethkey.KeyV2, error) { return r0, r1 } -// GetRoundRobinAddress provides a mock function with given fields: addresses -func (_m *Eth) GetRoundRobinAddress(addresses ...common.Address) (common.Address, error) { +// GetRoundRobinAddress provides a mock function with given fields: chainID, addresses +func (_m *Eth) GetRoundRobinAddress(chainID *big.Int, addresses ...common.Address) (common.Address, error) { _va := make([]interface{}, len(addresses)) for _i := range addresses { _va[_i] = addresses[_i] } var _ca []interface{} + _ca = append(_ca, chainID) _ca = append(_ca, _va...) ret := _m.Called(_ca...) var r0 common.Address - if rf, ok := ret.Get(0).(func(...common.Address) common.Address); ok { - r0 = rf(addresses...) + if rf, ok := ret.Get(0).(func(*big.Int, ...common.Address) common.Address); ok { + r0 = rf(chainID, addresses...) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(common.Address) @@ -200,8 +201,8 @@ func (_m *Eth) GetRoundRobinAddress(addresses ...common.Address) (common.Address } var r1 error - if rf, ok := ret.Get(1).(func(...common.Address) error); ok { - r1 = rf(addresses...) + if rf, ok := ret.Get(1).(func(*big.Int, ...common.Address) error); ok { + r1 = rf(chainID, addresses...) 
} else { r1 = ret.Error(1) } @@ -329,13 +330,13 @@ func (_m *Eth) Import(keyJSON []byte, password string, chainID *big.Int) (ethkey return r0, r1 } -// SendingKeys provides a mock function with given fields: -func (_m *Eth) SendingKeys() ([]ethkey.KeyV2, error) { - ret := _m.Called() +// SendingKeys provides a mock function with given fields: chainID +func (_m *Eth) SendingKeys(chainID *big.Int) ([]ethkey.KeyV2, error) { + ret := _m.Called(chainID) var r0 []ethkey.KeyV2 - if rf, ok := ret.Get(0).(func() []ethkey.KeyV2); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(*big.Int) []ethkey.KeyV2); ok { + r0 = rf(chainID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]ethkey.KeyV2) @@ -343,8 +344,8 @@ func (_m *Eth) SendingKeys() ([]ethkey.KeyV2, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(*big.Int) error); ok { + r1 = rf(chainID) } else { r1 = ret.Error(1) } diff --git a/core/services/keystore/mocks/master.go b/core/services/keystore/mocks/master.go index ab634d73a18..493b9548fec 100644 --- a/core/services/keystore/mocks/master.go +++ b/core/services/keystore/mocks/master.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/keystore/mocks/ocr.go b/core/services/keystore/mocks/ocr.go index d137cf58a98..c330dfc753d 100644 --- a/core/services/keystore/mocks/ocr.go +++ b/core/services/keystore/mocks/ocr.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/keystore/mocks/ocr2.go b/core/services/keystore/mocks/ocr2.go index 6a318a19816..f5940f12015 100644 --- a/core/services/keystore/mocks/ocr2.go +++ b/core/services/keystore/mocks/ocr2.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/services/keystore/mocks/p2p.go b/core/services/keystore/mocks/p2p.go index 89dc48ba7d5..9c5fd86a03a 100644 --- a/core/services/keystore/mocks/p2p.go +++ b/core/services/keystore/mocks/p2p.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/keystore/mocks/solana.go b/core/services/keystore/mocks/solana.go index 72e0e5c0cb6..6b41cf5f4f1 100644 --- a/core/services/keystore/mocks/solana.go +++ b/core/services/keystore/mocks/solana.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/keystore/mocks/terra.go b/core/services/keystore/mocks/terra.go index 306d1bbffaf..5200387fa4b 100644 --- a/core/services/keystore/mocks/terra.go +++ b/core/services/keystore/mocks/terra.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/keystore/mocks/vrf.go b/core/services/keystore/mocks/vrf.go index 335fb0732c2..738d6304ec6 100644 --- a/core/services/keystore/mocks/vrf.go +++ b/core/services/keystore/mocks/vrf.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/mocks/checker.go b/core/services/mocks/checker.go index f053c6dea6a..c57fa77f562 100644 --- a/core/services/mocks/checker.go +++ b/core/services/mocks/checker.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/mocks/config.go b/core/services/mocks/config.go new file mode 100644 index 00000000000..70e088dc9c6 --- /dev/null +++ b/core/services/mocks/config.go @@ -0,0 +1,24 @@ +// Code generated by mockery v2.10.1. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Config is an autogenerated mock type for the Config type +type Config struct { + mock.Mock +} + +// LogSQL provides a mock function with given fields: +func (_m *Config) LogSQL() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} diff --git a/core/services/ocr/config.go b/core/services/ocr/config.go index 3ab45fa4b61..cd9f84b82e9 100644 --- a/core/services/ocr/config.go +++ b/core/services/ocr/config.go @@ -5,6 +5,13 @@ import ( ocrtypes "github.com/smartcontractkit/libocr/offchainreporting/types" ) +//go:generate mockery --name Config --output ../mocks/ --case=underscore + +// Config contains OCR configurations for a job. +type Config interface { + LogSQL() bool +} + func toLocalConfig(cfg ValidationConfig, spec job.OCROracleSpec) ocrtypes.LocalConfig { concreteSpec := job.LoadEnvConfigVarsLocalOCR(cfg, spec) lc := ocrtypes.LocalConfig{ diff --git a/core/services/ocr/contract_tracker.go b/core/services/ocr/contract_tracker.go index 5002d5657fd..9e3a82d61e0 100644 --- a/core/services/ocr/contract_tracker.go +++ b/core/services/ocr/contract_tracker.go @@ -19,11 +19,11 @@ import ( "github.com/smartcontractkit/libocr/offchainreporting/confighelper" ocrtypes "github.com/smartcontractkit/libocr/offchainreporting/types" - "github.com/smartcontractkit/chainlink/core/chains" evmclient "github.com/smartcontractkit/chainlink/core/chains/evm/client" httypes "github.com/smartcontractkit/chainlink/core/chains/evm/headtracker/types" "github.com/smartcontractkit/chainlink/core/chains/evm/log" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/core/config" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/offchain_aggregator_wrapper" "github.com/smartcontractkit/chainlink/core/logger" 
"github.com/smartcontractkit/chainlink/core/services/ocrcommon" @@ -79,7 +79,7 @@ type ( lrrMu sync.RWMutex // ContractConfig - configsMB utils.Mailbox + configsMB utils.Mailbox[ocrtypes.ContractConfig] chConfigs chan ocrtypes.ContractConfig // LatestBlockHeight @@ -128,7 +128,7 @@ func NewOCRContractTracker( nil, offchainaggregator.OffchainAggregatorRoundRequested{}, sync.RWMutex{}, - *utils.NewMailbox(configMailboxSanityLimit), + *utils.NewMailbox[ocrtypes.ContractConfig](configMailboxSanityLimit), make(chan ocrtypes.ContractConfig), -1, sync.RWMutex{}, @@ -212,14 +212,10 @@ func (t *OCRContractTracker) processLogs() { // new config. To avoid blocking the log broadcaster, we use this // background thread to deliver them and a mailbox as the buffer. for { - x, exists := t.configsMB.Retrieve() + cc, exists := t.configsMB.Retrieve() if !exists { break } - cc, ok := x.(ocrtypes.ContractConfig) - if !ok { - panic(fmt.Sprintf("expected ocrtypes.ContractConfig but got %T", x)) - } select { case t.chConfigs <- cc: case <-t.chStop: @@ -393,7 +389,7 @@ func (t *OCRContractTracker) LatestBlockHeight(ctx context.Context) (blockheight // We skip confirmation checking anyway on Optimism so there's no need to // care about the block height; we have no way of getting the L1 block // height anyway - if t.cfg.ChainType() == chains.Optimism { + if t.cfg.ChainType() == config.ChainOptimism { return 0, nil } latestBlockHeight := t.getLatestBlockHeight() diff --git a/core/services/ocr/database.go b/core/services/ocr/database.go index 38905861b73..090cc19d76c 100644 --- a/core/services/ocr/database.go +++ b/core/services/ocr/database.go @@ -16,10 +16,11 @@ import ( "github.com/smartcontractkit/chainlink/core/utils" "github.com/smartcontractkit/libocr/gethwrappers/offchainaggregator" ocrtypes "github.com/smartcontractkit/libocr/offchainreporting/types" + "github.com/smartcontractkit/sqlx" ) type db struct { - *sql.DB + q pg.Q oracleSpecID int32 lggr logger.Logger } @@ -30,28 +31,38 @@ 
var ( ) // NewDB returns a new DB scoped to this oracleSpecID -func NewDB(sqldb *sql.DB, oracleSpecID int32, lggr logger.Logger) *db { - return &db{sqldb, oracleSpecID, lggr.Named("OCRDB")} +func NewDB(sqlxDB *sqlx.DB, oracleSpecID int32, lggr logger.Logger, cfg pg.LogConfig) *db { + namedLogger := lggr.Named("OCR.DB") + + return &db{ + q: pg.NewQ(sqlxDB, namedLogger, cfg), + oracleSpecID: oracleSpecID, + lggr: lggr, + } } func (d *db) ReadState(ctx context.Context, cd ocrtypes.ConfigDigest) (ps *ocrtypes.PersistentState, err error) { - q := d.QueryRowContext(ctx, ` -SELECT epoch, highest_sent_epoch, highest_received_epoch -FROM ocr_persistent_states -WHERE ocr_oracle_spec_id = $1 AND config_digest = $2 -LIMIT 1`, d.oracleSpecID, cd) + stmt := ` + SELECT epoch, highest_sent_epoch, highest_received_epoch + FROM ocr_persistent_states + WHERE ocr_oracle_spec_id = $1 AND config_digest = $2 + LIMIT 1` ps = new(ocrtypes.PersistentState) var tmp []int64 - err = q.Scan(&ps.Epoch, &ps.HighestSentEpoch, pq.Array(&tmp)) + var highestSentEpochTmp int64 + err = d.q.QueryRowxContext(ctx, stmt, d.oracleSpecID, cd).Scan(&ps.Epoch, &highestSentEpochTmp, pq.Array(&tmp)) if errors.Is(err, sql.ErrNoRows) { return nil, nil - } else if err != nil { + } + if err != nil { return nil, errors.Wrap(err, "ReadState failed") } + ps.HighestSentEpoch = uint32(highestSentEpochTmp) + for _, v := range tmp { ps.HighestReceivedEpoch = append(ps.HighestReceivedEpoch, uint32(v)) } @@ -64,39 +75,51 @@ func (d *db) WriteState(ctx context.Context, cd ocrtypes.ConfigDigest, state ocr for _, v := range state.HighestReceivedEpoch { highestReceivedEpoch = append(highestReceivedEpoch, int64(v)) } - _, err := d.ExecContext(ctx, ` -INSERT INTO ocr_persistent_states (ocr_oracle_spec_id, config_digest, epoch, highest_sent_epoch, highest_received_epoch, created_at, updated_at) -VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) -ON CONFLICT (ocr_oracle_spec_id, config_digest) DO UPDATE SET - (epoch, highest_sent_epoch, 
highest_received_epoch, updated_at) - = - ( - EXCLUDED.epoch, - EXCLUDED.highest_sent_epoch, - EXCLUDED.highest_received_epoch, - NOW() + + stmt := ` + INSERT INTO ocr_persistent_states (ocr_oracle_spec_id, config_digest, epoch, highest_sent_epoch, highest_received_epoch, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) + ON CONFLICT (ocr_oracle_spec_id, config_digest) DO UPDATE SET + (epoch, highest_sent_epoch, highest_received_epoch, updated_at) + = + ( + EXCLUDED.epoch, + EXCLUDED.highest_sent_epoch, + EXCLUDED.highest_received_epoch, + NOW() + ) + ` + _, err := d.q.WithOpts(pg.WithLongQueryTimeout()).ExecContext( + ctx, stmt, d.oracleSpecID, cd, state.Epoch, state.HighestSentEpoch, pq.Array(&highestReceivedEpoch), ) -`, d.oracleSpecID, cd, state.Epoch, state.HighestSentEpoch, pq.Array(&highestReceivedEpoch)) return errors.Wrap(err, "WriteState failed") } func (d *db) ReadConfig(ctx context.Context) (c *ocrtypes.ContractConfig, err error) { - q := d.QueryRowContext(ctx, ` + stmt := ` SELECT config_digest, signers, transmitters, threshold, encoded_config_version, encoded FROM ocr_contract_configs WHERE ocr_oracle_spec_id = $1 - LIMIT 1`, d.oracleSpecID) + LIMIT 1` c = new(ocrtypes.ContractConfig) var signers [][]byte var transmitters [][]byte - err = q.Scan(&c.ConfigDigest, (*pq.ByteaArray)(&signers), (*pq.ByteaArray)(&transmitters), &c.Threshold, &c.EncodedConfigVersion, &c.Encoded) + err = d.q.QueryRowContext(ctx, stmt, d.oracleSpecID).Scan( + &c.ConfigDigest, + (*pq.ByteaArray)(&signers), + (*pq.ByteaArray)(&transmitters), + &c.Threshold, + &c.EncodedConfigVersion, + &c.Encoded, + ) if errors.Is(err, sql.ErrNoRows) { return nil, nil - } else if err != nil { + } + if err != nil { return nil, errors.Wrap(err, "ReadConfig failed") } @@ -119,18 +142,19 @@ func (d *db) WriteConfig(ctx context.Context, c ocrtypes.ContractConfig) error { for _, t := range c.Transmitters { transmitters = append(transmitters, t.Bytes()) } - _, err := 
d.ExecContext(ctx, ` -INSERT INTO ocr_contract_configs (ocr_oracle_spec_id, config_digest, signers, transmitters, threshold, encoded_config_version, encoded, created_at, updated_at) -VALUES ($1, $2, $3, $4, $5, $6, $7, NOW(), NOW()) -ON CONFLICT (ocr_oracle_spec_id) DO UPDATE SET - config_digest = EXCLUDED.config_digest, - signers = EXCLUDED.signers, - transmitters = EXCLUDED.transmitters, - threshold = EXCLUDED.threshold, - encoded_config_version = EXCLUDED.encoded_config_version, - encoded = EXCLUDED.encoded, - updated_at = NOW() -`, d.oracleSpecID, c.ConfigDigest, pq.ByteaArray(signers), pq.ByteaArray(transmitters), c.Threshold, int(c.EncodedConfigVersion), c.Encoded) + stmt := ` + INSERT INTO ocr_contract_configs (ocr_oracle_spec_id, config_digest, signers, transmitters, threshold, encoded_config_version, encoded, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, NOW(), NOW()) + ON CONFLICT (ocr_oracle_spec_id) DO UPDATE SET + config_digest = EXCLUDED.config_digest, + signers = EXCLUDED.signers, + transmitters = EXCLUDED.transmitters, + threshold = EXCLUDED.threshold, + encoded_config_version = EXCLUDED.encoded_config_version, + encoded = EXCLUDED.encoded, + updated_at = NOW() + ` + _, err := d.q.ExecContext(ctx, stmt, d.oracleSpecID, c.ConfigDigest, pq.ByteaArray(signers), pq.ByteaArray(transmitters), c.Threshold, int(c.EncodedConfigVersion), c.Encoded) return errors.Wrap(err, "WriteConfig failed") } @@ -150,37 +174,39 @@ func (d *db) StorePendingTransmission(ctx context.Context, k ocrtypes.PendingTra ss = append(ss, v[:]) } - _, err := d.ExecContext(ctx, ` -INSERT INTO ocr_pending_transmissions ( - ocr_oracle_spec_id, - config_digest, - epoch, - round, - time, - median, - serialized_report, - rs, - ss, - vs, - created_at, - updated_at -) -VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,NOW(),NOW()) -ON CONFLICT (ocr_oracle_spec_id, config_digest, epoch, round) DO UPDATE SET - time = EXCLUDED.time, - median = EXCLUDED.median, - serialized_report = 
EXCLUDED.serialized_report, - rs = EXCLUDED.rs, - ss = EXCLUDED.ss, - vs = EXCLUDED.vs, - updated_at = NOW() -`, d.oracleSpecID, k.ConfigDigest, k.Epoch, k.Round, p.Time, median, p.SerializedReport, pq.ByteaArray(rs), pq.ByteaArray(ss), p.Vs[:]) + stmt := ` + INSERT INTO ocr_pending_transmissions ( + ocr_oracle_spec_id, + config_digest, + epoch, + round, + time, + median, + serialized_report, + rs, + ss, + vs, + created_at, + updated_at + ) + VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,NOW(),NOW()) + ON CONFLICT (ocr_oracle_spec_id, config_digest, epoch, round) DO UPDATE SET + time = EXCLUDED.time, + median = EXCLUDED.median, + serialized_report = EXCLUDED.serialized_report, + rs = EXCLUDED.rs, + ss = EXCLUDED.ss, + vs = EXCLUDED.vs, + updated_at = NOW() + ` + + _, err := d.q.ExecContext(ctx, stmt, d.oracleSpecID, k.ConfigDigest, k.Epoch, k.Round, p.Time, median, p.SerializedReport, pq.ByteaArray(rs), pq.ByteaArray(ss), p.Vs[:]) return errors.Wrap(err, "StorePendingTransmission failed") } func (d *db) PendingTransmissionsWithConfigDigest(ctx context.Context, cd ocrtypes.ConfigDigest) (map[ocrtypes.PendingTransmissionKey]ocrtypes.PendingTransmission, error) { - rows, err := d.QueryContext(ctx, ` + rows, err := d.q.QueryContext(ctx, ` SELECT config_digest, epoch, @@ -241,7 +267,7 @@ WHERE ocr_oracle_spec_id = $1 AND config_digest = $2 } func (d *db) DeletePendingTransmission(ctx context.Context, k ocrtypes.PendingTransmissionKey) (err error) { - _, err = d.ExecContext(ctx, ` + _, err = d.q.WithOpts(pg.WithLongQueryTimeout()).ExecContext(ctx, ` DELETE FROM ocr_pending_transmissions WHERE ocr_oracle_spec_id = $1 AND config_digest = $2 AND epoch = $3 AND round = $4 `, d.oracleSpecID, k.ConfigDigest, k.Epoch, k.Round) @@ -252,7 +278,7 @@ WHERE ocr_oracle_spec_id = $1 AND config_digest = $2 AND epoch = $3 AND round = } func (d *db) DeletePendingTransmissionsOlderThan(ctx context.Context, t time.Time) (err error) { - _, err = d.ExecContext(ctx, ` + _, err = 
d.q.WithOpts(pg.WithLongQueryTimeout()).ExecContext(ctx, ` DELETE FROM ocr_pending_transmissions WHERE ocr_oracle_spec_id = $1 AND time < $2 `, d.oracleSpecID, t) @@ -281,7 +307,7 @@ VALUES ($1,$2,$3,$4,$5,$6) ON CONFLICT (ocr_oracle_spec_id) DO UPDATE SET } func (d *db) LoadLatestRoundRequested() (rr offchainaggregator.OffchainAggregatorRoundRequested, err error) { - rows, err := d.Query(` + rows, err := d.q.Query(` SELECT requester, config_digest, epoch, round, raw FROM ocr_latest_round_requested WHERE ocr_oracle_spec_id = $1 diff --git a/core/services/ocr/database_test.go b/core/services/ocr/database_test.go index 2a15fa0ff63..2f9aaea4877 100644 --- a/core/services/ocr/database_test.go +++ b/core/services/ocr/database_test.go @@ -3,6 +3,7 @@ package ocr_test import ( "bytes" "context" + "fmt" "math/big" "testing" "time" @@ -12,7 +13,6 @@ import ( "github.com/smartcontractkit/chainlink/core/internal/testutils" "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/core/services/ocr" - "github.com/smartcontractkit/chainlink/core/services/pg" "github.com/smartcontractkit/chainlink/core/utils" "github.com/smartcontractkit/libocr/gethwrappers/offchainaggregator" ocrtypes "github.com/smartcontractkit/libocr/offchainreporting/types" @@ -24,7 +24,6 @@ var ctx = context.Background() func Test_DB_ReadWriteState(t *testing.T) { db := pgtest.NewSqlxDB(t) - sqlDB := db.DB configDigest := cltest.MakeConfigDigest(t) cfg := cltest.NewTestGeneralConfig(t) @@ -33,7 +32,8 @@ func Test_DB_ReadWriteState(t *testing.T) { spec := cltest.MustInsertOffchainreportingOracleSpec(t, db, key.Address) t.Run("reads and writes state", func(t *testing.T) { - odb := ocr.NewTestDB(t, sqlDB, spec.ID) + fmt.Println("creating DB") + odb := ocr.NewTestDB(t, db, spec.ID) state := ocrtypes.PersistentState{ Epoch: 1, HighestSentEpoch: 2, @@ -50,7 +50,7 @@ func Test_DB_ReadWriteState(t *testing.T) { }) t.Run("updates state", func(t *testing.T) { - odb 
:= ocr.NewTestDB(t, sqlDB, spec.ID) + odb := ocr.NewTestDB(t, db, spec.ID) newState := ocrtypes.PersistentState{ Epoch: 2, HighestSentEpoch: 3, @@ -67,7 +67,7 @@ func Test_DB_ReadWriteState(t *testing.T) { }) t.Run("does not return result for wrong spec", func(t *testing.T) { - odb := ocr.NewTestDB(t, sqlDB, spec.ID) + odb := ocr.NewTestDB(t, db, spec.ID) state := ocrtypes.PersistentState{ Epoch: 3, HighestSentEpoch: 4, @@ -78,7 +78,7 @@ func Test_DB_ReadWriteState(t *testing.T) { require.NoError(t, err) // db with different spec - odb = ocr.NewTestDB(t, sqlDB, -1) + odb = ocr.NewTestDB(t, db, -1) readState, err := odb.ReadState(ctx, configDigest) require.NoError(t, err) @@ -87,7 +87,7 @@ func Test_DB_ReadWriteState(t *testing.T) { }) t.Run("does not return result for wrong config digest", func(t *testing.T) { - odb := ocr.NewTestDB(t, sqlDB, spec.ID) + odb := ocr.NewTestDB(t, db, spec.ID) state := ocrtypes.PersistentState{ Epoch: 4, HighestSentEpoch: 5, @@ -106,7 +106,7 @@ func Test_DB_ReadWriteState(t *testing.T) { func Test_DB_ReadWriteConfig(t *testing.T) { db := pgtest.NewSqlxDB(t) - sqlDB := db.DB + sqlDB := db cfg := cltest.NewTestGeneralConfig(t) config := ocrtypes.ContractConfig{ @@ -188,7 +188,7 @@ func assertPendingTransmissionEqual(t *testing.T, pt1, pt2 ocrtypes.PendingTrans func Test_DB_PendingTransmissions(t *testing.T) { db := pgtest.NewSqlxDB(t) - sqlDB := db.DB + sqlDB := db cfg := cltest.NewTestGeneralConfig(t) ethKeyStore := cltest.NewKeyStore(t, db, cfg).Eth() key, _ := cltest.MustInsertRandomKey(t, ethKeyStore) @@ -393,7 +393,7 @@ func Test_DB_PendingTransmissions(t *testing.T) { func Test_DB_LatestRoundRequested(t *testing.T) { db := pgtest.NewSqlxDB(t) - sqlDB := db.DB + sqlDB := db pgtest.MustExec(t, db, `SET CONSTRAINTS offchainreporting_latest_roun_offchainreporting_oracle_spe_fkey DEFERRED`) @@ -411,7 +411,7 @@ func Test_DB_LatestRoundRequested(t *testing.T) { } t.Run("saves latest round requested", func(t *testing.T) { - err := 
odb.SaveLatestRoundRequested(pg.WrapDbWithSqlx(sqlDB), rr) + err := odb.SaveLatestRoundRequested(sqlDB, rr) require.NoError(t, err) rawLog.Index = 42 @@ -425,7 +425,7 @@ func Test_DB_LatestRoundRequested(t *testing.T) { Raw: rawLog, } - err = odb.SaveLatestRoundRequested(pg.WrapDbWithSqlx(sqlDB), rr) + err = odb.SaveLatestRoundRequested(sqlDB, rr) require.NoError(t, err) }) diff --git a/core/services/ocr/delegate.go b/core/services/ocr/delegate.go index 755ef60a864..de78242d2ca 100644 --- a/core/services/ocr/delegate.go +++ b/core/services/ocr/delegate.go @@ -35,6 +35,7 @@ type Delegate struct { monitoringEndpointGen telemetry.MonitoringEndpointGenerator chainSet evm.ChainSet lggr logger.Logger + cfg Config } var _ job.Delegate = (*Delegate)(nil) @@ -50,6 +51,7 @@ func NewDelegate( monitoringEndpointGen telemetry.MonitoringEndpointGenerator, chainSet evm.ChainSet, lggr logger.Logger, + cfg Config, ) *Delegate { return &Delegate{ db, @@ -60,6 +62,7 @@ func NewDelegate( monitoringEndpointGen, chainSet, lggr.Named("OCR"), + cfg, } } @@ -104,7 +107,7 @@ func (d Delegate) ServicesForSpec(jb job.Job) (services []job.ServiceCtx, err er return nil, errors.Wrap(err, "could not instantiate NewOffchainAggregatorCaller") } - ocrDB := NewDB(d.db.DB, concreteSpec.ID, lggr) + ocrDB := NewDB(d.db, concreteSpec.ID, lggr, d.cfg) tracker := NewOCRContractTracker( contract, diff --git a/core/services/ocr/helpers_internal_test.go b/core/services/ocr/helpers_internal_test.go index aa41c3a52a6..ac8f542c578 100644 --- a/core/services/ocr/helpers_internal_test.go +++ b/core/services/ocr/helpers_internal_test.go @@ -1,9 +1,11 @@ package ocr import ( - "database/sql" "testing" + "github.com/smartcontractkit/sqlx" + + "github.com/smartcontractkit/chainlink/core/internal/testutils/configtest" "github.com/smartcontractkit/chainlink/core/logger" ) @@ -11,6 +13,6 @@ func (c *ConfigOverriderImpl) ExportedUpdateFlagsStatus() error { return c.updateFlagsStatus() } -func NewTestDB(t *testing.T, sqldb 
*sql.DB, oracleSpecID int32) *db { - return NewDB(sqldb, oracleSpecID, logger.TestLogger(t)) +func NewTestDB(t *testing.T, sqldb *sqlx.DB, oracleSpecID int32) *db { + return NewDB(sqldb, oracleSpecID, logger.TestLogger(t), configtest.NewTestGeneralConfig(t)) } diff --git a/core/services/ocr/mocks/ocr_contract_tracker_db.go b/core/services/ocr/mocks/ocr_contract_tracker_db.go index b1175c4fa66..440c544b3c8 100644 --- a/core/services/ocr/mocks/ocr_contract_tracker_db.go +++ b/core/services/ocr/mocks/ocr_contract_tracker_db.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/ocr/validate.go b/core/services/ocr/validate.go index 64bca6ee250..e3c9ed88c3a 100644 --- a/core/services/ocr/validate.go +++ b/core/services/ocr/validate.go @@ -6,17 +6,18 @@ import ( "github.com/multiformats/go-multiaddr" "github.com/pelletier/go-toml" "github.com/pkg/errors" - "github.com/smartcontractkit/chainlink/core/chains" + "github.com/smartcontractkit/libocr/offchainreporting" + "github.com/smartcontractkit/chainlink/core/chains/evm" + "github.com/smartcontractkit/chainlink/core/config" "github.com/smartcontractkit/chainlink/core/services/job" "github.com/smartcontractkit/chainlink/core/services/keystore/keys/ethkey" "github.com/smartcontractkit/chainlink/core/services/keystore/keys/p2pkey" "github.com/smartcontractkit/chainlink/core/services/ocrcommon" - "github.com/smartcontractkit/libocr/offchainreporting" ) type ValidationConfig interface { - ChainType() chains.ChainType + ChainType() config.ChainType Dev() bool OCRBlockchainTimeout() time.Duration OCRContractConfirmations() uint16 diff --git a/core/services/ocr2/database.go b/core/services/ocr2/database.go index 54320b1ad3c..b4f26a6b4e8 100644 --- a/core/services/ocr2/database.go +++ b/core/services/ocr2/database.go @@ -10,12 +10,14 @@ import ( "github.com/pkg/errors" ocrcommon 
"github.com/smartcontractkit/libocr/commontypes" ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2/types" + "github.com/smartcontractkit/sqlx" "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services/pg" ) type db struct { - *sql.DB + q pg.Q oracleSpecID int32 lggr logger.Logger } @@ -25,28 +27,38 @@ var ( ) // NewDB returns a new DB scoped to this oracleSpecID -func NewDB(sqldb *sql.DB, oracleSpecID int32, lggr logger.Logger) *db { - return &db{sqldb, oracleSpecID, lggr} +func NewDB(sqlxDB *sqlx.DB, oracleSpecID int32, lggr logger.Logger, cfg pg.LogConfig) *db { + namedLogger := lggr.Named("OCR2.DB") + + return &db{ + q: pg.NewQ(sqlxDB, namedLogger, cfg), + oracleSpecID: oracleSpecID, + lggr: lggr, + } } func (d *db) ReadState(ctx context.Context, cd ocrtypes.ConfigDigest) (ps *ocrtypes.PersistentState, err error) { - q := d.QueryRowContext(ctx, ` -SELECT epoch, highest_sent_epoch, highest_received_epoch -FROM ocr2_persistent_states -WHERE ocr2_oracle_spec_id = $1 AND config_digest = $2 -LIMIT 1`, d.oracleSpecID, cd) + stmt := ` + SELECT epoch, highest_sent_epoch, highest_received_epoch + FROM ocr2_persistent_states + WHERE ocr2_oracle_spec_id = $1 AND config_digest = $2 + LIMIT 1` ps = new(ocrtypes.PersistentState) var tmp []int64 - err = q.Scan(&ps.Epoch, &ps.HighestSentEpoch, pq.Array(&tmp)) + var highestSentEpochTmp int64 + err = d.q.QueryRowxContext(ctx, stmt, d.oracleSpecID, cd).Scan(&ps.Epoch, &highestSentEpochTmp, pq.Array(&tmp)) if errors.Is(err, sql.ErrNoRows) { return nil, nil - } else if err != nil { + } + if err != nil { return nil, errors.Wrap(err, "ReadState failed") } + ps.HighestSentEpoch = uint32(highestSentEpochTmp) + for _, v := range tmp { ps.HighestReceivedEpoch = append(ps.HighestReceivedEpoch, uint32(v)) } @@ -59,47 +71,52 @@ func (d *db) WriteState(ctx context.Context, cd ocrtypes.ConfigDigest, state ocr for _, v := range state.HighestReceivedEpoch { highestReceivedEpoch = 
append(highestReceivedEpoch, int64(v)) } - _, err := d.ExecContext(ctx, ` -INSERT INTO ocr2_persistent_states ( - ocr2_oracle_spec_id, - config_digest, - epoch, - highest_sent_epoch, - highest_received_epoch, - created_at, - updated_at -) -VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) -ON CONFLICT (ocr2_oracle_spec_id, config_digest) -DO UPDATE SET ( + + stmt := ` + INSERT INTO ocr2_persistent_states ( + ocr2_oracle_spec_id, + config_digest, epoch, highest_sent_epoch, highest_received_epoch, + created_at, updated_at - ) = ( - EXCLUDED.epoch, - EXCLUDED.highest_sent_epoch, - EXCLUDED.highest_received_epoch, - NOW() - )`, d.oracleSpecID, cd, state.Epoch, state.HighestSentEpoch, pq.Array(&highestReceivedEpoch)) + ) + VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) + ON CONFLICT (ocr2_oracle_spec_id, config_digest) + DO UPDATE SET ( + epoch, + highest_sent_epoch, + highest_received_epoch, + updated_at + ) = ( + EXCLUDED.epoch, + EXCLUDED.highest_sent_epoch, + EXCLUDED.highest_received_epoch, + NOW() + )` + + _, err := d.q.WithOpts(pg.WithLongQueryTimeout()).ExecContext( + ctx, stmt, d.oracleSpecID, cd, state.Epoch, state.HighestSentEpoch, pq.Array(&highestReceivedEpoch), + ) return errors.Wrap(err, "WriteState failed") } func (d *db) ReadConfig(ctx context.Context) (c *ocrtypes.ContractConfig, err error) { - q := d.QueryRowContext(ctx, ` -SELECT - config_digest, - config_count, - signers, - transmitters, - f, - onchain_config, - offchain_config_version, - offchain_config -FROM ocr2_contract_configs -WHERE ocr2_oracle_spec_id = $1 -LIMIT 1`, d.oracleSpecID) + stmt := ` + SELECT + config_digest, + config_count, + signers, + transmitters, + f, + onchain_config, + offchain_config_version, + offchain_config + FROM ocr2_contract_configs + WHERE ocr2_oracle_spec_id = $1 + LIMIT 1` c = new(ocrtypes.ContractConfig) @@ -107,7 +124,7 @@ LIMIT 1`, d.oracleSpecID) signers := [][]byte{} transmitters := [][]byte{} - err = q.Scan( + err = d.q.QueryRowx(stmt, d.oracleSpecID).Scan( &digest, 
&c.ConfigCount, (*pq.ByteaArray)(&signers), @@ -119,7 +136,8 @@ LIMIT 1`, d.oracleSpecID) ) if errors.Is(err, sql.ErrNoRows) { return nil, nil - } else if err != nil { + } + if err != nil { return nil, errors.Wrap(err, "ReadConfig failed") } @@ -144,32 +162,33 @@ func (d *db) WriteConfig(ctx context.Context, c ocrtypes.ContractConfig) error { for _, s := range c.Signers { signers = append(signers, []byte(s)) } - _, err := d.ExecContext(ctx, ` -INSERT INTO ocr2_contract_configs ( - ocr2_oracle_spec_id, - config_digest, - config_count, - signers, - transmitters, - f, - onchain_config, - offchain_config_version, - offchain_config, - created_at, - updated_at -) -VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, NOW(), NOW()) -ON CONFLICT (ocr2_oracle_spec_id) DO UPDATE SET - config_digest = EXCLUDED.config_digest, - config_count = EXCLUDED.config_count, - signers = EXCLUDED.signers, - transmitters = EXCLUDED.transmitters, - f = EXCLUDED.f, - onchain_config = EXCLUDED.onchain_config, - offchain_config_version = EXCLUDED.offchain_config_version, - offchain_config = EXCLUDED.offchain_config, - updated_at = NOW() -`, + stmt := ` + INSERT INTO ocr2_contract_configs ( + ocr2_oracle_spec_id, + config_digest, + config_count, + signers, + transmitters, + f, + onchain_config, + offchain_config_version, + offchain_config, + created_at, + updated_at + ) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, NOW(), NOW()) + ON CONFLICT (ocr2_oracle_spec_id) DO UPDATE SET + config_digest = EXCLUDED.config_digest, + config_count = EXCLUDED.config_count, + signers = EXCLUDED.signers, + transmitters = EXCLUDED.transmitters, + f = EXCLUDED.f, + onchain_config = EXCLUDED.onchain_config, + offchain_config_version = EXCLUDED.offchain_config_version, + offchain_config = EXCLUDED.offchain_config, + updated_at = NOW() + ` + _, err := d.q.ExecContext(ctx, stmt, d.oracleSpecID, c.ConfigDigest, c.ConfigCount, @@ -199,35 +218,37 @@ func (d *db) StorePendingTransmission(ctx context.Context, t ocrtypes.ReportTime 
extraHash := make([]byte, 32) copy(extraHash[:], tx.ExtraHash[:]) - _, err := d.ExecContext(ctx, ` -INSERT INTO ocr2_pending_transmissions ( - ocr2_oracle_spec_id, - config_digest, - epoch, - round, - - time, - extra_hash, - report, - attributed_signatures, - - created_at, - updated_at -) -VALUES ($1, $2, $3, $4, $5, $6, $7, $8, NOW(), NOW()) -ON CONFLICT (ocr2_oracle_spec_id, config_digest, epoch, round) DO UPDATE SET - ocr2_oracle_spec_id = EXCLUDED.ocr2_oracle_spec_id, - config_digest = EXCLUDED.config_digest, - epoch = EXCLUDED.epoch, - round = EXCLUDED.round, - - time = EXCLUDED.time, - extra_hash = EXCLUDED.extra_hash, - report = EXCLUDED.report, - attributed_signatures = EXCLUDED.attributed_signatures, - - updated_at = NOW() -`, + stmt := ` + INSERT INTO ocr2_pending_transmissions ( + ocr2_oracle_spec_id, + config_digest, + epoch, + round, + + time, + extra_hash, + report, + attributed_signatures, + + created_at, + updated_at + ) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, NOW(), NOW()) + ON CONFLICT (ocr2_oracle_spec_id, config_digest, epoch, round) DO UPDATE SET + ocr2_oracle_spec_id = EXCLUDED.ocr2_oracle_spec_id, + config_digest = EXCLUDED.config_digest, + epoch = EXCLUDED.epoch, + round = EXCLUDED.round, + + time = EXCLUDED.time, + extra_hash = EXCLUDED.extra_hash, + report = EXCLUDED.report, + attributed_signatures = EXCLUDED.attributed_signatures, + + updated_at = NOW() + ` + + _, err := d.q.ExecContext(ctx, stmt, d.oracleSpecID, digest, t.Epoch, @@ -242,18 +263,19 @@ ON CONFLICT (ocr2_oracle_spec_id, config_digest, epoch, round) DO UPDATE SET } func (d *db) PendingTransmissionsWithConfigDigest(ctx context.Context, cd ocrtypes.ConfigDigest) (map[ocrtypes.ReportTimestamp]ocrtypes.PendingTransmission, error) { - rows, err := d.QueryContext(ctx, ` -SELECT - config_digest, - epoch, - round, - time, - extra_hash, - report, - attributed_signatures -FROM ocr2_pending_transmissions -WHERE ocr2_oracle_spec_id = $1 AND config_digest = $2 -`, d.oracleSpecID, cd) + 
stmt := ` + SELECT + config_digest, + epoch, + round, + time, + extra_hash, + report, + attributed_signatures + FROM ocr2_pending_transmissions + WHERE ocr2_oracle_spec_id = $1 AND config_digest = $2 + ` + rows, err := d.q.QueryxContext(ctx, stmt, d.oracleSpecID, cd) if err != nil { return nil, errors.Wrap(err, "PendingTransmissionsWithConfigDigest failed to query rows") } @@ -299,7 +321,7 @@ WHERE ocr2_oracle_spec_id = $1 AND config_digest = $2 } func (d *db) DeletePendingTransmission(ctx context.Context, t ocrtypes.ReportTimestamp) (err error) { - _, err = d.ExecContext(ctx, ` + _, err = d.q.WithOpts(pg.WithLongQueryTimeout()).ExecContext(ctx, ` DELETE FROM ocr2_pending_transmissions WHERE ocr2_oracle_spec_id = $1 AND config_digest = $2 AND epoch = $3 AND round = $4 `, d.oracleSpecID, t.ConfigDigest, t.Epoch, t.Round) @@ -310,7 +332,7 @@ WHERE ocr2_oracle_spec_id = $1 AND config_digest = $2 AND epoch = $3 AND round } func (d *db) DeletePendingTransmissionsOlderThan(ctx context.Context, t time.Time) (err error) { - _, err = d.ExecContext(ctx, ` + _, err = d.q.WithOpts(pg.WithLongQueryTimeout()).ExecContext(ctx, ` DELETE FROM ocr2_pending_transmissions WHERE ocr2_oracle_spec_id = $1 AND time < $2 `, d.oracleSpecID, t) diff --git a/core/services/ocr2/database_test.go b/core/services/ocr2/database_test.go index 855f9dcdad7..8a572b8a718 100644 --- a/core/services/ocr2/database_test.go +++ b/core/services/ocr2/database_test.go @@ -66,7 +66,7 @@ func Test_DB_ReadWriteState(t *testing.T) { lggr := logger.TestLogger(t) t.Run("reads and writes state", func(t *testing.T) { - db := ocr2.NewDB(sqlDB.DB, spec.ID, lggr) + db := ocr2.NewDB(sqlDB, spec.ID, lggr, cfg) state := ocrtypes.PersistentState{ Epoch: 1, HighestSentEpoch: 2, @@ -83,7 +83,7 @@ func Test_DB_ReadWriteState(t *testing.T) { }) t.Run("updates state", func(t *testing.T) { - db := ocr2.NewDB(sqlDB.DB, spec.ID, lggr) + db := ocr2.NewDB(sqlDB, spec.ID, lggr, cfg) newState := ocrtypes.PersistentState{ Epoch: 2, 
HighestSentEpoch: 3, @@ -100,7 +100,7 @@ func Test_DB_ReadWriteState(t *testing.T) { }) t.Run("does not return result for wrong spec", func(t *testing.T) { - db := ocr2.NewDB(sqlDB.DB, spec.ID, lggr) + db := ocr2.NewDB(sqlDB, spec.ID, lggr, cfg) state := ocrtypes.PersistentState{ Epoch: 3, HighestSentEpoch: 4, @@ -111,7 +111,7 @@ func Test_DB_ReadWriteState(t *testing.T) { require.NoError(t, err) // odb with different spec - db = ocr2.NewDB(sqlDB.DB, -1, lggr) + db = ocr2.NewDB(sqlDB, -1, lggr, cfg) readState, err := db.ReadState(ctx, configDigest) require.NoError(t, err) @@ -120,7 +120,7 @@ func Test_DB_ReadWriteState(t *testing.T) { }) t.Run("does not return result for wrong config digest", func(t *testing.T) { - db := ocr2.NewDB(sqlDB.DB, spec.ID, lggr) + db := ocr2.NewDB(sqlDB, spec.ID, lggr, cfg) state := ocrtypes.PersistentState{ Epoch: 4, HighestSentEpoch: 5, @@ -157,7 +157,7 @@ func Test_DB_ReadWriteConfig(t *testing.T) { lggr := logger.TestLogger(t) t.Run("reads and writes config", func(t *testing.T) { - db := ocr2.NewDB(sqlDB.DB, spec.ID, lggr) + db := ocr2.NewDB(sqlDB, spec.ID, lggr, cfg) err := db.WriteConfig(ctx, config) require.NoError(t, err) @@ -169,7 +169,7 @@ func Test_DB_ReadWriteConfig(t *testing.T) { }) t.Run("updates config", func(t *testing.T) { - db := ocr2.NewDB(sqlDB.DB, spec.ID, lggr) + db := ocr2.NewDB(sqlDB, spec.ID, lggr, cfg) newConfig := ocrtypes.ContractConfig{ ConfigDigest: testhelpers.MakeConfigDigest(t), @@ -187,12 +187,12 @@ func Test_DB_ReadWriteConfig(t *testing.T) { }) t.Run("does not return result for wrong spec", func(t *testing.T) { - db := ocr2.NewDB(sqlDB.DB, spec.ID, lggr) + db := ocr2.NewDB(sqlDB, spec.ID, lggr, cfg) err := db.WriteConfig(ctx, config) require.NoError(t, err) - db = ocr2.NewDB(sqlDB.DB, -1, lggr) + db = ocr2.NewDB(sqlDB, -1, lggr, cfg) readConfig, err := db.ReadConfig(ctx) require.NoError(t, err) @@ -220,8 +220,8 @@ func Test_DB_PendingTransmissions(t *testing.T) { lggr := logger.TestLogger(t) spec := 
MustInsertOCROracleSpec(t, sqlDB, key.Address) spec2 := MustInsertOCROracleSpec(t, sqlDB, key.Address) - db := ocr2.NewDB(sqlDB.DB, spec.ID, lggr) - db2 := ocr2.NewDB(sqlDB.DB, spec2.ID, lggr) + db := ocr2.NewDB(sqlDB, spec.ID, lggr, cfg) + db2 := ocr2.NewDB(sqlDB, spec2.ID, lggr, cfg) configDigest := testhelpers.MakeConfigDigest(t) k := ocrtypes.ReportTimestamp{ @@ -411,7 +411,7 @@ func Test_DB_PendingTransmissions(t *testing.T) { require.Len(t, m, 1) // Didn't affect other oracleSpecIDs - db = ocr2.NewDB(sqlDB.DB, spec2.ID, lggr) + db = ocr2.NewDB(sqlDB, spec2.ID, lggr, cfg) m, err = db.PendingTransmissionsWithConfigDigest(ctx, configDigest) require.NoError(t, err) require.Len(t, m, 1) diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index 5b33d9e144f..0572b9b13fd 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -90,7 +90,7 @@ func (d Delegate) ServicesForSpec(jobSpec job.Job) ([]job.ServiceCtx, error) { return nil, errors.Wrap(err, "error calling 'relayer.NewOCR2Provider'") } - ocrDB := NewDB(d.db.DB, spec.ID, d.lggr) + ocrDB := NewDB(d.db, spec.ID, d.lggr, d.cfg) peerWrapper := d.peerWrapper if peerWrapper == nil { return nil, errors.New("cannot setup OCR2 job service, libp2p peer was missing") diff --git a/core/services/ocr2/mocks/config.go b/core/services/ocr2/mocks/config.go index 59ac35dfeca..55275569462 100644 --- a/core/services/ocr2/mocks/config.go +++ b/core/services/ocr2/mocks/config.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks @@ -41,6 +41,20 @@ func (_m *Config) JobPipelineResultWriteQueueDepth() uint64 { return r0 } +// LogSQL provides a mock function with given fields: +func (_m *Config) LogSQL() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + // OCR2BlockchainTimeout provides a mock function with given fields: func (_m *Config) OCR2BlockchainTimeout() time.Duration { ret := _m.Called() diff --git a/core/services/ocr2/validate/config.go b/core/services/ocr2/validate/config.go index aec7dd2650e..098ce6fb326 100644 --- a/core/services/ocr2/validate/config.go +++ b/core/services/ocr2/validate/config.go @@ -15,6 +15,7 @@ import ( type Config interface { config.OCR2Config Dev() bool + LogSQL() bool JobPipelineResultWriteQueueDepth() uint64 } diff --git a/core/services/ocrcommon/block_translator.go b/core/services/ocrcommon/block_translator.go index f9d3a24d9d4..07723fd2057 100644 --- a/core/services/ocrcommon/block_translator.go +++ b/core/services/ocrcommon/block_translator.go @@ -4,9 +4,9 @@ import ( "context" "math/big" - "github.com/smartcontractkit/chainlink/core/chains" evmclient "github.com/smartcontractkit/chainlink/core/chains/evm/client" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/core/config" "github.com/smartcontractkit/chainlink/core/logger" ) @@ -19,9 +19,9 @@ type BlockTranslator interface { // NewBlockTranslator returns the block translator for the given chain func NewBlockTranslator(cfg Config, client evmclient.Client, lggr logger.Logger) BlockTranslator { switch cfg.ChainType() { - case chains.Arbitrum: + case config.ChainArbitrum: return NewArbitrumBlockTranslator(client, lggr) - case chains.XDai, chains.ExChain, chains.Optimism: + case config.ChainXDai, config.ChainExChain, config.ChainOptimism: fallthrough default: return &l1BlockTranslator{} diff --git 
a/core/services/ocrcommon/config.go b/core/services/ocrcommon/config.go index 9ae43ec374f..98629592a50 100644 --- a/core/services/ocrcommon/config.go +++ b/core/services/ocrcommon/config.go @@ -5,10 +5,11 @@ import ( "github.com/pkg/errors" - "github.com/smartcontractkit/chainlink/core/chains" + "github.com/smartcontractkit/libocr/commontypes" + + "github.com/smartcontractkit/chainlink/core/config" "github.com/smartcontractkit/chainlink/core/services/keystore/keys/ethkey" "github.com/smartcontractkit/chainlink/core/services/keystore/keys/p2pkey" - "github.com/smartcontractkit/libocr/commontypes" ) type Config interface { @@ -31,7 +32,7 @@ type Config interface { P2PPeerID() p2pkey.PeerID P2PV2Bootstrappers() []commontypes.BootstrapperLocator FlagsContractAddress() string - ChainType() chains.ChainType + ChainType() config.ChainType } func ParseBootstrapPeers(peers []string) (bootstrapPeers []commontypes.BootstrapperLocator, err error) { diff --git a/core/services/pg/advisory_lock_test.go b/core/services/pg/advisory_lock_test.go index 4fcf87a45cf..3f53b1c1d2d 100644 --- a/core/services/pg/advisory_lock_test.go +++ b/core/services/pg/advisory_lock_test.go @@ -21,7 +21,7 @@ func newAdvisoryLock(t *testing.T, db *sqlx.DB, cfg *configtest.TestGeneralConfi } func Test_AdvisoryLock(t *testing.T) { - cfg, db := heavyweight.FullTestDB(t, "advisorylock", false, false) + cfg, db := heavyweight.FullTestDBEmpty(t, "advisorylock") check := 1 * time.Second cfg.Overrides.AdvisoryLockCheckInterval = &check diff --git a/core/services/pg/event_broadcaster.go b/core/services/pg/event_broadcaster.go index 10c441c3454..0090b2e748e 100644 --- a/core/services/pg/event_broadcaster.go +++ b/core/services/pg/event_broadcaster.go @@ -161,7 +161,7 @@ func (b *eventBroadcaster) Subscribe(channel, payloadFilter string) (Subscriptio channel: channel, payloadFilter: payloadFilter, eventBroadcaster: b, - queue: utils.NewBoundedQueue(1000), + queue: utils.NewBoundedQueue[Event](1000), chEvents: 
make(chan Event), chDone: make(chan struct{}), lggr: logger.Sugared(b.lggr), @@ -233,7 +233,7 @@ type subscription struct { channel string payloadFilter string eventBroadcaster *eventBroadcaster - queue *utils.BoundedQueue + queue *utils.BoundedQueue[Event] processQueueWorker utils.SleeperTask chEvents chan Event chDone chan struct{} @@ -258,11 +258,7 @@ func (sub *subscription) processQueue() { defer cancel() for !sub.queue.Empty() { - event, ok := sub.queue.Take().(Event) - if !ok { - sub.lggr.AssumptionViolationf("Postgres event broadcaster subscription expected an Event, got %T", event) - continue - } + event := sub.queue.Take() select { case sub.chEvents <- event: case <-ctx.Done(): diff --git a/core/services/pg/event_broadcaster_test.go b/core/services/pg/event_broadcaster_test.go index a3809da8448..83befe9d5c5 100644 --- a/core/services/pg/event_broadcaster_test.go +++ b/core/services/pg/event_broadcaster_test.go @@ -16,7 +16,7 @@ import ( ) func TestEventBroadcaster(t *testing.T) { - config, _ := heavyweight.FullTestDB(t, "event_broadcaster", true, false) + config, _ := heavyweight.FullTestDBNoFixtures(t, "event_broadcaster") eventBroadcaster := cltest.NewEventBroadcaster(t, config.DatabaseURL()) require.NoError(t, eventBroadcaster.Start(testutils.Context(t))) diff --git a/core/services/pg/lease_lock_test.go b/core/services/pg/lease_lock_test.go index b823eb5cf60..e5c873f4a1a 100644 --- a/core/services/pg/lease_lock_test.go +++ b/core/services/pg/lease_lock_test.go @@ -22,7 +22,7 @@ func newLeaseLock(t *testing.T, db *sqlx.DB, cfg *configtest.TestGeneralConfig) } func Test_LeaseLock(t *testing.T) { - cfg, db := heavyweight.FullTestDB(t, "leaselock", true, false) + cfg, db := heavyweight.FullTestDBNoFixtures(t, "leaselock") duration := 15 * time.Second refresh := 100 * time.Millisecond cfg.Overrides.LeaseLockDuration = &duration @@ -182,7 +182,7 @@ func Test_LeaseLock(t *testing.T) { require.NoError(t, db.Close()) t.Run("on virgin database", func(t 
*testing.T) { - _, db := heavyweight.FullTestDB(t, "leaselock", false, false) + _, db := heavyweight.FullTestDBEmpty(t, "leaselock") leaseLock1 := newLeaseLock(t, db, cfg) diff --git a/core/services/pg/mocks/event_broadcaster.go b/core/services/pg/mocks/event_broadcaster.go index 873b908689a..ab4f2ed2639 100644 --- a/core/services/pg/mocks/event_broadcaster.go +++ b/core/services/pg/mocks/event_broadcaster.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/pg/mocks/queryer.go b/core/services/pg/mocks/queryer.go index 57487975d16..fabe6db5ada 100644 --- a/core/services/pg/mocks/queryer.go +++ b/core/services/pg/mocks/queryer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/pg/mocks/subscription.go b/core/services/pg/mocks/subscription.go index 1665a248e1e..6b0ac196340 100644 --- a/core/services/pg/mocks/subscription.go +++ b/core/services/pg/mocks/subscription.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/pg/q.go b/core/services/pg/q.go index 36922a0dea7..3146dd27138 100644 --- a/core/services/pg/q.go +++ b/core/services/pg/q.go @@ -5,12 +5,14 @@ import ( "database/sql" "fmt" "strings" + "sync" "time" "github.com/pkg/errors" - "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/sqlx" + + "github.com/smartcontractkit/chainlink/core/logger" ) // QOpt pattern for ORM methods aims to clarify usage and remove some common footguns, notably: @@ -176,23 +178,23 @@ func (q Q) Transaction(fc func(q Queryer) error, txOpts ...TxOptions) error { func (q Q) ExecQIter(query string, args ...interface{}) (sql.Result, context.CancelFunc, error) { ctx, cancel := q.Context() - q.logSqlQuery(query, args...) 
- begin := time.Now() - defer q.postSqlLog(ctx, begin) + ql := q.newQueryLogger(query, args) + ql.logSqlQuery() + defer ql.postSqlLog(ctx, time.Now()) res, err := q.Queryer.ExecContext(ctx, query, args...) - return res, cancel, q.withLogError(err) + return res, cancel, ql.withLogError(err) } func (q Q) ExecQ(query string, args ...interface{}) error { ctx, cancel := q.Context() defer cancel() - q.logSqlQuery(query, args...) - begin := time.Now() - defer q.postSqlLog(ctx, begin) + ql := q.newQueryLogger(query, args) + ql.logSqlQuery() + defer ql.postSqlLog(ctx, time.Now()) _, err := q.Queryer.ExecContext(ctx, query, args...) - return q.withLogError(err) + return ql.withLogError(err) } func (q Q) ExecQNamed(query string, arg interface{}) (err error) { query, args, err := q.BindNamed(query, arg) @@ -202,12 +204,12 @@ func (q Q) ExecQNamed(query string, arg interface{}) (err error) { ctx, cancel := q.Context() defer cancel() - q.logSqlQuery(query, args...) - begin := time.Now() - defer q.postSqlLog(ctx, begin) + ql := q.newQueryLogger(query, args) + ql.logSqlQuery() + defer ql.postSqlLog(ctx, time.Now()) _, err = q.Queryer.ExecContext(ctx, query, args...) - return q.withLogError(err) + return ql.withLogError(err) } // Select and Get are safe to wrap the context cancellation because the rows @@ -216,21 +218,21 @@ func (q Q) Select(dest interface{}, query string, args ...interface{}) error { ctx, cancel := q.Context() defer cancel() - q.logSqlQuery(query, args...) - begin := time.Now() - defer q.postSqlLog(ctx, begin) + ql := q.newQueryLogger(query, args) + ql.logSqlQuery() + defer ql.postSqlLog(ctx, time.Now()) - return q.withLogError(q.Queryer.SelectContext(ctx, dest, query, args...)) + return ql.withLogError(q.Queryer.SelectContext(ctx, dest, query, args...)) } func (q Q) Get(dest interface{}, query string, args ...interface{}) error { ctx, cancel := q.Context() defer cancel() - q.logSqlQuery(query, args...) 
- begin := time.Now() - defer q.postSqlLog(ctx, begin) + ql := q.newQueryLogger(query, args) + ql.logSqlQuery() + defer ql.postSqlLog(ctx, time.Now()) - return q.withLogError(q.Queryer.GetContext(ctx, dest, query, args...)) + return ql.withLogError(q.Queryer.GetContext(ctx, dest, query, args...)) } func (q Q) GetNamed(sql string, dest interface{}, arg interface{}) error { query, args, err := q.BindNamed(sql, arg) @@ -240,47 +242,65 @@ func (q Q) GetNamed(sql string, dest interface{}, arg interface{}) error { ctx, cancel := q.Context() defer cancel() - q.logSqlQuery(query, args...) - begin := time.Now() - defer q.postSqlLog(ctx, begin) + ql := q.newQueryLogger(query, args) + ql.logSqlQuery() + defer ql.postSqlLog(ctx, time.Now()) - return q.withLogError(errors.Wrap(q.GetContext(ctx, dest, query, args...), "error in get query")) + return ql.withLogError(errors.Wrap(q.GetContext(ctx, dest, query, args...), "error in get query")) } -type queryFmt struct { - query string - args []interface{} +func (q Q) newQueryLogger(query string, args []interface{}) *queryLogger { + return &queryLogger{Q: q, query: query, args: args} } -func (q queryFmt) String() string { - if q.args == nil { - return q.query +// sprintQ formats the query with the given args and returns the resulting string. +func sprintQ(query string, args []interface{}) string { + if args == nil { + return query } var pairs []string - for i, arg := range q.args { + for i, arg := range args { pairs = append(pairs, fmt.Sprintf("$%d", i+1), fmt.Sprintf("%v", arg)) } replacer := strings.NewReplacer(pairs...) - return replacer.Replace(q.query) + return replacer.Replace(query) +} + +// queryLogger extends Q with logging helpers for a particular query w/ args. 
+type queryLogger struct { + Q + + query string + args []interface{} + + str string + strOnce sync.Once +} + +func (q *queryLogger) String() string { + q.strOnce.Do(func() { + q.str = sprintQ(q.query, q.args) + }) + return q.str } -func (q Q) logSqlQuery(query string, args ...interface{}) { +func (q *queryLogger) logSqlQuery() { if q.config != nil && q.config.LogSQL() { - q.logger.Debugf("SQL: %s", queryFmt{query, args}) + q.logger.Debugw("SQL QUERY", "sql", q) } } -func (q Q) withLogError(err error) error { +func (q *queryLogger) withLogError(err error) error { if err != nil && !errors.Is(err, sql.ErrNoRows) && q.config != nil && q.config.LogSQL() { - q.logger.Errorf("SQL ERROR: %v", err) + q.logger.Errorw("SQL ERROR", "err", err, "sql", q) } return err } -func (q Q) postSqlLog(ctx context.Context, begin time.Time) { +func (q *queryLogger) postSqlLog(ctx context.Context, begin time.Time) { elapsed := time.Since(begin) if ctx.Err() != nil { - q.logger.Debugf("SQL CONTEXT CANCELLED: %d ms, err=%v", elapsed.Milliseconds(), ctx.Err()) + q.logger.Debugw("SQL CONTEXT CANCELLED", "ms", elapsed.Milliseconds(), "err", ctx.Err(), "sql", q) } timeout := q.QueryTimeout @@ -289,6 +309,6 @@ func (q Q) postSqlLog(ctx context.Context, begin time.Time) { } slowThreshold := timeout / 10 if slowThreshold > 0 && elapsed > slowThreshold { - q.logger.Warnf("SLOW SQL QUERY: %d ms", elapsed.Milliseconds()) + q.logger.Warnw("SLOW SQL QUERY", "ms", elapsed.Milliseconds(), "timeout", timeout.Milliseconds(), "sql", q) } } diff --git a/core/services/pg/q_test.go b/core/services/pg/q_test.go new file mode 100644 index 00000000000..9b32dc51632 --- /dev/null +++ b/core/services/pg/q_test.go @@ -0,0 +1,44 @@ +package pg + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_sprintQ(t *testing.T) { + for _, tt := range []struct { + name string + query string + args []interface{} + exp string + }{ + {"none", + "SELECT * FROM table;", + nil, + "SELECT * FROM table;"}, + 
{"one", + "SELECT $1 FROM table;", + []interface{}{"foo"}, + "SELECT foo FROM table;"}, + {"two", + "SELECT $1 FROM table WHERE bar = $2;", + []interface{}{"foo", 1}, + "SELECT foo FROM table WHERE bar = 1;"}, + {"limit", + "SELECT $1 FROM table LIMIT $2;", + []interface{}{"foo", Limit(10)}, + "SELECT foo FROM table LIMIT 10;"}, + {"limit-all", + "SELECT $1 FROM table LIMIT $2;", + []interface{}{"foo", Limit(-1)}, + "SELECT foo FROM table LIMIT NULL;"}, + } { + t.Run(tt.name, func(t *testing.T) { + got := sprintQ(tt.query, tt.args) + t.Log(tt.query, tt.args) + t.Log(got) + require.Equal(t, tt.exp, got) + }) + } +} diff --git a/core/services/pg/utils.go b/core/services/pg/utils.go index a14848f18d5..65fb35d9fa2 100644 --- a/core/services/pg/utils.go +++ b/core/services/pg/utils.go @@ -2,8 +2,10 @@ package pg import ( "context" + "database/sql/driver" "fmt" "os" + "strconv" "time" "github.com/smartcontractkit/chainlink/core/config/parse" @@ -59,3 +61,22 @@ func DefaultQueryCtx() (context.Context, context.CancelFunc) { func DefaultQueryCtxWithParent(ctx context.Context) (context.Context, context.CancelFunc) { return context.WithTimeout(ctx, DefaultQueryTimeout) } + +var _ driver.Valuer = Limit(-1) + +// Limit is a helper driver.Valuer for LIMIT queries which uses nil/NULL for negative values. 
+type Limit int + +func (l Limit) String() string { + if l < 0 { + return "NULL" + } + return strconv.Itoa(int(l)) +} + +func (l Limit) Value() (driver.Value, error) { + if l < 0 { + return nil, nil + } + return l, nil +} diff --git a/core/services/pipeline/common.go b/core/services/pipeline/common.go index bb37056f563..f2221651116 100644 --- a/core/services/pipeline/common.go +++ b/core/services/pipeline/common.go @@ -3,7 +3,6 @@ package pipeline import ( "context" "database/sql/driver" - "encoding/hex" "encoding/json" "math/big" "net/url" @@ -13,6 +12,7 @@ import ( "strings" "time" + "github.com/ethereum/go-ethereum/common" "github.com/mitchellh/mapstructure" "github.com/pkg/errors" uuid "github.com/satori/go.uuid" @@ -71,6 +71,7 @@ var ( ErrBadInput = errors.New("bad input for task") ErrInputTaskErrored = errors.New("input task errored") ErrParameterEmpty = errors.New("parameter is empty") + ErrIndexOutOfRange = errors.New("index out of range") ErrTooManyErrors = errors.New("too many errors") ErrTimeout = errors.New("timeout") ErrTaskRunFailed = errors.New("task run failed") @@ -224,38 +225,22 @@ func (js *JSONSerializable) UnmarshalJSON(bs []byte) error { if js == nil { *js = JSONSerializable{} } - str := string(bs) - if str == "" || str == "null" { + if len(bs) == 0 { js.Valid = false return nil } - err := json.Unmarshal(bs, &js.Val) - js.Valid = err == nil + js.Valid = err == nil && js.Val != nil return err } // MarshalJSON implements custom marshaling logic func (js JSONSerializable) MarshalJSON() ([]byte, error) { if !js.Valid { - return []byte("null"), nil - } - switch x := js.Val.(type) { - case []byte: - // Don't need to HEX encode if it is a valid JSON string - if json.Valid(x) { - return json.Marshal(string(x)) - } - - // Don't need to HEX encode if it is already HEX encoded value - if utils.IsHexBytes(x) { - return json.Marshal(string(x)) - } - - return json.Marshal(hex.EncodeToString(x)) - default: - return json.Marshal(js.Val) + return 
json.Marshal(nil) } + jsWithHex := replaceBytesWithHex(js.Val) + return json.Marshal(jsWithHex) } func (js *JSONSerializable) Scan(value interface{}) error { @@ -458,3 +443,74 @@ func getChainByString(chainSet evm.ChainSet, str string) (evm.Chain, error) { } return chainSet.Get(id) } + +// replaceBytesWithHex replaces all []byte with hex-encoded strings +func replaceBytesWithHex(val interface{}) interface{} { + switch value := val.(type) { + case nil: + return value + case []byte: + return utils.StringToHex(string(value)) + case common.Address: + return value.Hex() + case common.Hash: + return value.Hex() + case [][]byte: + var list []string + for _, bytes := range value { + list = append(list, utils.StringToHex(string(bytes))) + } + return list + case []common.Address: + var list []string + for _, addr := range value { + list = append(list, addr.Hex()) + } + return list + case []common.Hash: + var list []string + for _, hash := range value { + list = append(list, hash.Hex()) + } + return list + case []interface{}: + if value == nil { + return value + } + var list []interface{} + for _, item := range value { + list = append(list, replaceBytesWithHex(item)) + } + return list + case map[string]interface{}: + if value == nil { + return value + } + m := make(map[string]interface{}) + for k, v := range value { + m[k] = replaceBytesWithHex(v) + } + return m + default: + // This handles solidity types: bytes1..bytes32, + // which map to [1]uint8..[32]uint8 when decoded. + // We persist them as hex strings, and we know ETH ABI encoders + // can parse hex strings, same as BytesParam does. + if s := uint8ArrayToSlice(value); s != nil { + return replaceBytesWithHex(s) + } + return value + } +} + +// uint8ArrayToSlice converts [N]uint8 array to slice. 
+func uint8ArrayToSlice(arr interface{}) interface{} { + t := reflect.TypeOf(arr) + if t.Kind() != reflect.Array || t.Elem().Kind() != reflect.Uint8 { + return nil + } + v := reflect.ValueOf(arr) + s := reflect.MakeSlice(reflect.SliceOf(t.Elem()), v.Len(), v.Len()) + reflect.Copy(s, v) + return s.Interface() +} diff --git a/core/services/pipeline/common_eth_fuzz_test.go b/core/services/pipeline/common_eth_fuzz_test.go new file mode 100644 index 00000000000..fa9986ae117 --- /dev/null +++ b/core/services/pipeline/common_eth_fuzz_test.go @@ -0,0 +1,19 @@ +//go:build go1.18 + +package pipeline + +import ( + "testing" +) + +func FuzzParseETHABIArgsString(f *testing.F) { + for _, tt := range testsABIDecode { + f.Add(tt.abi, false) + } + f.Fuzz(func(t *testing.T, theABI string, isLog bool) { + _, _, err := ParseETHABIArgsString([]byte(theABI), isLog) + if err != nil { + t.Skip() + } + }) +} diff --git a/core/services/pipeline/common_test.go b/core/services/pipeline/common_test.go index ccd1e7fc435..9e95f1046ba 100644 --- a/core/services/pipeline/common_test.go +++ b/core/services/pipeline/common_test.go @@ -5,11 +5,15 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" "github.com/smartcontractkit/chainlink/core/services/pipeline" + "gopkg.in/guregu/null.v4" - "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/core/internal/cltest" ) func TestTimeoutAttribute(t *testing.T) { @@ -30,7 +34,7 @@ func TestTimeoutAttribute(t *testing.T) { assert.Equal(t, false, set) } -func Test_TaskHTTPUnmarshal(t *testing.T) { +func TestTaskHTTPUnmarshal(t *testing.T) { t.Parallel() a := `ds1 [type=http allowunrestrictednetworkaccess=true method=GET url="https://chain.link/voter_turnout/USA-2020" requestData=<{"hi": "hello"}> timeout="10s"];` @@ -42,7 +46,7 @@ func Test_TaskHTTPUnmarshal(t *testing.T) { require.Equal(t, 
"true", task.AllowUnrestrictedNetworkAccess) } -func Test_TaskAnyUnmarshal(t *testing.T) { +func TestTaskAnyUnmarshal(t *testing.T) { t.Parallel() a := `ds1 [type=any failEarly=true];` @@ -54,7 +58,7 @@ func Test_TaskAnyUnmarshal(t *testing.T) { require.Equal(t, true, p.Tasks[0].Base().FailEarly) } -func Test_RetryUnmarshal(t *testing.T) { +func TestRetryUnmarshal(t *testing.T) { t.Parallel() tests := []struct { @@ -90,20 +94,26 @@ func Test_RetryUnmarshal(t *testing.T) { } for _, test := range tests { - p, err := pipeline.Parse(test.spec) - require.NoError(t, err) - require.Len(t, p.Tasks, 1) - require.Equal(t, test.retries, p.Tasks[0].TaskRetries()) - require.Equal(t, test.min, p.Tasks[0].TaskMinBackoff()) - require.Equal(t, test.max, p.Tasks[0].TaskMaxBackoff()) - } + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + p, err := pipeline.Parse(test.spec) + require.NoError(t, err) + require.Len(t, p.Tasks, 1) + require.Equal(t, test.retries, p.Tasks[0].TaskRetries()) + require.Equal(t, test.min, p.Tasks[0].TaskMinBackoff()) + require.Equal(t, test.max, p.Tasks[0].TaskMaxBackoff()) + }) + } } -func Test_UnmarshalTaskFromMap(t *testing.T) { +func TestUnmarshalTaskFromMap(t *testing.T) { t.Parallel() t.Run("returns error if task is not the right type", func(t *testing.T) { + t.Parallel() + taskMap := interface{}(nil) _, err := pipeline.UnmarshalTaskFromMap(pipeline.TaskType("http"), taskMap, 0, "foo-dot-id") require.EqualError(t, err, "UnmarshalTaskFromMap: UnmarshalTaskFromMap only accepts a map[string]interface{} or a map[string]string. 
Got () of type ") @@ -116,42 +126,200 @@ func Test_UnmarshalTaskFromMap(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), "UnmarshalTaskFromMap: UnmarshalTaskFromMap only accepts a map[string]interface{} or a map[string]string") }) + + t.Run("unknown task type", func(t *testing.T) { + t.Parallel() + + taskMap := map[string]string{} + _, err := pipeline.UnmarshalTaskFromMap(pipeline.TaskType("xxx"), taskMap, 0, "foo-dot-id") + require.EqualError(t, err, `UnmarshalTaskFromMap: unknown task type: "xxx"`) + }) + + tests := []struct { + taskType pipeline.TaskType + expectedTaskType interface{} + }{ + {pipeline.TaskTypeHTTP, &pipeline.HTTPTask{}}, + {pipeline.TaskTypeBridge, &pipeline.BridgeTask{}}, + {pipeline.TaskTypeMean, &pipeline.MeanTask{}}, + {pipeline.TaskTypeMedian, &pipeline.MedianTask{}}, + {pipeline.TaskTypeMode, &pipeline.ModeTask{}}, + {pipeline.TaskTypeSum, &pipeline.SumTask{}}, + {pipeline.TaskTypeMultiply, &pipeline.MultiplyTask{}}, + {pipeline.TaskTypeDivide, &pipeline.DivideTask{}}, + {pipeline.TaskTypeJSONParse, &pipeline.JSONParseTask{}}, + {pipeline.TaskTypeCBORParse, &pipeline.CBORParseTask{}}, + {pipeline.TaskTypeAny, &pipeline.AnyTask{}}, + {pipeline.TaskTypeVRF, &pipeline.VRFTask{}}, + {pipeline.TaskTypeVRFV2, &pipeline.VRFTaskV2{}}, + {pipeline.TaskTypeEstimateGasLimit, &pipeline.EstimateGasLimitTask{}}, + {pipeline.TaskTypeETHCall, &pipeline.ETHCallTask{}}, + {pipeline.TaskTypeETHTx, &pipeline.ETHTxTask{}}, + {pipeline.TaskTypeETHABIEncode, &pipeline.ETHABIEncodeTask{}}, + {pipeline.TaskTypeETHABIEncode2, &pipeline.ETHABIEncodeTask2{}}, + {pipeline.TaskTypeETHABIDecode, &pipeline.ETHABIDecodeTask{}}, + {pipeline.TaskTypeETHABIDecodeLog, &pipeline.ETHABIDecodeLogTask{}}, + {pipeline.TaskTypeMerge, &pipeline.MergeTask{}}, + {pipeline.TaskTypeLowercase, &pipeline.LowercaseTask{}}, + {pipeline.TaskTypeUppercase, &pipeline.UppercaseTask{}}, + } + + for _, test := range tests { + test := test + t.Run(string(test.taskType), 
func(t *testing.T) { + t.Parallel() + + taskMap := map[string]string{} + task, err := pipeline.UnmarshalTaskFromMap(test.taskType, taskMap, 0, "foo-dot-id") + require.NoError(t, err) + require.IsType(t, test.expectedTaskType, task) + }) + } } -func TestUnmarshalJSONSerializable_Valid(t *testing.T) { +func TestMarshalJSONSerializable_replaceBytesWithHex(t *testing.T) { + t.Parallel() + + type jsm = map[string]interface{} + + toJSONSerializable := func(val jsm) *pipeline.JSONSerializable { + return &pipeline.JSONSerializable{ + Valid: true, + Val: val, + } + } + + var ( + testAddr1 = common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406f111") + testAddr2 = common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406f222") + testHash1 = common.HexToHash("0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071eaf111") + testHash2 = common.HexToHash("0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071eaf222") + ) + + tests := []struct { + name string + input *pipeline.JSONSerializable + expected string + err error + }{ + {"invalid input", &pipeline.JSONSerializable{Valid: false}, "null", nil}, + {"empty object", toJSONSerializable(jsm{}), "{}", nil}, + {"byte slice", toJSONSerializable(jsm{"slice": []byte{0x10, 0x20, 0x30}}), + `{"slice":"0x102030"}`, nil}, + {"address", toJSONSerializable(jsm{"addr": testAddr1}), + `{"addr":"0x2aB9a2dc53736B361B72d900cDF9f78f9406f111"}`, nil}, + {"hash", toJSONSerializable(jsm{"hash": testHash1}), + `{"hash":"0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071eaf111"}`, nil}, + {"slice of byte slice", toJSONSerializable(jsm{"slices": [][]byte{{0x10, 0x11, 0x12}, {0x20, 0x21, 0x22}}}), + `{"slices":["0x101112","0x202122"]}`, nil}, + {"slice of addresses", toJSONSerializable(jsm{"addresses": []common.Address{testAddr1, testAddr2}}), + `{"addresses":["0x2aB9a2dc53736B361B72d900cDF9f78f9406f111","0x2aB9A2Dc53736b361b72D900CDf9f78f9406F222"]}`, nil}, + {"slice of hashes", toJSONSerializable(jsm{"hashes": 
[]common.Hash{testHash1, testHash2}}), + `{"hashes":["0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071eaf111","0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071eaf222"]}`, nil}, + {"slice of interfaces", toJSONSerializable(jsm{"ifaces": []interface{}{[]byte{0x10, 0x11, 0x12}, []byte{0x20, 0x21, 0x22}}}), + `{"ifaces":["0x101112","0x202122"]}`, nil}, + {"map", toJSONSerializable(jsm{"map": jsm{"slice": []byte{0x10, 0x11, 0x12}, "addr": testAddr1}}), + `{"map":{"addr":"0x2aB9a2dc53736B361B72d900cDF9f78f9406f111","slice":"0x101112"}}`, nil}, + {"byte array 4", toJSONSerializable(jsm{"ba4": [4]byte{1, 2, 3, 4}}), + `{"ba4":"0x01020304"}`, nil}, + {"byte array 8", toJSONSerializable(jsm{"ba8": [8]uint8{1, 2, 3, 4, 5, 6, 7, 8}}), + `{"ba8":"0x0102030405060708"}`, nil}, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + bytes, err := test.input.MarshalJSON() + assert.Equal(t, test.expected, string(bytes)) + assert.Equal(t, test.err, errors.Cause(err)) + }) + } +} + +func TestUnmarshalJSONSerializable(t *testing.T) { + t.Parallel() + tests := []struct { name, input string expected interface{} }{ + {"null json", `null`, nil}, {"bool", `true`, true}, {"string", `"foo"`, "foo"}, {"raw", `{"foo": 42}`, map[string]interface{}{"foo": float64(42)}}, } for _, test := range tests { + test := test t.Run(test.name, func(t *testing.T) { + t.Parallel() + var i pipeline.JSONSerializable err := json.Unmarshal([]byte(test.input), &i) require.NoError(t, err) - assert.True(t, i.Valid) - assert.Equal(t, test.expected, i.Val) + if test.expected != nil { + assert.True(t, i.Valid) + assert.Equal(t, test.expected, i.Val) + } }) } } -func TestUnmarshalJSONSerializable_Invalid(t *testing.T) { +func TestCheckInputs(t *testing.T) { + t.Parallel() + + emptyPR := []pipeline.Result{} + nonEmptyPR := []pipeline.Result{ + { + Value: "foo", + Error: nil, + }, + { + Value: "err", + Error: errors.New("bar"), + }, + } + tests 
:= []struct { - name, input string + name string + pr []pipeline.Result + minLen, maxLen, maxErrors int + err error + outputsLen int }{ - {"null json", `null`}, + {"minLen violation", emptyPR, 1, 0, 0, pipeline.ErrWrongInputCardinality, 0}, + {"maxLen violation", nonEmptyPR, 1, 1, 0, pipeline.ErrWrongInputCardinality, 0}, + {"maxErrors violation", nonEmptyPR, 1, 2, 0, pipeline.ErrTooManyErrors, 0}, + {"ok", nonEmptyPR, 1, 2, 1, nil, 1}, } for _, test := range tests { + test := test t.Run(test.name, func(t *testing.T) { - var i pipeline.JSONSerializable - err := json.Unmarshal([]byte(test.input), &i) - require.NoError(t, err) - assert.False(t, i.Valid) + t.Parallel() + + outputs, err := pipeline.CheckInputs(test.pr, test.minLen, test.maxLen, test.maxErrors) + if test.err == nil { + assert.NoError(t, err) + assert.Equal(t, test.outputsLen, len(outputs)) + } else { + assert.Equal(t, test.err, errors.Cause(err)) + } }) } } + +func TestTaskRunResult_IsPending(t *testing.T) { + t.Parallel() + + trr := &pipeline.TaskRunResult{} + assert.True(t, trr.IsPending()) + + trrWithResult := &pipeline.TaskRunResult{Result: pipeline.Result{Value: "foo"}} + assert.False(t, trrWithResult.IsPending()) + + trrWithFinishedAt := &pipeline.TaskRunResult{FinishedAt: null.NewTime(time.Now(), true)} + assert.False(t, trrWithFinishedAt.IsPending()) +} diff --git a/core/services/pipeline/getters.go b/core/services/pipeline/getters.go new file mode 100644 index 00000000000..7928c9d037a --- /dev/null +++ b/core/services/pipeline/getters.go @@ -0,0 +1,178 @@ +package pipeline + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/pkg/errors" +) + +// GetterFunc is a function that either returns a value or an error. +type GetterFunc func() (interface{}, error) + +// From creates []GetterFunc from a mix of getters or bare values. 
+func From(getters ...interface{}) []GetterFunc { + var gfs []GetterFunc + for _, g := range getters { + switch v := g.(type) { + case GetterFunc: + gfs = append(gfs, v) + + default: + // If a bare value is passed in, create a simple getter + gfs = append(gfs, func() (interface{}, error) { + return v, nil + }) + } + } + return gfs +} + +// NonemptyString creates a getter to ensure the string is non-empty. +func NonemptyString(s string) GetterFunc { + return func() (interface{}, error) { + trimmed := strings.TrimSpace(s) + if len(trimmed) == 0 { + return nil, ErrParameterEmpty + } + return trimmed, nil + } +} + +// Input creates a getter returning inputs[index] value, or error if index is out of range. +func Input(inputs []Result, index int) GetterFunc { + return func() (interface{}, error) { + if index < 0 || index >= len(inputs) { + return nil, ErrIndexOutOfRange + } + return inputs[index].Value, inputs[index].Error + } +} + +// Inputs creates a getter returning array of Result.Value (or Result.Error where not nil). +func Inputs(inputs []Result) GetterFunc { + return func() (interface{}, error) { + var vals []interface{} + for _, input := range inputs { + if input.Error != nil { + vals = append(vals, input.Error) + } else { + vals = append(vals, input.Value) + } + } + return vals, nil + } +} + +// VarExpr creates a getter interpolating expr value using the given Vars. +// The expression allows whitespace on both ends that will be trimmed. 
+// Expr examples: $(foo.bar), $(arr.1), $(bar) +func VarExpr(expr string, vars Vars) GetterFunc { + return func() (interface{}, error) { + trimmed := strings.TrimSpace(expr) + if len(trimmed) == 0 { + return nil, ErrParameterEmpty + } + isVariableExpr := strings.Count(trimmed, "$") == 1 && trimmed[:2] == "$(" && trimmed[len(trimmed)-1] == ')' + if !isVariableExpr { + return nil, ErrParameterEmpty + } + keypath := strings.TrimSpace(trimmed[2 : len(trimmed)-1]) + if len(keypath) == 0 { + return nil, ErrParameterEmpty + } + val, err := vars.Get(keypath) + if err != nil { + return nil, err + } else if as, is := val.(error); is { + return nil, errors.Wrapf(ErrTooManyErrors, "VarExpr: %v", as) + } + return val, nil + } +} + +// JSONWithVarExprs creates a getter that unmarshals jsExpr string as JSON, and +// interpolates all variables expressions found in jsExpr from Vars. +// The getter returns the unmarshalled object having expressions interpolated from Vars. +// allowErrors flag indicates if interpolating values stored in Vars can be errors. 
+// jsExpr example: {"requestId": $(decode_log.requestId), "payment": $(decode_log.payment)} +func JSONWithVarExprs(jsExpr string, vars Vars, allowErrors bool) GetterFunc { + return func() (interface{}, error) { + if strings.TrimSpace(jsExpr) == "" { + return nil, ErrParameterEmpty + } + const chainlinkKeyPath = "__chainlink_key_path__" + replaced := variableRegexp.ReplaceAllFunc([]byte(jsExpr), func(expr []byte) []byte { + keypathStr := strings.TrimSpace(string(expr[2 : len(expr)-1])) + return []byte(fmt.Sprintf(`{ "%s": "%s" }`, chainlinkKeyPath, keypathStr)) + }) + var val interface{} + err := json.Unmarshal(replaced, &val) + if err != nil { + return nil, errors.Wrapf(ErrBadInput, "while unmarshalling JSON: %v; js: %s", err, string(replaced)) + } + return mapGoValue(val, func(val interface{}) (interface{}, error) { + if m, is := val.(map[string]interface{}); is { + maybeKeypath, exists := m[chainlinkKeyPath] + if !exists { + return val, nil + } + keypath, is := maybeKeypath.(string) + if !is { + return nil, errors.Wrapf(ErrBadInput, fmt.Sprintf("you cannot use %s in your JSON", chainlinkKeyPath)) + } + newVal, err := vars.Get(keypath) + if err != nil { + return nil, err + } else if err, is := newVal.(error); is && !allowErrors { + return nil, errors.Wrapf(ErrBadInput, "error is not allowed: %v", err) + } + return newVal, nil + } + return val, nil + }) + } +} + +// mapGoValue iterates on v object recursively and calls fn for each value. +// Used by JSONWithVarExprs to interpolate all variables expressions. 
+func mapGoValue(v interface{}, fn func(val interface{}) (interface{}, error)) (x interface{}, err error) { + type item struct { + val interface{} + parentMap map[string]interface{} + parentKey string + parentSlice []interface{} + parentIdx int + } + + stack := []item{{val: v}} + var current item + + for len(stack) > 0 { + current = stack[0] + stack = stack[1:] + + val, err := fn(current.val) + if err != nil { + return nil, err + } + + if current.parentMap != nil { + current.parentMap[current.parentKey] = val + } else if current.parentSlice != nil { + current.parentSlice[current.parentIdx] = val + } + + if asMap, isMap := val.(map[string]interface{}); isMap { + for key := range asMap { + stack = append(stack, item{val: asMap[key], parentMap: asMap, parentKey: key}) + } + } else if asSlice, isSlice := val.([]interface{}); isSlice { + for i := range asSlice { + stack = append(stack, item{val: asSlice[i], parentSlice: asSlice, parentIdx: i}) + } + } + } + return v, nil +} diff --git a/core/services/pipeline/getters_test.go b/core/services/pipeline/getters_test.go new file mode 100644 index 00000000000..cb5c917f76f --- /dev/null +++ b/core/services/pipeline/getters_test.go @@ -0,0 +1,269 @@ +package pipeline_test + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/core/services/pipeline" +) + +func TestGetters_VarExpr(t *testing.T) { + t.Parallel() + + vars := createTestVars() + + tests := []struct { + expr string + result interface{} + err error + }{ + // no errors + {"$(foo.bar)", "value", nil}, + {" $(foo.bar)", "value", nil}, + {"$(foo.bar) ", "value", nil}, + {"$( foo.bar)", "value", nil}, + {"$(foo.bar )", "value", nil}, + {"$( foo.bar )", "value", nil}, + {" $( foo.bar )", "value", nil}, + // errors + {" ", nil, pipeline.ErrParameterEmpty}, + {"$()", nil, pipeline.ErrParameterEmpty}, + {"$(foo.bar", nil, pipeline.ErrParameterEmpty}, + 
{"$foo.bar)", nil, pipeline.ErrParameterEmpty}, + {"(foo.bar)", nil, pipeline.ErrParameterEmpty}, + {"foo.bar", nil, pipeline.ErrParameterEmpty}, + {"$(err)", nil, pipeline.ErrTooManyErrors}, + } + + for _, test := range tests { + test := test + + t.Run(test.expr, func(t *testing.T) { + t.Parallel() + + getter := pipeline.VarExpr(test.expr, vars) + v, err := getter() + if test.err == nil { + assert.NoError(t, err) + assert.Equal(t, test.result, v) + } else { + assert.Equal(t, test.err, errors.Cause(err)) + } + }) + } +} + +func TestGetters_JSONWithVarExprs(t *testing.T) { + t.Parallel() + + vars := createTestVars() + + errVal, err := vars.Get("err") + require.NoError(t, err) + + tests := []struct { + json string + field string + result interface{} + err error + allowErrors bool + }{ + // no errors + {`{ "x": $(zet) }`, "x", 123, nil, false}, + {`{ "x": $( zet ) }`, "x", 123, nil, false}, + {`{ "x": { "y": $(zet) } }`, "x", map[string]interface{}{"y": 123}, nil, false}, + {`{ "z": "foo" }`, "z", "foo", nil, false}, + {`{ "a": $(arr.1) }`, "a", 200, nil, false}, + {`{}`, "", map[string]interface{}{}, nil, false}, + {`{ "e": $(err) }`, "e", errVal, nil, true}, + {`null`, "", nil, nil, false}, + // errors + {` `, "", nil, pipeline.ErrParameterEmpty, false}, + {`{ "x": $(missing) }`, "x", nil, pipeline.ErrKeypathNotFound, false}, + {`{ "x": "$(zet)" }`, "x", "$(zet)", pipeline.ErrBadInput, false}, + {`{ "$(foo.bar)": $(zet) }`, "value", 123, pipeline.ErrBadInput, false}, + {`{ "x": { "__chainlink_key_path__": 0 } }`, "", nil, pipeline.ErrBadInput, false}, + {`{ "e": $(err)`, "e", nil, pipeline.ErrBadInput, false}, + } + + for _, test := range tests { + test := test + + t.Run(test.json, func(t *testing.T) { + t.Parallel() + + getter := pipeline.JSONWithVarExprs(test.json, vars, test.allowErrors) + v, err := getter() + if test.err != nil { + assert.Equal(t, test.err, errors.Cause(err)) + } else { + m, is := v.(map[string]interface{}) + if is && test.field != "" { + 
assert.Equal(t, test.result, m[test.field]) + } else { + assert.Equal(t, test.result, v) + } + } + }) + } +} + +func TestGetters_Input(t *testing.T) { + t.Parallel() + + t.Run("returns the requested input's Value and Error if they exist", func(t *testing.T) { + t.Parallel() + + expectedVal := "bar" + expectedErr := errors.New("some err") + val, err := pipeline.Input([]pipeline.Result{{Value: "foo"}, {Value: expectedVal, Error: expectedErr}, {Value: "baz"}}, 1)() + assert.Equal(t, expectedVal, val) + assert.Equal(t, expectedErr, err) + }) + + t.Run("returns ErrIndexOutOfRange if the specified index is out of range", func(t *testing.T) { + t.Parallel() + + _, err := pipeline.Input([]pipeline.Result{{Value: "foo"}}, 1)() + assert.Equal(t, pipeline.ErrIndexOutOfRange, errors.Cause(err)) + + _, err = pipeline.Input([]pipeline.Result{{Value: "foo"}}, -1)() + assert.Equal(t, pipeline.ErrIndexOutOfRange, errors.Cause(err)) + }) +} + +func TestGetters_Inputs(t *testing.T) { + t.Parallel() + + theErr := errors.New("some issue") + + tests := []struct { + name string + inputs []pipeline.Result + expected []interface{} + expectedErr error + }{ + { + "returns the values and errors", + []pipeline.Result{ + {Value: "foo"}, + {Error: theErr}, + {Value: "baz"}, + }, + []interface{}{"foo", theErr, "baz"}, + nil, + }, + { + "returns nil array", + []pipeline.Result{}, + nil, + nil, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + val, err := pipeline.Inputs(test.inputs)() + assert.Equal(t, test.expectedErr, errors.Cause(err)) + assert.Equal(t, test.expected, val) + }) + } +} + +func TestGetters_NonemptyString(t *testing.T) { + t.Parallel() + + t.Run("returns any non-empty string", func(t *testing.T) { + t.Parallel() + + val, err := pipeline.NonemptyString("foo bar")() + assert.NoError(t, err) + assert.Equal(t, "foo bar", val) + }) + + t.Run("returns ErrParameterEmpty when given an empty string (including only 
spaces)", func(t *testing.T) { + t.Parallel() + + _, err := pipeline.NonemptyString("")() + assert.Equal(t, pipeline.ErrParameterEmpty, errors.Cause(err)) + _, err = pipeline.NonemptyString(" ")() + assert.Equal(t, pipeline.ErrParameterEmpty, errors.Cause(err)) + }) +} + +func TestGetters_From(t *testing.T) { + t.Parallel() + + t.Run("no inputs", func(t *testing.T) { + t.Parallel() + + getters := pipeline.From() + assert.Empty(t, getters) + }) + + var fooGetter1 pipeline.GetterFunc = func() (interface{}, error) { + return "foo", nil + } + var fooGetter2 pipeline.GetterFunc = func() (interface{}, error) { + return "foo", nil + } + + tests := []struct { + name string + input []interface{} + expected string + }{ + { + "only getters", + []interface{}{fooGetter1, fooGetter2}, + "foo", + }, + { + "mix of getters and values", + []interface{}{fooGetter1, "foo"}, + "foo", + }, + { + "only values", + []interface{}{"foo", "foo"}, + "foo", + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + getters := pipeline.From(test.input...) 
+ assert.Len(t, getters, 2) + + for _, getter := range getters { + val, err := getter() + assert.NoError(t, err) + assert.Equal(t, test.expected, val) + } + }) + } +} + +func createTestVars() pipeline.Vars { + return pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{ + "bar": "value", + }, + "zet": 123, + "arr": []interface{}{ + 100, 200, 300, + }, + "err": errors.New("some error"), + }) +} diff --git a/core/services/pipeline/graph.go b/core/services/pipeline/graph.go index 92bdbefca0b..252d6389d43 100644 --- a/core/services/pipeline/graph.go +++ b/core/services/pipeline/graph.go @@ -1,6 +1,7 @@ package pipeline import ( + "fmt" "regexp" "sort" "strings" @@ -36,6 +37,11 @@ func (g *Graph) UnmarshalText(bs []byte) (err error) { if g.DirectedGraph == nil { g.DirectedGraph = simple.NewDirectedGraph() } + defer func() { + if rerr := recover(); rerr != nil { + err = fmt.Errorf("could not unmarshal DOT into a pipeline.Graph: %v", rerr) + } + }() bs = append([]byte("digraph {\n"), bs...) bs = append(bs, []byte("\n}")...) 
err = dot.Unmarshal(bs, g) diff --git a/core/services/pipeline/graph_fuzz_test.go b/core/services/pipeline/graph_fuzz_test.go new file mode 100644 index 00000000000..2579c4bd66d --- /dev/null +++ b/core/services/pipeline/graph_fuzz_test.go @@ -0,0 +1,68 @@ +//go:build go1.18 + +package pipeline_test + +import ( + "testing" + + "github.com/smartcontractkit/chainlink/core/services/keeper" + "github.com/smartcontractkit/chainlink/core/services/pipeline" +) + +func FuzzParse(f *testing.F) { + f.Add(`ds1 [type=http method=GET url="https://chain.link/voter_turnout/USA-2020" requestData=<{"hi": "hello"}> timeout="10s"];`) + f.Add(`ds1 [type=http method=GET url="https://chain.link/voter_turnout/USA-2020" requestData=<{"hi": "hello"}>];`) + f.Add(`ds1 [type=http allowunrestrictednetworkaccess=true method=GET url="https://chain.link/voter_turnout/USA-2020" requestData=<{"hi": "hello"}> timeout="10s"];`) + f.Add(`ds1 [type=any failEarly=true];`) + f.Add(`ds1 [type=any];`) + f.Add(`ds1 [type=any retries=5];`) + f.Add(`ds1 [type=http retries=10 minBackoff="1s" maxBackoff="30m"];`) + f.Add(pipeline.DotStr) + f.Add(keeper.ExpectedObservationSource) + f.Add(CBORDietEmpty) + f.Add(CBORStdString) + f.Add(` + a [type=bridge]; + b [type=multiply times=1.23]; + a -> b -> a; + `) + f.Add(` +a [type=multiply input="$(val)" times=2] +b1 [type=multiply input="$(a)" times=2] +b2 [type=multiply input="$(a)" times=3] +c [type=median values=<[ $(b1), $(b2) ]> index=0] +a->b1->c; +a->b2->c;`) + f.Add(` +// data source 1 +ds1 [type=http method=GET url="https://pricesource1.com" requestData="{\\"coin\\": \\"ETH\\", \\"market\\": \\"USD\\"}"]; +ds1_parse [type=jsonparse path="latest"]; + +// data source 2 +ds2 [type=http method=GET url="https://pricesource1.com" requestData="{\\"coin\\": \\"ETH\\", \\"market\\": \\"USD\\"}"]; +ds2_parse [type=jsonparse path="latest"]; + +ds1 -> ds1_parse -> answer1; +ds2 -> ds2_parse -> answer1; + +answer1 [type=median index=0]; +`) + f.Add(taskRunWithVars{ + 
bridgeName: "testBridge", + ds2URL: "https://example.com/path/to/service?with=args&foo=bar", + ds4URL: "http://chain.link", + submitBridgeName: "testSubmitBridge", + includeInputAtKey: "path.to.key", + }.String()) + f.Add(`s->s`) + f.Add(`0->s->s`) + f.Fuzz(func(t *testing.T, spec string) { + if len(spec) > 1_000_000 { + t.Skip() + } + _, err := pipeline.Parse(spec) + if err != nil { + t.Skip() + } + }) +} diff --git a/core/services/pipeline/helpers_test.go b/core/services/pipeline/helpers_test.go index 3175a89edc7..ace21b01fc8 100644 --- a/core/services/pipeline/helpers_test.go +++ b/core/services/pipeline/helpers_test.go @@ -3,12 +3,9 @@ package pipeline import ( uuid "github.com/satori/go.uuid" - "github.com/smartcontractkit/chainlink/core/chains/evm" "github.com/smartcontractkit/sqlx" -) -var ( - NewKeypathFromString = newKeypathFromString + "github.com/smartcontractkit/chainlink/core/chains/evm" ) const ( diff --git a/core/services/pipeline/keypath.go b/core/services/pipeline/keypath.go new file mode 100644 index 00000000000..10bbd058d91 --- /dev/null +++ b/core/services/pipeline/keypath.go @@ -0,0 +1,45 @@ +package pipeline + +import ( + "strings" + + "github.com/pkg/errors" +) + +var ( + ErrWrongKeypath = errors.New("wrong keypath format") +) + +const KeypathSeparator = "." + +// Keypath contains keypath parsed by NewKeypathFromString. +type Keypath struct { + NumParts int // can be 0, 1 or 2 + Part0 string // can be empty string if NumParts is 0 + Part1 string // can be empty string if NumParts is 0 or 1 +} + +// NewKeypathFromString creates a new Keypath from the given string. +// Returns error if it fails to parse the given keypath string. 
+func NewKeypathFromString(keypathStr string) (Keypath, error) { + if len(keypathStr) == 0 { + return Keypath{}, nil + } + + parts := strings.Split(keypathStr, KeypathSeparator) + + switch len(parts) { + case 0: + return Keypath{}, errors.Wrapf(ErrWrongKeypath, "empty keypath") + case 1: + if len(parts[0]) > 0 { + return Keypath{1, parts[0], ""}, nil + } + case 2: + if len(parts[0]) > 0 && len(parts[1]) > 0 { + return Keypath{2, parts[0], parts[1]}, nil + } + } + + return Keypath{}, errors.Wrapf(ErrWrongKeypath, "while parsing keypath '%v'", keypathStr) +} diff --git a/core/services/pipeline/keypath_test.go b/core/services/pipeline/keypath_test.go new file mode 100644 index 00000000000..d6ff7619767 --- /dev/null +++ b/core/services/pipeline/keypath_test.go @@ -0,0 +1,51 @@ +package pipeline_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink/core/services/pipeline" +) + +func TestKeypath(t *testing.T) { + t.Parallel() + + t.Run("can be constructed from a period-delimited string with 2 or fewer parts", func(t *testing.T) { + t.Parallel() + + kp, err := pipeline.NewKeypathFromString("") + assert.NoError(t, err) + assert.Equal(t, pipeline.Keypath{}, kp) + + kp, err = pipeline.NewKeypathFromString("foo") + assert.NoError(t, err) + assert.Equal(t, pipeline.Keypath{NumParts: 1, Part0: "foo"}, kp) + + kp, err = pipeline.NewKeypathFromString("foo.bar") + assert.NoError(t, err) + assert.Equal(t, pipeline.Keypath{NumParts: 2, Part0: "foo", Part1: "bar"}, kp) + }) + + t.Run("wrong keypath", func(t *testing.T) { + t.Parallel() + + wrongKeyPath := []string{ + ".", + "..", + "x.", + ".y", + "x.y.", + "x.y.z", + } + + for _, keypath := range wrongKeyPath { + t.Run(keypath, func(t *testing.T) { + t.Parallel() + + _, err := pipeline.NewKeypathFromString(keypath) + assert.ErrorIs(t, err, pipeline.ErrWrongKeypath) + }) + } + }) +} diff --git a/core/services/pipeline/mocks/config.go b/core/services/pipeline/mocks/config.go 
index c3ab733f60d..fdcd7dd5af0 100644 --- a/core/services/pipeline/mocks/config.go +++ b/core/services/pipeline/mocks/config.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/pipeline/mocks/eth_key_store.go b/core/services/pipeline/mocks/eth_key_store.go index fb20e063da5..07114a95c35 100644 --- a/core/services/pipeline/mocks/eth_key_store.go +++ b/core/services/pipeline/mocks/eth_key_store.go @@ -1,8 +1,10 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks import ( + big "math/big" + common "github.com/ethereum/go-ethereum/common" mock "github.com/stretchr/testify/mock" ) @@ -12,19 +14,20 @@ type ETHKeyStore struct { mock.Mock } -// GetRoundRobinAddress provides a mock function with given fields: addrs -func (_m *ETHKeyStore) GetRoundRobinAddress(addrs ...common.Address) (common.Address, error) { +// GetRoundRobinAddress provides a mock function with given fields: chainID, addrs +func (_m *ETHKeyStore) GetRoundRobinAddress(chainID *big.Int, addrs ...common.Address) (common.Address, error) { _va := make([]interface{}, len(addrs)) for _i := range addrs { _va[_i] = addrs[_i] } var _ca []interface{} + _ca = append(_ca, chainID) _ca = append(_ca, _va...) ret := _m.Called(_ca...) var r0 common.Address - if rf, ok := ret.Get(0).(func(...common.Address) common.Address); ok { - r0 = rf(addrs...) + if rf, ok := ret.Get(0).(func(*big.Int, ...common.Address) common.Address); ok { + r0 = rf(chainID, addrs...) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(common.Address) @@ -32,8 +35,8 @@ func (_m *ETHKeyStore) GetRoundRobinAddress(addrs ...common.Address) (common.Add } var r1 error - if rf, ok := ret.Get(1).(func(...common.Address) error); ok { - r1 = rf(addrs...) + if rf, ok := ret.Get(1).(func(*big.Int, ...common.Address) error); ok { + r1 = rf(chainID, addrs...) 
} else { r1 = ret.Error(1) } diff --git a/core/services/pipeline/mocks/orm.go b/core/services/pipeline/mocks/orm.go index 194ef655b32..5253bdcdc37 100644 --- a/core/services/pipeline/mocks/orm.go +++ b/core/services/pipeline/mocks/orm.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks @@ -192,6 +192,27 @@ func (_m *ORM) InsertFinishedRun(run *pipeline.Run, saveSuccessfulTaskRuns bool, return r0 } +// InsertFinishedRuns provides a mock function with given fields: run, saveSuccessfulTaskRuns, qopts +func (_m *ORM) InsertFinishedRuns(run []*pipeline.Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, run, saveSuccessfulTaskRuns) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 error + if rf, ok := ret.Get(0).(func([]*pipeline.Run, bool, ...pg.QOpt) error); ok { + r0 = rf(run, saveSuccessfulTaskRuns, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // InsertRun provides a mock function with given fields: run, qopts func (_m *ORM) InsertRun(run *pipeline.Run, qopts ...pg.QOpt) error { _va := make([]interface{}, len(qopts)) diff --git a/core/services/pipeline/mocks/pipeline_param_unmarshaler.go b/core/services/pipeline/mocks/pipeline_param_unmarshaler.go index cf60aadbf20..f6d9b6b0001 100644 --- a/core/services/pipeline/mocks/pipeline_param_unmarshaler.go +++ b/core/services/pipeline/mocks/pipeline_param_unmarshaler.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/services/pipeline/mocks/runner.go b/core/services/pipeline/mocks/runner.go index 25b27990ecb..8803a8d2a4f 100644 --- a/core/services/pipeline/mocks/runner.go +++ b/core/services/pipeline/mocks/runner.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks @@ -127,6 +127,27 @@ func (_m *Runner) InsertFinishedRun(run *pipeline.Run, saveSuccessfulTaskRuns bo return r0 } +// InsertFinishedRuns provides a mock function with given fields: runs, saveSuccessfulTaskRuns, qopts +func (_m *Runner) InsertFinishedRuns(runs []*pipeline.Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, runs, saveSuccessfulTaskRuns) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 error + if rf, ok := ret.Get(0).(func([]*pipeline.Run, bool, ...pg.QOpt) error); ok { + r0 = rf(runs, saveSuccessfulTaskRuns, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // OnRunFinished provides a mock function with given fields: _a0 func (_m *Runner) OnRunFinished(_a0 func(*pipeline.Run)) { _m.Called(_a0) diff --git a/core/services/pipeline/mocks/task.go b/core/services/pipeline/mocks/task.go index 211baf00d42..eab8def2d0d 100644 --- a/core/services/pipeline/mocks/task.go +++ b/core/services/pipeline/mocks/task.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/services/pipeline/orm.go b/core/services/pipeline/orm.go index a0c5f551d19..5459ca785e7 100644 --- a/core/services/pipeline/orm.go +++ b/core/services/pipeline/orm.go @@ -24,6 +24,11 @@ type ORM interface { StoreRun(run *Run, qopts ...pg.QOpt) (restart bool, err error) UpdateTaskRunResult(taskID uuid.UUID, result Result) (run Run, start bool, err error) InsertFinishedRun(run *Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) (err error) + + // InsertFinishedRuns inserts all the given runs into the database. + // If saveSuccessfulTaskRuns is false, only errored runs are saved. + InsertFinishedRuns(run []*Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) (err error) + DeleteRunsOlderThan(context.Context, time.Duration) error FindRun(id int64) (Run, error) GetAllRuns() ([]Run, error) @@ -221,10 +226,56 @@ func (o *orm) UpdateTaskRunResult(taskID uuid.UUID, result Result) (run Run, sta return run, start, err } -// If saveSuccessfulTaskRuns = false, we only save errored runs. -// That way if the job is run frequently (such as OCR) we avoid saving a large number of successful task runs -// which do not provide much value. -func (o *orm) InsertFinishedRun(run *Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) (err error) { +// InsertFinishedRuns inserts all the given runs into the database. +func (o *orm) InsertFinishedRuns(runs []*Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) 
+ err := q.Transaction(func(tx pg.Queryer) error { + pipelineRunsQuery := ` +INSERT INTO pipeline_runs + (pipeline_spec_id, meta, all_errors, fatal_errors, inputs, outputs, created_at, finished_at, state) +VALUES + (:pipeline_spec_id, :meta, :all_errors, :fatal_errors, :inputs, :outputs, :created_at, :finished_at, :state) +RETURNING id + ` + rows, errQ := tx.NamedQuery(pipelineRunsQuery, runs) + if errQ != nil { + return errors.Wrap(errQ, "inserting finished pipeline runs") + } + + var runIDs []int64 + for rows.Next() { + var runID int64 + if errS := rows.Scan(&runID); errS != nil { + return errors.Wrap(errS, "scanning pipeline runs id row") + } + runIDs = append(runIDs, runID) + } + + for i, run := range runs { + for j := range run.PipelineTaskRuns { + run.PipelineTaskRuns[j].PipelineRunID = runIDs[i] + } + } + + pipelineTaskRunsQuery := ` +INSERT INTO pipeline_task_runs (pipeline_run_id, id, type, index, output, error, dot_id, created_at, finished_at) +VALUES (:pipeline_run_id, :id, :type, :index, :output, :error, :dot_id, :created_at, :finished_at); + ` + var pipelineTaskRuns []TaskRun + for _, run := range runs { + if !saveSuccessfulTaskRuns && !run.HasErrors() { + continue + } + pipelineTaskRuns = append(pipelineTaskRuns, run.PipelineTaskRuns...) + } + + _, errE := tx.NamedExec(pipelineTaskRunsQuery, pipelineTaskRuns) + return errors.Wrap(errE, "insert pipeline task runs") + }) + return errors.Wrap(err, "InsertFinishedRuns failed") +} + +func (o *orm) checkFinishedRun(run *Run, saveSuccessfulTaskRuns bool) error { if run.CreatedAt.IsZero() { return errors.New("run.CreatedAt must be set") } @@ -237,6 +288,17 @@ func (o *orm) InsertFinishedRun(run *Run, saveSuccessfulTaskRuns bool, qopts ... if len(run.PipelineTaskRuns) == 0 && (saveSuccessfulTaskRuns || run.HasErrors()) { return errors.New("must provide task run results") } + return nil +} + +// InsertFinishedRun inserts the given run into the database. 
+// If saveSuccessfulTaskRuns = false, we only save errored runs. +// That way if the job is run frequently (such as OCR) we avoid saving a large number of successful task runs +// which do not provide much value. +func (o *orm) InsertFinishedRun(run *Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) (err error) { + if err = o.checkFinishedRun(run, saveSuccessfulTaskRuns); err != nil { + return err + } q := o.q.WithOpts(qopts...) err = q.Transaction(func(tx pg.Queryer) error { diff --git a/core/services/pipeline/orm_test.go b/core/services/pipeline/orm_test.go index eb9df983734..f97a40d76e6 100644 --- a/core/services/pipeline/orm_test.go +++ b/core/services/pipeline/orm_test.go @@ -111,6 +111,60 @@ answer2 [type=bridge name=election_winner index=1]; return run } +func TestInsertFinishedRuns(t *testing.T) { + db, orm := setupORM(t) + + _, err := db.Exec(`SET CONSTRAINTS pipeline_runs_pipeline_spec_id_fkey DEFERRED`) + require.NoError(t, err) + + var runs []*pipeline.Run + for i := 0; i < 3; i++ { + now := time.Now() + r := pipeline.Run{ + State: pipeline.RunStatusRunning, + AllErrors: pipeline.RunErrors{}, + FatalErrors: pipeline.RunErrors{}, + CreatedAt: now, + FinishedAt: null.Time{}, + Outputs: pipeline.JSONSerializable{}, + } + + require.NoError(t, orm.InsertRun(&r)) + + r.PipelineTaskRuns = []pipeline.TaskRun{ + { + ID: uuid.NewV4(), + PipelineRunID: r.ID, + Type: "bridge", + DotID: "ds1", + CreatedAt: now, + FinishedAt: null.TimeFrom(now.Add(100 * time.Millisecond)), + }, + { + ID: uuid.NewV4(), + PipelineRunID: r.ID, + Type: "median", + DotID: "answer2", + Output: pipeline.JSONSerializable{Val: 1, Valid: true}, + CreatedAt: now, + FinishedAt: null.TimeFrom(now.Add(200 * time.Millisecond)), + }, + } + r.FinishedAt = null.TimeFrom(now.Add(300 * time.Millisecond)) + r.Outputs = pipeline.JSONSerializable{ + Val: "stuff", + Valid: true, + } + r.FatalErrors = append(r.AllErrors, null.NewString("", false)) + r.State = pipeline.RunStatusCompleted + runs = 
append(runs, &r) + } + + err = orm.InsertFinishedRuns(runs, true) + require.NoError(t, err) + +} + // Tests that inserting run results, then later updating the run results via upsert will work correctly. func Test_PipelineORM_StoreRun_ShouldUpsert(t *testing.T) { _, orm := setupORM(t) diff --git a/core/services/pipeline/runner.go b/core/services/pipeline/runner.go index 7bba39ec5d2..dcfb79df376 100644 --- a/core/services/pipeline/runner.go +++ b/core/services/pipeline/runner.go @@ -38,6 +38,7 @@ type Runner interface { ExecuteRun(ctx context.Context, spec Spec, vars Vars, l logger.Logger) (run Run, trrs TaskRunResults, err error) // InsertFinishedRun saves the run results in the database. InsertFinishedRun(run *Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error + InsertFinishedRuns(runs []*Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error // ExecuteAndInsertFinishedRun executes a new run in-memory according to a spec, persists and saves the results. // It is a combination of ExecuteRun and InsertFinishedRun. @@ -578,6 +579,10 @@ func (r *runner) InsertFinishedRun(run *Run, saveSuccessfulTaskRuns bool, qopts return r.orm.InsertFinishedRun(run, saveSuccessfulTaskRuns, qopts...) } +func (r *runner) InsertFinishedRuns(runs []*Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error { + return r.orm.InsertFinishedRuns(runs, saveSuccessfulTaskRuns, qopts...) 
+} + func (r *runner) runReaper() { ctx, cancel := utils.ContextFromChan(r.chStop) defer cancel() diff --git a/core/services/pipeline/runner_test.go b/core/services/pipeline/runner_test.go index 08fc653f08f..ddb92233e1e 100644 --- a/core/services/pipeline/runner_test.go +++ b/core/services/pipeline/runner_test.go @@ -14,11 +14,14 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/shopspring/decimal" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v4" - "github.com/shopspring/decimal" + "github.com/smartcontractkit/sqlx" + "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/internal/testutils/configtest" "github.com/smartcontractkit/chainlink/core/internal/testutils/evmtest" @@ -28,8 +31,6 @@ import ( "github.com/smartcontractkit/chainlink/core/services/pipeline" "github.com/smartcontractkit/chainlink/core/services/pipeline/mocks" "github.com/smartcontractkit/chainlink/core/utils" - "github.com/smartcontractkit/sqlx" - "github.com/stretchr/testify/require" ) func newRunner(t testing.TB, db *sqlx.DB, cfg *configtest.TestGeneralConfig) (pipeline.Runner, *mocks.ORM) { @@ -122,10 +123,15 @@ ds5 [type=http method="GET" url="%s" index=2] require.Len(t, errorResults, 3) } -func Test_PipelineRunner_ExecuteTaskRunsWithVars(t *testing.T) { - t.Parallel() +type taskRunWithVars struct { + bridgeName string + ds2URL, ds4URL string + submitBridgeName string + includeInputAtKey string +} - specTemplate := ` +func (t taskRunWithVars) String() string { + return fmt.Sprintf(` ds1 [type=bridge name="%s" timeout=0 requestData=<{"data": $(foo)}>] ds1_parse [type=jsonparse lax=false path="data,result" data="$(ds1)"] ds1_multiply [type=multiply input="$(ds1_parse.result)" times="$(ds1_parse.times)"] @@ -155,7 +161,11 @@ func Test_PipelineRunner_ExecuteTaskRunsWithVars(t *testing.T) { 
median -> submit; ds4 -> submit; - ` + `, t.bridgeName, t.ds2URL, t.ds4URL, t.submitBridgeName, t.includeInputAtKey) +} + +func Test_PipelineRunner_ExecuteTaskRunsWithVars(t *testing.T) { + t.Parallel() tests := []struct { name string @@ -245,7 +255,13 @@ func Test_PipelineRunner_ExecuteTaskRunsWithVars(t *testing.T) { defer submit.Close() runner, _ := newRunner(t, db, cfg) - specStr := fmt.Sprintf(specTemplate, bridgeName, ds2.URL, ds4.URL, submitBridgeName, test.includeInputAtKey) + specStr := taskRunWithVars{ + bridgeName: bridgeName, + ds2URL: ds2.URL, + ds4URL: ds4.URL, + submitBridgeName: submitBridgeName, + includeInputAtKey: test.includeInputAtKey, + }.String() p, err := pipeline.Parse(specStr) require.NoError(t, err) @@ -288,13 +304,8 @@ func Test_PipelineRunner_ExecuteTaskRunsWithVars(t *testing.T) { } } -func Test_PipelineRunner_CBORParse(t *testing.T) { - db := pgtest.NewSqlxDB(t) - cfg := cltest.NewTestGeneralConfig(t) - r, _ := newRunner(t, db, cfg) - - t.Run("diet mode, empty CBOR", func(t *testing.T) { - s := ` +const ( + CBORDietEmpty = ` decode_log [type="ethabidecodelog" data="$(jobRun.logData)" topics="$(jobRun.logTopics)" @@ -306,6 +317,27 @@ decode_cbor [type="cborparse" decode_log -> decode_cbor; ` + CBORStdString = ` +decode_log [type="ethabidecodelog" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)" + abi="OracleRequest(address requester, bytes32 requestId, uint256 payment, address callbackAddr, bytes4 callbackFunctionId, uint256 cancelExpiration, uint256 dataVersion, bytes cborPayload)"] + +decode_cbor [type="cborparse" + data="$(decode_log.cborPayload)" + mode=standard] + +decode_log -> decode_cbor; +` +) + +func Test_PipelineRunner_CBORParse(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := cltest.NewTestGeneralConfig(t) + r, _ := newRunner(t, db, cfg) + + t.Run("diet mode, empty CBOR", func(t *testing.T) { + s := CBORDietEmpty d, err := pipeline.Parse(s) require.NoError(t, err) @@ -333,18 +365,7 @@ decode_log -> decode_cbor; 
}) t.Run("standard mode, string value", func(t *testing.T) { - s := ` -decode_log [type="ethabidecodelog" - data="$(jobRun.logData)" - topics="$(jobRun.logTopics)" - abi="OracleRequest(address requester, bytes32 requestId, uint256 payment, address callbackAddr, bytes4 callbackFunctionId, uint256 cancelExpiration, uint256 dataVersion, bytes cborPayload)"] - -decode_cbor [type="cborparse" - data="$(decode_log.cborPayload)" - mode=standard] - -decode_log -> decode_cbor; -` + s := CBORStdString d, err := pipeline.Parse(s) require.NoError(t, err) diff --git a/core/services/pipeline/scheduler.go b/core/services/pipeline/scheduler.go index 8d02cc09c7c..007a93418be 100644 --- a/core/services/pipeline/scheduler.go +++ b/core/services/pipeline/scheduler.go @@ -149,10 +149,14 @@ func (s *scheduler) reconstructResults() { } // store the result in vars + var err error if result.Error != nil { - s.vars.Set(task.DotID(), result.Error) + err = s.vars.Set(task.DotID(), result.Error) } else { - s.vars.Set(task.DotID(), result.Value) + err = s.vars.Set(task.DotID(), result.Value) + } + if err != nil { + s.logger.Panicf("Vars.Set error: %v", err) } // mark all outputs as complete @@ -195,10 +199,14 @@ func (s *scheduler) Run() { } // store the result in vars + var err error if result.Result.Error != nil { - s.vars.Set(result.Task.DotID(), result.Result.Error) + err = s.vars.Set(result.Task.DotID(), result.Result.Error) } else { - s.vars.Set(result.Task.DotID(), result.Result.Value) + err = s.vars.Set(result.Task.DotID(), result.Result.Value) + } + if err != nil { + s.logger.Panicf("Vars.Set error: %v", err) } // if the task was marked as fail early, and the result is a fail diff --git a/core/services/pipeline/task.cborparse.go b/core/services/pipeline/task.cborparse.go index e4bcee547ae..408598c360f 100644 --- a/core/services/pipeline/task.cborparse.go +++ b/core/services/pipeline/task.cborparse.go @@ -4,9 +4,10 @@ import ( "context" "github.com/pkg/errors" - 
"github.com/smartcontractkit/chainlink/core/cbor" "go.uber.org/multierr" + "github.com/smartcontractkit/chainlink/core/cbor" + "github.com/smartcontractkit/chainlink/core/logger" ) @@ -55,15 +56,11 @@ func (t *CBORParseTask) Run(_ context.Context, _ logger.Logger, vars Vars, input // NOTE: In diet mode, cbor_parse ASSUMES that the incoming CBOR is a // map. In the case that data is entirely missing, we assume it was the // empty map - parsed, err := cbor.ParseDietCBOR([]byte(data)) + parsed, err := cbor.ParseDietCBOR(data) if err != nil { return Result{Error: errors.Wrapf(ErrBadInput, "CBORParse: data: %v", err)}, runInfo } - m, ok := parsed.Result.Value().(map[string]interface{}) - if !ok { - return Result{Error: errors.Wrapf(ErrBadInput, "CBORParse: data: expected map[string]interface{}, got %T", parsed.Result.Value())}, runInfo - } - return Result{Value: m}, runInfo + return Result{Value: parsed}, runInfo case "standard": parsed, err := cbor.ParseStandardCBOR([]byte(data)) if err != nil { diff --git a/core/services/pipeline/task.cborparse_test.go b/core/services/pipeline/task.cborparse_test.go index 53c2c481cbf..c0d8c9f2b8e 100644 --- a/core/services/pipeline/task.cborparse_test.go +++ b/core/services/pipeline/task.cborparse_test.go @@ -7,6 +7,7 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/smartcontractkit/chainlink/core/internal/testutils" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/pipeline" ) @@ -97,10 +98,10 @@ func TestCBORParseTask(t *testing.T) { nil, map[string]interface{}{ "bignums": []interface{}{ - float64(18446744073709551616), - float64(28948022309329048855892746252171976963317496166410141009864396001978282409984), - float64(-18446744073709551617), - float64(-28948022309329048855892746252171976963317496166410141009864396001978282409984), + testutils.MustParseBigInt(t, "18446744073709551616"), + testutils.MustParseBigInt(t, 
"28948022309329048855892746252171976963317496166410141009864396001978282409984"), + testutils.MustParseBigInt(t, "-18446744073709551617"), + testutils.MustParseBigInt(t, "-28948022309329048855892746252171976963317496166410141009864396001978282409984"), }, }, nil, diff --git a/core/services/pipeline/task.eth_abi_decode_test.go b/core/services/pipeline/task.eth_abi_decode_test.go index f3e5961ac04..c8338a06deb 100644 --- a/core/services/pipeline/task.eth_abi_decode_test.go +++ b/core/services/pipeline/task.eth_abi_decode_test.go @@ -1,4 +1,4 @@ -package pipeline_test +package pipeline import ( "context" @@ -12,108 +12,107 @@ import ( "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink/core/logger" - "github.com/smartcontractkit/chainlink/core/services/pipeline" "github.com/smartcontractkit/chainlink/core/utils" ) -func TestETHABIDecodeTask(t *testing.T) { - tests := []struct { - name string - abi string - data string - vars pipeline.Vars - inputs []pipeline.Result - expected map[string]interface{} - expectedErrorCause error - expectedErrorContains string - }{ - { - "uint256, bool, int256, string", - "uint256 u, bool b, int256 i, string s", - "$(foo)", - pipeline.NewVarsFrom(map[string]interface{}{ - "foo": "0x000000000000000000000000000000000000000000000000000000000000007b0000000000000000000000000000000000000000000000000000000000000001fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebf0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000b666f6f206261722062617a000000000000000000000000000000000000000000", - }), - nil, - map[string]interface{}{ - "u": big.NewInt(123), - "b": true, - "i": big.NewInt(-321), - "s": "foo bar baz", - }, - nil, - "", +var testsABIDecode = []struct { + name string + abi string + data string + vars Vars + inputs []Result + expected map[string]interface{} + expectedErrorCause error + expectedErrorContains string +}{ + { + "uint256, 
bool, int256, string", + "uint256 u, bool b, int256 i, string s", + "$(foo)", + NewVarsFrom(map[string]interface{}{ + "foo": "0x000000000000000000000000000000000000000000000000000000000000007b0000000000000000000000000000000000000000000000000000000000000001fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebf0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000b666f6f206261722062617a000000000000000000000000000000000000000000", + }), + nil, + map[string]interface{}{ + "u": big.NewInt(123), + "b": true, + "i": big.NewInt(-321), + "s": "foo bar baz", }, - { - "bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth", - "bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth", - "$(foo)", - pipeline.NewVarsFrom(map[string]interface{}{ - "foo": "0x00000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000002cc18069c8a2800000000000000000000000000000000000000000000000000000000000002625a000000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000bebc20000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000", - }), - nil, - map[string]interface{}{ - "performData": []uint8{0x0}, - "maxLinkPayment": big.NewInt(3225000000000000000), - "gasLimit": big.NewInt(2500000), - "adjustedGasWei": big.NewInt(200), - "linkEth": big.NewInt(200000000), - }, - nil, - "", + nil, + "", + }, + { + "bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth", + "bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth", + "$(foo)", + NewVarsFrom(map[string]interface{}{ + "foo": 
"0x00000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000002cc18069c8a2800000000000000000000000000000000000000000000000000000000000002625a000000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000bebc20000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000", + }), + nil, + map[string]interface{}{ + "performData": []uint8{0x0}, + "maxLinkPayment": big.NewInt(3225000000000000000), + "gasLimit": big.NewInt(2500000), + "adjustedGasWei": big.NewInt(200), + "linkEth": big.NewInt(200000000), }, - { - "weird spaces / address, uint80[3][], bytes, bytes32", - "address a , uint80[3][] u , bytes b, bytes32 b32 ", - "$(foo)", - pipeline.NewVarsFrom(map[string]interface{}{ - "foo": "0x000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001607374657665746f7368692073657267616d6f746f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000005c000000000000000000000000000000000000000000000000000000000000003d000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000002100000000000000000000000000000000000000000000000000000000000000420000000000000000000000000000000000000000000000000000000000000063000000000000000000000000000000000000000000000000000000000000000c666f6f206261722062617a0a0000000000000000000000000000000000000000", - }), - nil, - map[string]interface{}{ - "a": common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"), - "u": [][3]*big.Int{ - {big.NewInt(92), big.NewInt(61), big.NewInt(30)}, - {big.NewInt(33), big.NewInt(66), big.NewInt(99)}, - }, - "b": 
hexutil.MustDecode("0x666f6f206261722062617a0a"), - "b32": utils.Bytes32FromString("stevetoshi sergamoto"), + nil, + "", + }, + { + "weird spaces / address, uint80[3][], bytes, bytes32", + "address a , uint80[3][] u , bytes b, bytes32 b32 ", + "$(foo)", + NewVarsFrom(map[string]interface{}{ + "foo": "0x000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001607374657665746f7368692073657267616d6f746f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000005c000000000000000000000000000000000000000000000000000000000000003d000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000002100000000000000000000000000000000000000000000000000000000000000420000000000000000000000000000000000000000000000000000000000000063000000000000000000000000000000000000000000000000000000000000000c666f6f206261722062617a0a0000000000000000000000000000000000000000", + }), + nil, + map[string]interface{}{ + "a": common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"), + "u": [][3]*big.Int{ + {big.NewInt(92), big.NewInt(61), big.NewInt(30)}, + {big.NewInt(33), big.NewInt(66), big.NewInt(99)}, }, - nil, - "", - }, - { - "no attribute names", - "address, bytes32", - "$(foo)", - pipeline.NewVarsFrom(map[string]interface{}{ - "foo": 
"0x000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001607374657665746f7368692073657267616d6f746f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000005c000000000000000000000000000000000000000000000000000000000000003d000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000002100000000000000000000000000000000000000000000000000000000000000420000000000000000000000000000000000000000000000000000000000000063000000000000000000000000000000000000000000000000000000000000000c666f6f206261722062617a0a0000000000000000000000000000000000000000", - }), - nil, - nil, - pipeline.ErrBadInput, - "", + "b": hexutil.MustDecode("0x666f6f206261722062617a0a"), + "b32": utils.Bytes32FromString("stevetoshi sergamoto"), }, - { - "errored task inputs", - "uint256 u, bool b, int256 i, string s", - "$(foo)", - pipeline.NewVarsFrom(map[string]interface{}{ - "foo": "0x000000000000000000000000000000000000000000000000000000000000007b0000000000000000000000000000000000000000000000000000000000000001fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebf0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000b666f6f206261722062617a000000000000000000000000000000000000000000", - }), - []pipeline.Result{{Error: errors.New("uh oh")}}, - nil, - pipeline.ErrTooManyErrors, - "task inputs", - }, - } + nil, + "", + }, + { + "no attribute names", + "address, bytes32", + "$(foo)", + NewVarsFrom(map[string]interface{}{ + "foo": 
"0x000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001607374657665746f7368692073657267616d6f746f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000005c000000000000000000000000000000000000000000000000000000000000003d000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000002100000000000000000000000000000000000000000000000000000000000000420000000000000000000000000000000000000000000000000000000000000063000000000000000000000000000000000000000000000000000000000000000c666f6f206261722062617a0a0000000000000000000000000000000000000000", + }), + nil, + nil, + ErrBadInput, + "", + }, + { + "errored task inputs", + "uint256 u, bool b, int256 i, string s", + "$(foo)", + NewVarsFrom(map[string]interface{}{ + "foo": "0x000000000000000000000000000000000000000000000000000000000000007b0000000000000000000000000000000000000000000000000000000000000001fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebf0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000b666f6f206261722062617a000000000000000000000000000000000000000000", + }), + []Result{{Error: errors.New("uh oh")}}, + nil, + ErrTooManyErrors, + "task inputs", + }, +} - for _, test := range tests { +func TestETHABIDecodeTask(t *testing.T) { + for _, test := range testsABIDecode { test := test t.Run(test.name, func(t *testing.T) { - task := pipeline.ETHABIDecodeTask{ - BaseTask: pipeline.NewBaseTask(0, "decode", nil, nil, 0), + task := ETHABIDecodeTask{ + BaseTask: NewBaseTask(0, "decode", nil, nil, 0), ABI: test.abi, Data: test.data, } diff --git a/core/services/pipeline/task.eth_abi_encode_test.go b/core/services/pipeline/task.eth_abi_encode_test.go 
index 6d84486be75..be26c184c4e 100644 --- a/core/services/pipeline/task.eth_abi_encode_test.go +++ b/core/services/pipeline/task.eth_abi_encode_test.go @@ -14,12 +14,15 @@ import ( "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/pipeline" + "github.com/smartcontractkit/chainlink/core/utils" ) func TestETHABIEncodeTask(t *testing.T) { var bytes32 [32]byte copy(bytes32[:], []byte("chainlink chainlink chainlink")) + bytes32hex := utils.StringToHex(string(bytes32[:])) + tests := []struct { name string abi string @@ -59,6 +62,20 @@ func TestETHABIEncodeTask(t *testing.T) { nil, "", }, + { + "bytes32 (hex), bytes, address", + "asdf(bytes32 b, bytes bs, address a)", + `{ "b": $(foo), "bs": $(bar), "a": $(baz) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": bytes32hex, + "bar": []byte("stevetoshi sergeymoto"), + "baz": common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"), + }), + nil, + "0x4f5e7a89636861696e6c696e6b20636861696e6c696e6b20636861696e6c696e6b0000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef00000000000000000000000000000000000000000000000000000000000000157374657665746f736869207365726765796d6f746f0000000000000000000000", + nil, + "", + }, { "address[] calldata, uint80, uint32[2]", "chainLink(address[] calldata a, uint80 x, uint32[2] s)", diff --git a/core/services/pipeline/task.eth_tx.go b/core/services/pipeline/task.eth_tx.go index 35f7d737df3..25f428c4155 100644 --- a/core/services/pipeline/task.eth_tx.go +++ b/core/services/pipeline/task.eth_tx.go @@ -2,6 +2,7 @@ package pipeline import ( "context" + "math/big" "reflect" "strconv" @@ -38,7 +39,7 @@ type ETHTxTask struct { //go:generate mockery --name ETHKeyStore --output ./mocks/ --case=underscore type ETHKeyStore interface { - GetRoundRobinAddress(addrs ...common.Address) (common.Address, error) + GetRoundRobinAddress(chainID *big.Int, addrs 
...common.Address) (common.Address, error) } var _ Task = (*ETHTxTask)(nil) @@ -104,7 +105,7 @@ func (t *ETHTxTask) Run(_ context.Context, lggr logger.Logger, vars Vars, inputs return Result{Error: err}, runInfo } - fromAddr, err := t.keyStore.GetRoundRobinAddress(fromAddrs...) + fromAddr, err := t.keyStore.GetRoundRobinAddress(chain.ID(), fromAddrs...) if err != nil { err = errors.Wrap(err, "ETHTxTask failed to get fromAddress") lggr.Error(err) diff --git a/core/services/pipeline/task.eth_tx_test.go b/core/services/pipeline/task.eth_tx_test.go index 27e5acfb716..ad7507caa6f 100644 --- a/core/services/pipeline/task.eth_tx_test.go +++ b/core/services/pipeline/task.eth_tx_test.go @@ -13,6 +13,7 @@ import ( "github.com/smartcontractkit/chainlink/core/chains/evm/txmgr" txmmocks "github.com/smartcontractkit/chainlink/core/chains/evm/txmgr/mocks" + "github.com/smartcontractkit/chainlink/core/internal/testutils" "github.com/smartcontractkit/chainlink/core/internal/testutils/configtest" "github.com/smartcontractkit/chainlink/core/internal/testutils/evmtest" "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" @@ -60,7 +61,7 @@ func TestETHTxTask(t *testing.T) { data := []byte("foobar") gasLimit := uint64(12345) txMeta := &txmgr.EthTxMeta{JobID: 321, RequestID: common.HexToHash("0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2"), RequestTxHash: common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8")} - keyStore.On("GetRoundRobinAddress", from).Return(from, nil) + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) txManager.On("CreateEthTransaction", txmgr.NewTx{ FromAddress: from, ToAddress: to, @@ -103,7 +104,7 @@ func TestETHTxTask(t *testing.T) { data := []byte("foobar") gasLimit := uint64(12345) txMeta := &txmgr.EthTxMeta{JobID: 321, RequestID: common.HexToHash("0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2"), RequestTxHash: 
common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8")} - keyStore.On("GetRoundRobinAddress", from).Return(from, nil) + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) txManager.On("CreateEthTransaction", txmgr.NewTx{ FromAddress: from, ToAddress: to, @@ -144,7 +145,7 @@ func TestETHTxTask(t *testing.T) { data := []byte("foobar") gasLimit := uint64(12345) txMeta := &txmgr.EthTxMeta{JobID: 321, RequestID: common.HexToHash("0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2"), RequestTxHash: common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8")} - keyStore.On("GetRoundRobinAddress", from).Return(from, nil) + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) txManager.On("CreateEthTransaction", txmgr.NewTx{ FromAddress: from, ToAddress: to, @@ -185,7 +186,7 @@ func TestETHTxTask(t *testing.T) { data := []byte("foobar") gasLimit := uint64(12345) txMeta := &txmgr.EthTxMeta{JobID: 321, RequestID: common.HexToHash("0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2"), RequestTxHash: common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8")} - keyStore.On("GetRoundRobinAddress").Return(from, nil) + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID).Return(from, nil) txManager.On("CreateEthTransaction", txmgr.NewTx{ FromAddress: from, ToAddress: to, @@ -216,7 +217,7 @@ func TestETHTxTask(t *testing.T) { data := []byte("foobar") gasLimit := uint64(12345) txMeta := &txmgr.EthTxMeta{} - keyStore.On("GetRoundRobinAddress", from).Return(from, nil) + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) txManager.On("CreateEthTransaction", txmgr.NewTx{ FromAddress: from, ToAddress: to, @@ -247,7 +248,7 @@ func TestETHTxTask(t *testing.T) { data := []byte("foobar") gasLimit := uint64(999) txMeta := &txmgr.EthTxMeta{JobID: 321, RequestID: 
common.HexToHash("0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2"), RequestTxHash: common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8")} - keyStore.On("GetRoundRobinAddress", from).Return(from, nil) + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) txManager.On("CreateEthTransaction", txmgr.NewTx{ FromAddress: from, ToAddress: to, @@ -283,7 +284,7 @@ func TestETHTxTask(t *testing.T) { nil, func(config *configtest.TestGeneralConfig, keyStore *keystoremocks.Eth, txManager *txmmocks.TxManager) { config.Overrides.GlobalEvmGasLimitDefault = null.IntFrom(999) - keyStore.On("GetRoundRobinAddress").Return(nil, errors.New("uh oh")) + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID).Return(nil, errors.New("uh oh")) }, nil, pipeline.ErrTaskRunFailed, "while querying keystore", pipeline.RunInfo{IsRetryable: true}, }, @@ -306,7 +307,7 @@ func TestETHTxTask(t *testing.T) { data := []byte("foobar") gasLimit := uint64(12345) txMeta := &txmgr.EthTxMeta{JobID: 321, RequestID: common.HexToHash("0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2"), RequestTxHash: common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8")} - keyStore.On("GetRoundRobinAddress", from).Return(from, nil) + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) txManager.On("CreateEthTransaction", txmgr.NewTx{ FromAddress: from, ToAddress: to, @@ -400,7 +401,7 @@ func TestETHTxTask(t *testing.T) { func(config *configtest.TestGeneralConfig, keyStore *keystoremocks.Eth, txManager *txmmocks.TxManager) { config.Overrides.GlobalEvmGasLimitDefault = null.IntFrom(999) from := common.HexToAddress("0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c") - keyStore.On("GetRoundRobinAddress", from).Return(from, nil) + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) txManager.On("CreateEthTransaction", 
mock.MatchedBy(func(tx txmgr.NewTx) bool { return tx.MinConfirmations == clnull.Uint32From(3) && tx.PipelineTaskRunID != nil })).Return(txmgr.EthTx{}, nil) diff --git a/core/services/pipeline/task.http_test.go b/core/services/pipeline/task.http_test.go index 387aad4ddab..fee71d1676f 100644 --- a/core/services/pipeline/task.http_test.go +++ b/core/services/pipeline/task.http_test.go @@ -166,7 +166,9 @@ func TestHTTPTask_Variables(t *testing.T) { } task.HelperSetDependencies(cfg, db, uuid.UUID{}) - test.vars.Set("meta", test.meta) + err = test.vars.Set("meta", test.meta) + require.NoError(t, err) + result, runInfo := task.Run(context.Background(), logger.TestLogger(t), test.vars, test.inputs) assert.False(t, runInfo.IsPending) assert.False(t, runInfo.IsRetryable) diff --git a/core/services/pipeline/task.jsonparse.go b/core/services/pipeline/task.jsonparse.go index 59eb7d787ed..a7ee0955b2c 100644 --- a/core/services/pipeline/task.jsonparse.go +++ b/core/services/pipeline/task.jsonparse.go @@ -22,9 +22,10 @@ import ( // nil // type JSONParseTask struct { - BaseTask `mapstructure:",squash"` - Path string `json:"path"` - Data string `json:"data"` + BaseTask `mapstructure:",squash"` + Path string `json:"path"` + Separator string `json:"separator"` + Data string `json:"data"` // Lax when disabled will return an error if the path does not exist // Lax when enabled will return nil with no error if the path does not exist Lax string @@ -42,12 +43,14 @@ func (t *JSONParseTask) Run(_ context.Context, _ logger.Logger, vars Vars, input return Result{Error: errors.Wrap(err, "task inputs")}, runInfo } + var sep StringParam + err = errors.Wrap(ResolveParam(&sep, From(t.Separator)), "separator") var ( - path JSONPathParam - data StringParam + path = NewJSONPathParam(string(sep)) + data BytesParam lax BoolParam ) - err = multierr.Combine( + err = multierr.Combine(err, errors.Wrap(ResolveParam(&path, From(VarExpr(t.Path, vars), t.Path)), "path"), errors.Wrap(ResolveParam(&data, 
From(VarExpr(t.Data, vars), Input(inputs, 0))), "data"), errors.Wrap(ResolveParam(&lax, From(NonemptyString(t.Lax), false)), "lax"), @@ -57,7 +60,7 @@ func (t *JSONParseTask) Run(_ context.Context, _ logger.Logger, vars Vars, input } var decoded interface{} - err = json.Unmarshal([]byte(data), &decoded) + err = json.Unmarshal(data, &decoded) if err != nil { return Result{Error: err}, runInfo } diff --git a/core/services/pipeline/task.jsonparse_test.go b/core/services/pipeline/task.jsonparse_test.go index 42d6dfbe213..6c7c7ce0db4 100644 --- a/core/services/pipeline/task.jsonparse_test.go +++ b/core/services/pipeline/task.jsonparse_test.go @@ -19,6 +19,7 @@ func TestJSONParseTask(t *testing.T) { name string data string path string + separator string lax string vars pipeline.Vars inputs []pipeline.Result @@ -30,6 +31,7 @@ func TestJSONParseTask(t *testing.T) { "array index path", "", "data,0,availability", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"data":[{"availability":"0.99991"}]}`}}, @@ -41,6 +43,7 @@ func TestJSONParseTask(t *testing.T) { "float result", "", "availability", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"availability":0.99991}`}}, @@ -52,6 +55,7 @@ func TestJSONParseTask(t *testing.T) { "index array", "", "data,0", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"data": [0, 1]}`}}, @@ -63,6 +67,7 @@ func TestJSONParseTask(t *testing.T) { "index array of array", "", "data,0,0", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"data": [[0, 1]]}`}}, @@ -74,6 +79,7 @@ func TestJSONParseTask(t *testing.T) { "index of negative one", "", "data,-1", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"data": [0, 1]}`}}, @@ -85,6 +91,7 @@ func TestJSONParseTask(t *testing.T) { "index of negative array length", "", "data,-10", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"data": [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]}`}}, @@ 
-96,6 +103,7 @@ func TestJSONParseTask(t *testing.T) { "index of negative array length minus one with lax returns nil", "", "data,-12", + "", "true", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"data": [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]}`}}, @@ -107,6 +115,7 @@ func TestJSONParseTask(t *testing.T) { "index of negative array length minus one without lax returns error", "", "data,-12", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"data": [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]}`}}, @@ -118,6 +127,7 @@ func TestJSONParseTask(t *testing.T) { "maximum index array with lax returns nil", "", "data,18446744073709551615", + "", "true", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"data": [0, 1]}`}}, @@ -129,6 +139,7 @@ func TestJSONParseTask(t *testing.T) { "maximum index array without lax returns error", "", "data,18446744073709551615", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"data": [0, 1]}`}}, @@ -140,6 +151,7 @@ func TestJSONParseTask(t *testing.T) { "overflow index array with lax returns nil", "", "data,18446744073709551616", + "", "true", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"data": [0, 1]}`}}, @@ -151,6 +163,7 @@ func TestJSONParseTask(t *testing.T) { "overflow index array without lax returns error", "", "data,18446744073709551616", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"data": [0, 1]}`}}, @@ -162,6 +175,7 @@ func TestJSONParseTask(t *testing.T) { "return array", "", "data,0", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"data": [[0, 1]]}`}}, @@ -173,6 +187,7 @@ func TestJSONParseTask(t *testing.T) { "return false", "", "data", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"data": false}`}}, @@ -184,6 +199,7 @@ func TestJSONParseTask(t *testing.T) { "return true", "", "data", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"data": true}`}}, @@ -195,6 +211,7 @@ func 
TestJSONParseTask(t *testing.T) { "regression test: keys in the path have dots", "", "Realtime Currency Exchange Rate,5. Exchange Rate", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{ @@ -214,10 +231,49 @@ func TestJSONParseTask(t *testing.T) { nil, "", }, + { + "custom separator: keys in the path have commas", + "", + "foo.bar1,bar2,bar3", + ".", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{ + "foo": { + "bar1": "LEND", + "bar1,bar2": "EthLend", + "bar2,bar3": "ETH", + "bar1,bar3": "Ethereum", + "bar1,bar2,bar3": "0.00058217", + "bar1.bar2.bar3": "2020-06-22 19:14:04" + } + }`}}, + "0.00058217", + nil, + "", + }, + { + "custom separator: diabolical keys in the path", + "", + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.,/\\[]{}|<>?_+-=!@#$%^&*()__hacky__separator__foo", + "__hacky__separator__", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{ + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.,/\\[]{}|<>?_+-=!@#$%^&*()": { + "foo": "LEND", + "bar": "EthLend" + } + }`}}, + "LEND", + nil, + "", + }, { "missing top-level key with lax=false returns error", "", "baz", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"foo": 1}`}}, @@ -229,6 +285,7 @@ func TestJSONParseTask(t *testing.T) { "missing nested key with lax=false returns error", "", "foo,bar", + "", "false", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"foo": {}}`}}, @@ -240,6 +297,7 @@ func TestJSONParseTask(t *testing.T) { "missing top-level key with lax=true returns nil", "", "baz", + "", "true", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{}`}}, @@ -251,6 +309,7 @@ func TestJSONParseTask(t *testing.T) { "missing nested key with lax=true returns nil", "", "foo,baz", + "", "true", pipeline.NewVarsFrom(nil), []pipeline.Result{{Value: `{"foo": {}}`}}, @@ -262,6 +321,7 @@ func TestJSONParseTask(t *testing.T) { "variable data", "$(foo.bar)", "data,0,availability", + "", "false", 
pipeline.NewVarsFrom(map[string]interface{}{ "foo": map[string]interface{}{"bar": `{"data":[{"availability":"0.99991"}]}`}, @@ -275,6 +335,7 @@ func TestJSONParseTask(t *testing.T) { "empty path", "$(foo.bar)", "", + "", "false", pipeline.NewVarsFrom(map[string]interface{}{ "foo": map[string]interface{}{"bar": `{"data":["stevetoshi sergeymoto"]}`}, @@ -288,6 +349,7 @@ func TestJSONParseTask(t *testing.T) { "no data or input", "", "$(chain.link)", + "", "false", pipeline.NewVarsFrom(map[string]interface{}{ "foo": map[string]interface{}{"bar": `{"data":[{"availability":"0.99991"}]}`}, @@ -295,13 +357,14 @@ func TestJSONParseTask(t *testing.T) { }), []pipeline.Result{}, "0.99991", - pipeline.ErrParameterEmpty, + pipeline.ErrIndexOutOfRange, "data", }, { "malformed 'lax' param", "$(foo.bar)", "$(chain.link)", + "", "sergey", pipeline.NewVarsFrom(map[string]interface{}{ "foo": map[string]interface{}{"bar": `{"data":[{"availability":"0.99991"}]}`}, @@ -318,10 +381,11 @@ func TestJSONParseTask(t *testing.T) { test := tt t.Run(test.name, func(t *testing.T) { task := pipeline.JSONParseTask{ - BaseTask: pipeline.NewBaseTask(0, "json", nil, nil, 0), - Path: test.path, - Data: test.data, - Lax: test.lax, + BaseTask: pipeline.NewBaseTask(0, "json", nil, nil, 0), + Path: test.path, + Separator: test.separator, + Data: test.data, + Lax: test.lax, } result, runInfo := task.Run(context.Background(), logger.TestLogger(t), test.vars, test.inputs) assert.False(t, runInfo.IsPending) diff --git a/core/services/pipeline/task.vrfv2.go b/core/services/pipeline/task.vrfv2.go index 6a6eebc83d1..b9c09b7d747 100644 --- a/core/services/pipeline/task.vrfv2.go +++ b/core/services/pipeline/task.vrfv2.go @@ -138,5 +138,10 @@ func (t *VRFTaskV2) Run(_ context.Context, _ logger.Logger, vars Vars, inputs [] results["output"] = hexutil.Encode(b) // RequestID needs to be a [32]byte for EthTxMeta. 
results["requestID"] = hexutil.Encode(requestId.Bytes()) + + // store vrf proof and request commitment separately so they can be used in a batch fashion + results["proof"] = onChainProof + results["requestCommitment"] = rc + return Result{Value: results}, runInfo } diff --git a/core/services/pipeline/task_object_params.go b/core/services/pipeline/task_object_params.go index ed4b4a0840a..9bcc0d62dc9 100644 --- a/core/services/pipeline/task_object_params.go +++ b/core/services/pipeline/task_object_params.go @@ -129,12 +129,3 @@ func (o *ObjectParam) UnmarshalPipelineParam(val interface{}) error { return fmt.Errorf("bad input for task: %T", val) } - -func MustNewObjectParam(val interface{}) *ObjectParam { - var value ObjectParam - err := value.UnmarshalPipelineParam(val) - if err != nil { - panic(fmt.Errorf("failed to init ObjectParam from %v, err: %w", val, err)) - } - return &value -} diff --git a/core/services/pipeline/task_object_params_test.go b/core/services/pipeline/task_object_params_test.go index 11edbf11781..7e076d90860 100644 --- a/core/services/pipeline/task_object_params_test.go +++ b/core/services/pipeline/task_object_params_test.go @@ -77,12 +77,12 @@ func TestObjectParam_Marshal(t *testing.T) { input *pipeline.ObjectParam output string }{ - {"nil", pipeline.MustNewObjectParam(nil), "null"}, - {"bool", pipeline.MustNewObjectParam(true), "true"}, - {"integer", pipeline.MustNewObjectParam(17), `"17"`}, - {"string", pipeline.MustNewObjectParam("hello world"), `"hello world"`}, - {"array", pipeline.MustNewObjectParam([]int{17, 19}), "[17,19]"}, - {"map", pipeline.MustNewObjectParam(map[string]interface{}{"key": 19}), `{"key":19}`}, + {"nil", mustNewObjectParam(t, nil), "null"}, + {"bool", mustNewObjectParam(t, true), "true"}, + {"integer", mustNewObjectParam(t, 17), `"17"`}, + {"string", mustNewObjectParam(t, "hello world"), `"hello world"`}, + {"array", mustNewObjectParam(t, []int{17, 19}), "[17,19]"}, + {"map", mustNewObjectParam(t, 
map[string]interface{}{"key": 19}), `{"key":19}`}, } for _, test := range tests { diff --git a/core/services/pipeline/task_params.go b/core/services/pipeline/task_params.go index 4ca9c2869be..5894e9b149e 100644 --- a/core/services/pipeline/task_params.go +++ b/core/services/pipeline/task_params.go @@ -1,11 +1,8 @@ package pipeline import ( - "bytes" - "encoding/base64" "encoding/hex" "encoding/json" - "fmt" "math" "math/big" "net/url" @@ -50,156 +47,6 @@ func ResolveParam(out PipelineParamUnmarshaler, getters []GetterFunc) error { return nil } -type GetterFunc func() (interface{}, error) - -func From(getters ...interface{}) []GetterFunc { - var gfs []GetterFunc - for _, g := range getters { - switch v := g.(type) { - case GetterFunc: - gfs = append(gfs, v) - - default: - // If a bare value is passed in, create a simple getter - gfs = append(gfs, func() (interface{}, error) { - return v, nil - }) - } - } - return gfs -} - -func VarExpr(s string, vars Vars) GetterFunc { - return func() (interface{}, error) { - trimmed := strings.TrimSpace(s) - if len(trimmed) == 0 { - return nil, ErrParameterEmpty - } - isVariableExpr := strings.Count(trimmed, "$") == 1 && trimmed[:2] == "$(" && trimmed[len(trimmed)-1] == ')' - if !isVariableExpr { - return nil, ErrParameterEmpty - } - keypath := strings.TrimSpace(trimmed[2 : len(trimmed)-1]) - val, err := vars.Get(keypath) - if err != nil { - return nil, err - } else if as, is := val.(error); is { - return nil, errors.Wrapf(ErrTooManyErrors, "VarExpr: %v", as) - } - return val, nil - } -} - -func JSONWithVarExprs(s string, vars Vars, allowErrors bool) GetterFunc { - return func() (interface{}, error) { - if strings.TrimSpace(s) == "" { - return nil, ErrParameterEmpty - } - replaced := variableRegexp.ReplaceAllFunc([]byte(s), func(expr []byte) []byte { - keypathStr := strings.TrimSpace(string(expr[2 : len(expr)-1])) - return []byte(fmt.Sprintf(`{ "__chainlink_var_expr__": "%v" }`, keypathStr)) - }) - var val interface{} - err := 
json.Unmarshal(replaced, &val) - if err != nil { - return nil, errors.Wrapf(ErrBadInput, "while interpolating variables in JSON payload: %v", err) - } - return mapGoValue(val, func(val interface{}) (interface{}, error) { - if m, is := val.(map[string]interface{}); is { - maybeKeypath, exists := m["__chainlink_var_expr__"] - if !exists { - return val, nil - } - keypath, is := maybeKeypath.(string) - if !is { - return nil, errors.New("you cannot use __chainlink_var_expr__ in your JSON") - } - newVal, err := vars.Get(keypath) - if err != nil { - return nil, err - } else if err, is := newVal.(error); is && !allowErrors { - return nil, errors.Wrapf(ErrBadInput, "JSONWithVarExprs: %v", err) - } - return newVal, nil - } - return val, nil - }) - } -} - -func mapGoValue(v interface{}, fn func(val interface{}) (interface{}, error)) (x interface{}, err error) { - type item struct { - val interface{} - parentMap map[string]interface{} - parentKey string - parentSlice []interface{} - parentIdx int - } - - stack := []item{{val: v}} - var current item - - for len(stack) > 0 { - current = stack[0] - stack = stack[1:] - - val, err := fn(current.val) - if err != nil { - return nil, err - } - - if current.parentMap != nil { - current.parentMap[current.parentKey] = val - } else if current.parentSlice != nil { - current.parentSlice[current.parentIdx] = val - } - - if asMap, isMap := val.(map[string]interface{}); isMap { - for key := range asMap { - stack = append(stack, item{val: asMap[key], parentMap: asMap, parentKey: key}) - } - } else if asSlice, isSlice := val.([]interface{}); isSlice { - for i := range asSlice { - stack = append(stack, item{val: asSlice[i], parentSlice: asSlice, parentIdx: i}) - } - } - } - return v, nil -} - -func NonemptyString(s string) GetterFunc { - return func() (interface{}, error) { - trimmed := strings.TrimSpace(s) - if len(trimmed) == 0 { - return nil, ErrParameterEmpty - } - return trimmed, nil - } -} - -func Input(inputs []Result, index int) 
GetterFunc { - return func() (interface{}, error) { - if len(inputs)-1 < index { - return nil, ErrParameterEmpty - } - return inputs[index].Value, inputs[index].Error - } -} - -func Inputs(inputs []Result) GetterFunc { - return func() (interface{}, error) { - var vals []interface{} - for _, input := range inputs { - if input.Error != nil { - vals = append(vals, input.Error) - } else { - vals = append(vals, input.Value) - } - } - return vals, nil - } -} - type StringParam string func (s *StringParam) UnmarshalPipelineParam(val interface{}) error { @@ -210,19 +57,16 @@ func (s *StringParam) UnmarshalPipelineParam(val interface{}) error { case []byte: *s = StringParam(string(v)) return nil - case ObjectParam: if v.Type == StringType { *s = v.StringValue return nil } - case *ObjectParam: if v.Type == StringType { *s = v.StringValue return nil } - } return errors.Wrapf(ErrBadInput, "expected string, got %T", val) } @@ -232,29 +76,36 @@ type BytesParam []byte func (b *BytesParam) UnmarshalPipelineParam(val interface{}) error { switch v := val.(type) { case string: - // try hex first - if len(v) >= 2 && v[:2] == "0x" { - bs, err := hex.DecodeString(v[2:]) + // first check if this is a valid hex-encoded string + if utils.HasHexPrefix(v) { + noHexPrefix := utils.RemoveHexPrefix(v) + bs, err := hex.DecodeString(noHexPrefix) if err == nil { - *b = BytesParam(bs) + *b = bs return nil } - // The base64 encoding for the binary 0b110100110001 is '0x', so carry on. 
} - // try decoding as base64 first, in case this is a string from the database - bs, err := base64.StdEncoding.DecodeString(v) - if err != nil { - bs = []byte(v) - } - *b = BytesParam(bs) - case []byte: *b = BytesParam(v) + return nil + case []byte: + *b = v + return nil case nil: *b = BytesParam(nil) - default: - return errors.Wrapf(ErrBadInput, "expected array of bytes, got %T", val) + return nil + case ObjectParam: + if v.Type == StringType { + *b = BytesParam(v.StringValue) + return nil + } + case *ObjectParam: + if v.Type == StringType { + *b = BytesParam(v.StringValue) + return nil + } } - return nil + + return errors.Wrapf(ErrBadInput, "expected array of bytes, got %T", val) } type Uint64Param uint64 @@ -300,6 +151,14 @@ type MaybeUint64Param struct { isSet bool } +// NewMaybeUint64Param creates new instance of MaybeUint64Param +func NewMaybeUint64Param(n uint64, isSet bool) MaybeUint64Param { + return MaybeUint64Param{ + n: n, + isSet: isSet, + } +} + func (p *MaybeUint64Param) UnmarshalPipelineParam(val interface{}) error { var n uint64 switch v := val.(type) { @@ -353,6 +212,14 @@ type MaybeInt32Param struct { isSet bool } +// NewMaybeInt32Param creates new instance of MaybeInt32Param +func NewMaybeInt32Param(n int32, isSet bool) MaybeInt32Param { + return MaybeInt32Param{ + n: n, + isSet: isSet, + } +} + func (p *MaybeInt32Param) UnmarshalPipelineParam(val interface{}) error { var n int32 switch v := val.(type) { @@ -433,20 +300,18 @@ func (b *BoolParam) UnmarshalPipelineParam(val interface{}) error { case bool: *b = BoolParam(v) return nil - case ObjectParam: if v.Type == BoolType { *b = v.BoolValue return nil } - case *ObjectParam: if v.Type == BoolType { *b = v.BoolValue return nil } - } + return errors.Wrapf(ErrBadInput, "expected true or false, got %T", val) } @@ -499,20 +364,19 @@ func (a *AddressParam) UnmarshalPipelineParam(val interface{}) error { case string: return a.UnmarshalPipelineParam([]byte(v)) case []byte: - if bytes.Equal(v[:2], 
[]byte("0x")) && len(v) == 42 { + if utils.HasHexPrefix(string(v)) && len(v) == 42 { *a = AddressParam(common.HexToAddress(string(v))) return nil } else if len(v) == 20 { copy((*a)[:], v) return nil } - return ErrBadInput case common.Address: *a = AddressParam(v) - default: - return ErrBadInput + return nil } - return nil + + return errors.Wrapf(ErrBadInput, "expected common.Address, got %T", val) } // MapParam accepts maps or JSON-encoded strings @@ -654,12 +518,12 @@ func (s *HashSliceParam) UnmarshalPipelineParam(val interface{}) error { case string: err := json.Unmarshal([]byte(v), &dsp) if err != nil { - return err + return errors.Wrapf(ErrBadInput, "HashSliceParam: %v", err) } case []byte: err := json.Unmarshal(v, &dsp) if err != nil { - return err + return errors.Wrapf(ErrBadInput, "HashSliceParam: %v", err) } case []interface{}: for _, h := range v { @@ -670,6 +534,16 @@ func (s *HashSliceParam) UnmarshalPipelineParam(val interface{}) error { return errors.Wrapf(ErrBadInput, "HashSliceParam: %v", err) } dsp = append(dsp, hash) + } else if b, is := h.([]byte); is { + // same semantic as AddressSliceParam + var hash common.Hash + err := hash.UnmarshalText(b) + if err != nil { + return errors.Wrapf(ErrBadInput, "HashSliceParam: %v", err) + } + dsp = append(dsp, hash) + } else if h, is := h.(common.Hash); is { + dsp = append(dsp, h) } else { return errors.Wrap(ErrBadInput, "HashSliceParam") } @@ -718,7 +592,23 @@ func (s *AddressSliceParam) UnmarshalPipelineParam(val interface{}) error { type JSONPathParam []string +// NewJSONPathParam returns a new JSONPathParam using the given separator, or the default if empty. +func NewJSONPathParam(sep string) JSONPathParam { + if len(sep) == 0 { + return nil + } + return []string{sep} +} + +// UnmarshalPipelineParam unmarshals a slice of strings from val. +// If val is a string or []byte, it is split on a separator. +// The default separator is ',' but can be overridden by initializing via NewJSONPathParam. 
func (p *JSONPathParam) UnmarshalPipelineParam(val interface{}) error { + sep := "," + if len(*p) > 0 { + // custom separator + sep = (*p)[0] + } var ssp JSONPathParam switch v := val.(type) { case nil: @@ -737,12 +627,12 @@ func (p *JSONPathParam) UnmarshalPipelineParam(val interface{}) error { if len(v) == 0 { return nil } - ssp = strings.Split(v, ",") + ssp = strings.Split(v, sep) case []byte: if len(v) == 0 { return nil } - ssp = strings.Split(string(v), ",") + ssp = strings.Split(string(v), sep) default: return ErrBadInput } @@ -754,6 +644,13 @@ type MaybeBigIntParam struct { n *big.Int } +// NewMaybeBigIntParam creates a new instance of MaybeBigIntParam +func NewMaybeBigIntParam(n *big.Int) MaybeBigIntParam { + return MaybeBigIntParam{ + n: n, + } +} + func (p *MaybeBigIntParam) UnmarshalPipelineParam(val interface{}) error { var n *big.Int switch v := val.(type) { diff --git a/core/services/pipeline/task_params_test.go b/core/services/pipeline/task_params_test.go index a11d6ce3608..cbbca3d2ab8 100644 --- a/core/services/pipeline/task_params_test.go +++ b/core/services/pipeline/task_params_test.go @@ -1,18 +1,20 @@ package pipeline_test import ( - "encoding/base64" + "math/big" "net/url" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/mock" "github.com/pkg/errors" "github.com/shopspring/decimal" "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink/core/services/pipeline" + "github.com/smartcontractkit/chainlink/core/services/pipeline/mocks" ) func TestStringParam_UnmarshalPipelineParam(t *testing.T) { @@ -27,7 +29,8 @@ func TestStringParam_UnmarshalPipelineParam(t *testing.T) { {"string", "foo bar baz", pipeline.StringParam("foo bar baz"), nil}, {"[]byte", []byte("foo bar baz"), pipeline.StringParam("foo bar baz"), nil}, {"int", 12345, pipeline.StringParam(""), pipeline.ErrBadInput}, - {"object", pipeline.MustNewObjectParam(`boz bar bap`), 
pipeline.StringParam("boz bar bap"), nil}, + {"*object", mustNewObjectParam(t, `boz bar bap`), pipeline.StringParam("boz bar bap"), nil}, + {"object", *mustNewObjectParam(t, `boz bar bap`), pipeline.StringParam("boz bar bap"), nil}, } for _, test := range tests { @@ -55,14 +58,10 @@ func TestBytesParam_UnmarshalPipelineParam(t *testing.T) { {"string", "foo bar baz", pipeline.BytesParam("foo bar baz"), nil}, {"[]byte", []byte("foo bar baz"), pipeline.BytesParam("foo bar baz"), nil}, {"int", 12345, pipeline.BytesParam(nil), pipeline.ErrBadInput}, - - // The base64 encoding for the binary 0b110100110001 is '0x', so we must not error when hex fails, since it might actually be b64. - {"hex-invalid", "0xh", - pipeline.BytesParam("0xh"), nil}, - {"b64-hex-prefix", base64.StdEncoding.EncodeToString([]byte{0b11010011, 0b00011000, 0b01001101}), - pipeline.BytesParam([]byte{0b11010011, 0b00011000, 0b01001101}), nil}, - {"b64-hex-prefix-2", base64.StdEncoding.EncodeToString(hexutil.MustDecode("0xd3184d")), - pipeline.BytesParam(hexutil.MustDecode("0xd3184d")), nil}, + {"hex-invalid", "0xh", pipeline.BytesParam("0xh"), nil}, + {"valid-hex", hexutil.MustDecode("0xd3184d"), pipeline.BytesParam(hexutil.MustDecode("0xd3184d")), nil}, + {"*object", mustNewObjectParam(t, `boz bar bap`), pipeline.BytesParam("boz bar bap"), nil}, + {"object", *mustNewObjectParam(t, `boz bar bap`), pipeline.BytesParam("boz bar bap"), nil}, } for _, test := range tests { @@ -185,6 +184,7 @@ func TestUint64Param_UnmarshalPipelineParam(t *testing.T) { {"uint16", uint16(123), pipeline.Uint64Param(123), nil}, {"uint32", uint32(123), pipeline.Uint64Param(123), nil}, {"uint64", uint64(123), pipeline.Uint64Param(123), nil}, + {"float64", float64(123), pipeline.Uint64Param(123), nil}, {"bool", true, pipeline.Uint64Param(0), pipeline.ErrBadInput}, } @@ -201,6 +201,131 @@ func TestUint64Param_UnmarshalPipelineParam(t *testing.T) { } } +func TestMaybeUint64Param_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() 
+ + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"string", "123", pipeline.NewMaybeUint64Param(123, true), nil}, + {"int", int(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"int8", int8(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"int16", int16(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"int32", int32(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"int64", int64(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"uint", uint(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"uint8", uint8(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"uint16", uint16(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"uint32", uint32(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"uint64", uint64(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"float64", float64(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"bool", true, pipeline.NewMaybeUint64Param(0, false), pipeline.ErrBadInput}, + {"empty string", "", pipeline.NewMaybeUint64Param(0, false), nil}, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + var p pipeline.MaybeUint64Param + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + require.Equal(t, test.expected, p) + }) + } +} + +func TestMaybeBigIntParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + fromInt := func(n int64) pipeline.MaybeBigIntParam { + return pipeline.NewMaybeBigIntParam(big.NewInt(n)) + } + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"string", "123", fromInt(123), nil}, + {"empty string", "", pipeline.NewMaybeBigIntParam(nil), nil}, + {"nil", nil, pipeline.NewMaybeBigIntParam(nil), nil}, + {"*big.Int", big.NewInt(123), fromInt(123), nil}, + {"int", int(123), fromInt(123), nil}, + {"int8", int8(123), fromInt(123), nil}, + {"int16", int16(123), 
fromInt(123), nil}, + {"int32", int32(123), fromInt(123), nil}, + {"int64", int64(123), fromInt(123), nil}, + {"uint", uint(123), fromInt(123), nil}, + {"uint8", uint8(123), fromInt(123), nil}, + {"uint16", uint16(123), fromInt(123), nil}, + {"uint32", uint32(123), fromInt(123), nil}, + {"uint64", uint64(123), fromInt(123), nil}, + {"float64", float64(123), fromInt(123), nil}, + {"bool", true, pipeline.NewMaybeBigIntParam(nil), pipeline.ErrBadInput}, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + var p pipeline.MaybeBigIntParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + require.Equal(t, test.expected, p) + }) + } +} + +func TestMaybeInt32Param_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"string", "123", pipeline.NewMaybeInt32Param(123, true), nil}, + {"int", int(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"int8", int8(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"int16", int16(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"int32", int32(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"int64", int64(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"uint", uint(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"uint8", uint8(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"uint16", uint16(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"uint32", uint32(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"uint64", uint64(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"float64", float64(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"bool", true, pipeline.NewMaybeInt32Param(0, false), pipeline.ErrBadInput}, + {"empty string", "", pipeline.NewMaybeInt32Param(0, false), nil}, + {"string overflow", "100000000000", pipeline.NewMaybeInt32Param(0, false), 
pipeline.ErrBadInput}, + {"int64 overflow", int64(123 << 32), pipeline.NewMaybeInt32Param(0, false), pipeline.ErrBadInput}, + {"negative int64 overflow", -int64(123 << 32), pipeline.NewMaybeInt32Param(0, false), pipeline.ErrBadInput}, + {"uint64 overflow", uint64(123 << 32), pipeline.NewMaybeInt32Param(0, false), pipeline.ErrBadInput}, + {"float overflow", float64(123 << 32), pipeline.NewMaybeInt32Param(0, false), pipeline.ErrBadInput}, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + var p pipeline.MaybeInt32Param + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + require.Equal(t, test.expected, p) + }) + } +} + func TestBoolParam_UnmarshalPipelineParam(t *testing.T) { t.Parallel() @@ -215,7 +340,8 @@ func TestBoolParam_UnmarshalPipelineParam(t *testing.T) { {"bool true", true, pipeline.BoolParam(true), nil}, {"bool false", false, pipeline.BoolParam(false), nil}, {"int", int8(123), pipeline.BoolParam(false), pipeline.ErrBadInput}, - {"object", pipeline.MustNewObjectParam(true), pipeline.BoolParam(true), nil}, + {"*object", mustNewObjectParam(t, true), pipeline.BoolParam(true), nil}, + {"object", *mustNewObjectParam(t, true), pipeline.BoolParam(true), nil}, } for _, test := range tests { @@ -247,7 +373,7 @@ func TestDecimalParam_UnmarshalPipelineParam(t *testing.T) { {"float32", float32(123.45), pipeline.DecimalParam(d), nil}, {"float64", float64(123.45), pipeline.DecimalParam(d), nil}, {"bool", false, pipeline.DecimalParam(dNull), pipeline.ErrBadInput}, - {"object", pipeline.MustNewObjectParam(123.45), pipeline.DecimalParam(d), nil}, + {"object", mustNewObjectParam(t, 123.45), pipeline.DecimalParam(d), nil}, } for _, test := range tests { @@ -325,15 +451,32 @@ func TestMapParam_UnmarshalPipelineParam(t *testing.T) { }, } - var got1 pipeline.MapParam - err := got1.UnmarshalPipelineParam(inputStr) - require.NoError(t, err) - require.Equal(t, expected, got1) + 
tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"from string", inputStr, expected, nil}, + {"from []byte", []byte(inputStr), expected, nil}, + {"from map", inputMap, expected, nil}, + {"from nil", nil, pipeline.MapParam(nil), nil}, + {"from *object", mustNewObjectParam(t, inputMap), expected, nil}, + {"from object", *mustNewObjectParam(t, inputMap), expected, nil}, + {"wrong type", 123, pipeline.MapParam(nil), pipeline.ErrBadInput}, + } - var got2 pipeline.MapParam - err = got2.UnmarshalPipelineParam(inputMap) - require.NoError(t, err) - require.Equal(t, expected, got2) + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + var p pipeline.MapParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + require.Equal(t, test.expected, p) + }) + } } func TestSliceParam_UnmarshalPipelineParam(t *testing.T) { @@ -349,6 +492,7 @@ func TestSliceParam_UnmarshalPipelineParam(t *testing.T) { {"[]byte", []byte(`[1, 2, 3]`), pipeline.SliceParam([]interface{}{float64(1), float64(2), float64(3)}), nil}, {"string", `[1, 2, 3]`, pipeline.SliceParam([]interface{}{float64(1), float64(2), float64(3)}), nil}, {"bool", true, pipeline.SliceParam(nil), pipeline.ErrBadInput}, + {"nil", nil, pipeline.SliceParam(nil), nil}, } for _, test := range tests { @@ -364,6 +508,44 @@ func TestSliceParam_UnmarshalPipelineParam(t *testing.T) { } } +func TestHashSliceParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + hash1 := common.HexToHash("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + hash2 := common.HexToHash("0xcafebabecafebabecafebabecafebabecafebabedeadbeefdeadbeefdeadbeef") + expected := pipeline.HashSliceParam{hash1, hash2} + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"json", `[ "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", 
"0xcafebabecafebabecafebabecafebabecafebabedeadbeefdeadbeefdeadbeef" ]`, expected, nil}, + {"[]common.Hash", []common.Hash{hash1, hash2}, expected, nil}, + {"[]interface{} with common.Hash", []interface{}{hash1, hash2}, expected, nil}, + {"[]interface{} with strings", []interface{}{hash1.String(), hash2.String()}, expected, nil}, + {"[]interface{} with []byte", []interface{}{[]byte(hash1.String()), []byte(hash2.String())}, expected, nil}, + {"nil", nil, pipeline.HashSliceParam(nil), nil}, + {"bad json", `[ "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" "0xcafebabecafebabecafebabecafebabecafebabedeadbeefdeadbeefdeadbeef" ]`, nil, pipeline.ErrBadInput}, + {"[]interface{} with bad types", []interface{}{123, true}, nil, pipeline.ErrBadInput}, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + var p pipeline.HashSliceParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + if test.expected != nil { + require.Equal(t, test.expected, p) + } + }) + } +} + func TestSliceParam_FilterErrors(t *testing.T) { t.Parallel() @@ -377,6 +559,7 @@ func TestDecimalSliceParam_UnmarshalPipelineParam(t *testing.T) { t.Parallel() expected := pipeline.DecimalSliceParam{*mustDecimal(t, "1.1"), *mustDecimal(t, "2.2"), *mustDecimal(t, "3.3")} + decimalsSlice := []decimal.Decimal{*mustDecimal(t, "1.1"), *mustDecimal(t, "2.2"), *mustDecimal(t, "3.3")} tests := []struct { name string @@ -389,6 +572,8 @@ func TestDecimalSliceParam_UnmarshalPipelineParam(t *testing.T) { {"[]byte", `[1.1, "2.2", 3.3]`, expected, nil}, {"[]interface{} with error", `[1.1, true, "abc"]`, pipeline.DecimalSliceParam(nil), pipeline.ErrBadInput}, {"bool", true, pipeline.DecimalSliceParam(nil), pipeline.ErrBadInput}, + {"nil", nil, pipeline.DecimalSliceParam(nil), nil}, + {"[]decimal.Decimal", decimalsSlice, expected, nil}, } for _, test := range tests { @@ -419,6 +604,7 @@ func 
TestJSONPathParam_UnmarshalPipelineParam(t *testing.T) { {"string", `1.1,2.2,3.3,sergey`, expected, nil}, {"[]byte", []byte(`1.1,2.2,3.3,sergey`), expected, nil}, {"bool", true, pipeline.JSONPathParam(nil), pipeline.ErrBadInput}, + {"nil", nil, pipeline.JSONPathParam(nil), nil}, } for _, test := range tests { @@ -434,90 +620,103 @@ func TestJSONPathParam_UnmarshalPipelineParam(t *testing.T) { } } -func TestVarExpr(t *testing.T) { +func TestResolveValue(t *testing.T) { t.Parallel() - vars := createTestVars() + t.Run("calls getters in order until the first one that returns without ErrParameterEmpty", func(t *testing.T) { + t.Parallel() + + param := new(mocks.PipelineParamUnmarshaler) + param.On("UnmarshalPipelineParam", mock.Anything).Return(nil) + + called := []int{} + getters := []pipeline.GetterFunc{ + func() (interface{}, error) { + called = append(called, 0) + return nil, errors.Wrap(pipeline.ErrParameterEmpty, "make sure it still notices when wrapped") + }, + func() (interface{}, error) { + called = append(called, 1) + return 123, nil + }, + func() (interface{}, error) { + called = append(called, 2) + return 123, nil + }, + } + + err := pipeline.ResolveParam(param, getters) + require.NoError(t, err) + require.Equal(t, []int{0, 1}, called) + + param.AssertExpectations(t) + }) - tests := []struct { - expr string - result interface{} - err error - }{ - // no errors - {" $( foo.bar ) ", "value", nil}, - {" $( zet)", 123, nil}, - {"$(arr.1 ) ", 200, nil}, - // errors - {" $( missing)", nil, pipeline.ErrKeypathNotFound}, - {" $$( zet)", nil, pipeline.ErrParameterEmpty}, - {" ", nil, pipeline.ErrParameterEmpty}, - } + t.Run("returns any GetterFunc error that isn't ErrParameterEmpty", func(t *testing.T) { + t.Parallel() + + param := new(mocks.PipelineParamUnmarshaler) + called := []int{} + expectedErr := errors.New("some other issue") + + getters := []pipeline.GetterFunc{ + func() (interface{}, error) { + called = append(called, 0) + return nil, expectedErr + }, + 
func() (interface{}, error) { + called = append(called, 1) + return 123, nil + }, + func() (interface{}, error) { + called = append(called, 2) + return 123, nil + }, + } + + err := pipeline.ResolveParam(param, getters) + require.Equal(t, expectedErr, err) + require.Equal(t, []int{0}, called) + }) - for _, test := range tests { - test := test - t.Run(test.expr, func(t *testing.T) { - t.Parallel() + t.Run("calls UnmarshalPipelineParam with the value obtained from the GetterFuncs", func(t *testing.T) { + t.Parallel() - getter := pipeline.VarExpr(test.expr, vars) - v, err := getter() - if test.err == nil { - require.NoError(t, err) - require.Equal(t, test.result, v) - } else { - require.ErrorIs(t, err, test.err) - } - }) - } -} + expectedValue := 123 -func TestJSONWithVarExprs(t *testing.T) { - t.Parallel() + param := new(mocks.PipelineParamUnmarshaler) + param.On("UnmarshalPipelineParam", expectedValue).Return(nil) - vars := createTestVars() + getters := []pipeline.GetterFunc{ + func() (interface{}, error) { + return expectedValue, nil + }, + } - tests := []struct { - json string - field string - result interface{} - err error - }{ - // no errors - {`{ "x": $(zet) }`, "x", 123, nil}, - {`{ "x": { "y": $(zet) } }`, "x", map[string]interface{}{"y": 123}, nil}, - {`{ "z": "foo" }`, "z", "foo", nil}, - // errors - {`{ "x": $(missing) }`, "x", nil, pipeline.ErrKeypathNotFound}, - {`{ "x": "$(zet)" }`, "x", "$(zet)", pipeline.ErrBadInput}, - {`{ "$(foo.bar)": $(zet) }`, "value", 123, pipeline.ErrBadInput}, - } + err := pipeline.ResolveParam(param, getters) + require.NoError(t, err) - for _, test := range tests { - test := test - t.Run(test.json, func(t *testing.T) { - t.Parallel() + param.AssertExpectations(t) + }) - getter := pipeline.JSONWithVarExprs(test.json, vars, false) - v, err := getter() - if test.err != nil { - require.ErrorIs(t, err, test.err) - } else { - require.NoError(t, err) - m := v.(map[string]interface{}) - require.Equal(t, test.result, m[test.field]) - } 
- }) - } -} + t.Run("returns any error returned by UnmarshalPipelineParam", func(t *testing.T) { + t.Parallel() -func createTestVars() pipeline.Vars { - return pipeline.NewVarsFrom(map[string]interface{}{ - "foo": map[string]interface{}{ - "bar": "value", - }, - "zet": 123, - "arr": []interface{}{ - 100, 200, 300, - }, + expectedValue := 123 + expectedErr := errors.New("some issue") + + param := new(mocks.PipelineParamUnmarshaler) + param.On("UnmarshalPipelineParam", expectedValue).Return(expectedErr) + + getters := []pipeline.GetterFunc{ + func() (interface{}, error) { + return expectedValue, nil + }, + } + + err := pipeline.ResolveParam(param, getters) + require.Equal(t, expectedErr, err) + + param.AssertExpectations(t) }) } diff --git a/core/services/pipeline/test_helpers_test.go b/core/services/pipeline/test_helpers_test.go index 0f3d5c4101d..b5af10e1d48 100644 --- a/core/services/pipeline/test_helpers_test.go +++ b/core/services/pipeline/test_helpers_test.go @@ -13,6 +13,7 @@ import ( "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/services/pg" + "github.com/smartcontractkit/chainlink/core/services/pipeline" "github.com/smartcontractkit/sqlx" ) @@ -48,3 +49,11 @@ func makeBridge(t *testing.T, db *sqlx.DB, expectedRequest, response interface{} return server, bt.Name.String() } + +func mustNewObjectParam(t *testing.T, val interface{}) *pipeline.ObjectParam { + var value pipeline.ObjectParam + if err := value.UnmarshalPipelineParam(val); err != nil { + t.Fatalf("failed to init ObjectParam from %v, err: %v", val, err) + } + return &value +} diff --git a/core/services/pipeline/variables.go b/core/services/pipeline/variables.go index 33418b02042..3c0ef6e4859 100644 --- a/core/services/pipeline/variables.go +++ b/core/services/pipeline/variables.go @@ -1,7 +1,6 @@ package pipeline import ( - "bytes" "regexp" "strconv" "strings" @@ -11,8 +10,8 @@ import ( var ( ErrKeypathNotFound = errors.New("keypath not 
found") - ErrKeypathTooDeep = errors.New("keypath too deep (maximum 2 keys)") ErrVarsRoot = errors.New("cannot get/set the root of a pipeline.Vars") + ErrVarsSetNested = errors.New("cannot set a nested key of a pipeline.Vars") variableRegexp = regexp.MustCompile(`\$\(\s*([a-zA-Z0-9_\.]+)\s*\)`) ) @@ -21,6 +20,8 @@ type Vars struct { vars map[string]interface{} } +// NewVarsFrom creates new Vars from the given map. +// If the map is nil, a new map instance will be created. func NewVarsFrom(m map[string]interface{}) Vars { if m == nil { m = make(map[string]interface{}) @@ -28,115 +29,74 @@ func NewVarsFrom(m map[string]interface{}) Vars { return Vars{vars: m} } -func (vars Vars) Copy() Vars { - m := make(map[string]interface{}) - for k, v := range vars.vars { - m[k] = v - } - return Vars{vars: m} -} - +// Get returns the value for the given keypath or error. +// The keypath can consist of one or two parts, e.g. "foo" or "foo.bar". +// The second part of the keypath can be an index of a slice. 
func (vars Vars) Get(keypathStr string) (interface{}, error) { - keypath, err := newKeypathFromString(keypathStr) + keypathStr = strings.TrimSpace(keypathStr) + keypath, err := NewKeypathFromString(keypathStr) if err != nil { return nil, err } - - numParts := keypath.NumParts() - - if numParts == 0 { + if keypath.NumParts == 0 { return nil, ErrVarsRoot } var val interface{} var exists bool - if numParts >= 1 { - val, exists = vars.vars[string(keypath[0])] + if keypath.NumParts >= 1 { + val, exists = vars.vars[keypath.Part0] if !exists { - return nil, errors.Wrapf(ErrKeypathNotFound, "key %v / keypath %v", string(keypath[0]), keypath.String()) + return nil, errors.Wrapf(ErrKeypathNotFound, "key %v / keypath %v", keypath.Part0, keypathStr) } } - if numParts == 2 { + if keypath.NumParts == 2 { switch v := val.(type) { case map[string]interface{}: - val, exists = v[string(keypath[1])] + val, exists = v[keypath.Part1] if !exists { - return nil, errors.Wrapf(ErrKeypathNotFound, "key %v / keypath %v", string(keypath[1]), keypath.String()) + return nil, errors.Wrapf(ErrKeypathNotFound, "key %v / keypath %v", keypath.Part1, keypathStr) } case []interface{}: - idx, err := strconv.ParseInt(string(keypath[1]), 10, 64) + idx, err := strconv.ParseInt(keypath.Part1, 10, 64) if err != nil { return nil, errors.Wrapf(ErrKeypathNotFound, "could not parse key as integer: %v", err) - } else if idx > int64(len(v)-1) { - return nil, errors.Wrapf(ErrKeypathNotFound, "index %v out of range (length %v / keypath %v)", idx, len(v), keypath.String()) + } else if idx < 0 || idx > int64(len(v)-1) { + return nil, errors.Wrapf(ErrIndexOutOfRange, "index %v out of range (length %v / keypath %v)", idx, len(v), keypathStr) } val = v[idx] default: - return nil, errors.Wrapf(ErrKeypathNotFound, "value at key '%v' is a %T, not a map or slice", string(keypath[0]), val) + return nil, errors.Wrapf(ErrKeypathNotFound, "value at key '%v' is a %T, not a map or slice", keypath.Part0, val) } } return val, nil } 
-func (vars Vars) Set(dotID string, value interface{}) { +// Set sets a top-level variable specified by dotID. +// Returns error if either dotID is empty or it is a compound keypath. +func (vars Vars) Set(dotID string, value interface{}) error { dotID = strings.TrimSpace(dotID) if len(dotID) == 0 { - panic(ErrVarsRoot) - } else if strings.IndexByte(dotID, keypathSeparator[0]) >= 0 { - panic("cannot set a nested key of a pipeline.Vars") + return ErrVarsRoot + } else if strings.Contains(dotID, KeypathSeparator) { + return errors.Wrapf(ErrVarsSetNested, "%s", dotID) } - vars.vars[dotID] = value -} - -type Keypath [2][]byte -var keypathSeparator = []byte(".") - -func newKeypathFromString(keypathStr string) (Keypath, error) { - if len(keypathStr) == 0 { - return Keypath{}, nil - } - // The bytes package uses platform-dependent hardware optimizations and - // avoids the extra allocations that are required to work with strings. - // Keypaths have to be parsed quite a bit, so let's do it well. - kp := []byte(keypathStr) - - n := 1 + bytes.Count(kp, keypathSeparator) - if n > 2 { - return Keypath{}, errors.Wrapf(ErrKeypathTooDeep, "while parsing keypath '%v'", keypathStr) - } - idx := bytes.IndexByte(kp, keypathSeparator[0]) - if idx == -1 || idx == len(kp)-1 { - return Keypath{kp, nil}, nil - } - return Keypath{kp[:idx], kp[idx+1:]}, nil -} + vars.vars[dotID] = value -func (keypath Keypath) NumParts() int { - switch { - case keypath[0] == nil && keypath[1] == nil: - return 0 - case keypath[0] != nil && keypath[1] == nil: - return 1 - case keypath[0] == nil && keypath[1] != nil: - panic("invariant violation: keypath part 1 is non-nil but part 0 is nil") - default: - return 2 - } + return nil } -func (keypath Keypath) String() string { - switch keypath.NumParts() { - case 0: - return "(empty)" - case 1: - return string(keypath[0]) - case 2: - return string(keypath[0]) + string(keypathSeparator) + string(keypath[1]) - default: - panic("invariant violation: keypath must have 
0, 1, or 2 parts") +// Copy makes a copy of Vars by copying the underlying map. +// Used by scheduler for new tasks to avoid data races. +func (vars Vars) Copy() Vars { + newVars := make(map[string]interface{}) + // No need to copy recursively, because only the top-level map is mutable (see Set()). + for k, v := range vars.vars { + newVars[k] = v } + return NewVarsFrom(newVars) } diff --git a/core/services/pipeline/variables_test.go b/core/services/pipeline/variables_test.go index e9bef4ca377..9f9a7845986 100644 --- a/core/services/pipeline/variables_test.go +++ b/core/services/pipeline/variables_test.go @@ -4,13 +4,29 @@ import ( "testing" "github.com/pkg/errors" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink/core/services/pipeline" - "github.com/smartcontractkit/chainlink/core/services/pipeline/mocks" ) +func TestVars_Set(t *testing.T) { + t.Parallel() + + vars := pipeline.NewVarsFrom(nil) + + err := vars.Set("xyz", "foo") + require.NoError(t, err) + v, err := vars.Get("xyz") + require.NoError(t, err) + require.Equal(t, "foo", v) + + err = vars.Set(" ", "foo") + require.ErrorIs(t, err, pipeline.ErrVarsRoot) + + err = vars.Set("x.y", "foo") + require.ErrorIs(t, err, pipeline.ErrVarsSetNested) +} + func TestVars_Get(t *testing.T) { t.Parallel() @@ -54,10 +70,12 @@ func TestVars_Get(t *testing.T) { }, }) _, err := vars.Get("foo.bar.chainlink") - require.Equal(t, pipeline.ErrKeypathTooDeep, errors.Cause(err)) + require.Equal(t, pipeline.ErrWrongKeypath, errors.Cause(err)) }) t.Run("errors when getting a value at a keypath where the first part is not a map/slice", func(t *testing.T) { + t.Parallel() + vars := pipeline.NewVarsFrom(map[string]interface{}{ "foo": 123, }) @@ -66,276 +84,40 @@ func TestVars_Get(t *testing.T) { }) t.Run("errors when getting a value at a keypath with more than 2 components", func(t *testing.T) { + t.Parallel() + vars := pipeline.NewVarsFrom(map[string]interface{}{ "foo": 123, 
}) _, err := vars.Get("foo.bar.baz") - require.Equal(t, pipeline.ErrKeypathTooDeep, errors.Cause(err)) + require.Equal(t, pipeline.ErrWrongKeypath, errors.Cause(err)) }) -} - -func TestResolveValue(t *testing.T) { - t.Parallel() - t.Run("calls getters in order until the first one that returns without ErrParameterEmpty", func(t *testing.T) { + t.Run("index out of range", func(t *testing.T) { t.Parallel() - param := new(mocks.PipelineParamUnmarshaler) - param.On("UnmarshalPipelineParam", mock.Anything).Return(nil) - - called := []int{} - getters := []pipeline.GetterFunc{ - func() (interface{}, error) { - called = append(called, 0) - return nil, errors.Wrap(pipeline.ErrParameterEmpty, "make sure it still notices when wrapped") - }, - func() (interface{}, error) { - called = append(called, 1) - return 123, nil - }, - func() (interface{}, error) { - called = append(called, 2) - return 123, nil - }, - } - - err := pipeline.ResolveParam(param, getters) - require.NoError(t, err) - require.Equal(t, []int{0, 1}, called) - - param.AssertExpectations(t) - }) - - t.Run("returns any GetterFunc error that isn't ErrParameterEmpty", func(t *testing.T) { - t.Parallel() - - param := new(mocks.PipelineParamUnmarshaler) - called := []int{} - expectedErr := errors.New("some other issue") - - getters := []pipeline.GetterFunc{ - func() (interface{}, error) { - called = append(called, 0) - return nil, expectedErr - }, - func() (interface{}, error) { - called = append(called, 1) - return 123, nil - }, - func() (interface{}, error) { - called = append(called, 2) - return 123, nil - }, - } - - err := pipeline.ResolveParam(param, getters) - require.Equal(t, expectedErr, err) - require.Equal(t, []int{0}, called) - }) - - t.Run("calls UnmarshalPipelineParam with the value obtained from the GetterFuncs", func(t *testing.T) { - t.Parallel() - - expectedValue := 123 - - param := new(mocks.PipelineParamUnmarshaler) - param.On("UnmarshalPipelineParam", expectedValue).Return(nil) - - getters := 
[]pipeline.GetterFunc{ - func() (interface{}, error) { - return expectedValue, nil - }, - } - - err := pipeline.ResolveParam(param, getters) - require.NoError(t, err) - - param.AssertExpectations(t) - }) - - t.Run("returns any error returned by UnmarshalPipelineParam", func(t *testing.T) { - t.Parallel() - - expectedValue := 123 - expectedErr := errors.New("some issue") - - param := new(mocks.PipelineParamUnmarshaler) - param.On("UnmarshalPipelineParam", expectedValue).Return(expectedErr) - - getters := []pipeline.GetterFunc{ - func() (interface{}, error) { - return expectedValue, nil - }, - } - - err := pipeline.ResolveParam(param, getters) - require.Equal(t, expectedErr, err) - - param.AssertExpectations(t) - }) -} - -func TestGetters_VarExpr(t *testing.T) { - t.Parallel() - - vars := pipeline.NewVarsFrom(map[string]interface{}{ - "foo": map[string]interface{}{ - "bar": 42, - }, - }) - - tests := []struct { - expr string - value interface{} - err error - }{ - {"$(foo.bar)", 42, nil}, - {" $(foo.bar)", 42, nil}, - {"$(foo.bar) ", 42, nil}, - {"$( foo.bar)", 42, nil}, - {"$(foo.bar )", 42, nil}, - {"$( foo.bar )", 42, nil}, - {" $( foo.bar )", 42, nil}, - {"$()", nil, pipeline.ErrVarsRoot}, - {"$(foo.bar", nil, pipeline.ErrParameterEmpty}, - {"$foo.bar)", nil, pipeline.ErrParameterEmpty}, - {"(foo.bar)", nil, pipeline.ErrParameterEmpty}, - {"foo.bar", nil, pipeline.ErrParameterEmpty}, - } - - for _, test := range tests { - test := test - t.Run(test.expr, func(t *testing.T) { - val, err := pipeline.VarExpr(test.expr, vars)() - require.Equal(t, test.value, val) - require.Equal(t, test.err, errors.Cause(err)) + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []interface{}{1, "bar", false}, }) - } -} - -func TestGetters_NonemptyString(t *testing.T) { - t.Parallel() - - t.Run("returns any non-empty string", func(t *testing.T) { - t.Parallel() - val, err := pipeline.NonemptyString("foo bar")() - require.NoError(t, err) - require.Equal(t, "foo bar", val) - 
}) - t.Run("returns ErrParameterEmpty when given an empty string (including only spaces)", func(t *testing.T) { - t.Parallel() - _, err := pipeline.NonemptyString("")() - require.Equal(t, pipeline.ErrParameterEmpty, errors.Cause(err)) - _, err = pipeline.NonemptyString(" ")() - require.Equal(t, pipeline.ErrParameterEmpty, errors.Cause(err)) - }) -} - -func TestGetters_Input(t *testing.T) { - t.Parallel() + _, err := vars.Get("foo.4") + require.ErrorIs(t, err, pipeline.ErrIndexOutOfRange) - t.Run("returns the requested input's Value and Error if they exist", func(t *testing.T) { - t.Parallel() - expectedVal := "bar" - expectedErr := errors.New("some err") - val, err := pipeline.Input([]pipeline.Result{{Value: "foo"}, {Value: expectedVal, Error: expectedErr}, {Value: "baz"}}, 1)() - require.Equal(t, expectedVal, val) - require.Equal(t, expectedErr, err) - }) - - t.Run("returns ErrParameterEmpty if the specified input does not exist", func(t *testing.T) { - t.Parallel() - _, err := pipeline.Input([]pipeline.Result{{Value: "foo"}}, 1)() - require.Equal(t, pipeline.ErrParameterEmpty, errors.Cause(err)) + _, err = vars.Get("foo.-1") + require.ErrorIs(t, err, pipeline.ErrIndexOutOfRange) }) } -func TestGetters_Inputs(t *testing.T) { +func TestVars_Copy(t *testing.T) { t.Parallel() - theErr := errors.New("some issue") - - tests := []struct { - name string - inputs []pipeline.Result - expected []interface{} - expectedErr error - }{ - { - "returns the values and errors", - []pipeline.Result{ - {Value: "foo"}, - {Error: theErr}, - {Value: "baz"}, - }, - []interface{}{"foo", theErr, "baz"}, nil, + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "nested": map[string]interface{}{ + "foo": "zet", }, - } - - for _, test := range tests { - test := test - t.Run("returns all of the inputs' Values if the provided inputs meet the spec", func(t *testing.T) { - t.Parallel() - - val, err := pipeline.Inputs(test.inputs)() - require.Equal(t, test.expectedErr, errors.Cause(err)) - 
require.Equal(t, test.expected, val) - }) - } -} - -func TestKeypath(t *testing.T) { - t.Run("can be constructed from a period-delimited string with 2 or fewer parts", func(t *testing.T) { - kp, err := pipeline.NewKeypathFromString("") - require.NoError(t, err) - require.Equal(t, pipeline.Keypath{nil, nil}, kp) - - kp, err = pipeline.NewKeypathFromString("foo") - require.NoError(t, err) - require.Equal(t, pipeline.Keypath{[]byte("foo"), nil}, kp) - - kp, err = pipeline.NewKeypathFromString("foo.bar") - require.NoError(t, err) - require.Equal(t, pipeline.Keypath{[]byte("foo"), []byte("bar")}, kp) - }) - - t.Run("errors if constructor is passed more than 2 parts", func(t *testing.T) { - _, err := pipeline.NewKeypathFromString("foo.bar.baz") - require.Equal(t, pipeline.ErrKeypathTooDeep, errors.Cause(err)) + "bar": 321, }) - t.Run("accurately reports its NumParts", func(t *testing.T) { - kp, err := pipeline.NewKeypathFromString("") - require.NoError(t, err) - require.Equal(t, pipeline.Keypath{nil, nil}, kp) - require.Equal(t, 0, kp.NumParts()) - - kp, err = pipeline.NewKeypathFromString("foo") - require.NoError(t, err) - require.Equal(t, pipeline.Keypath{[]byte("foo"), nil}, kp) - require.Equal(t, 1, kp.NumParts()) - - kp, err = pipeline.NewKeypathFromString("foo.bar") - require.NoError(t, err) - require.Equal(t, pipeline.Keypath{[]byte("foo"), []byte("bar")}, kp) - require.Equal(t, 2, kp.NumParts()) - }) - - t.Run("stringifies correctly", func(t *testing.T) { - kp, err := pipeline.NewKeypathFromString("") - require.NoError(t, err) - require.Equal(t, pipeline.Keypath{nil, nil}, kp) - require.Equal(t, "(empty)", kp.String()) - - kp, err = pipeline.NewKeypathFromString("foo") - require.NoError(t, err) - require.Equal(t, pipeline.Keypath{[]byte("foo"), nil}, kp) - require.Equal(t, "foo", kp.String()) - - kp, err = pipeline.NewKeypathFromString("foo.bar") - require.NoError(t, err) - require.Equal(t, pipeline.Keypath{[]byte("foo"), []byte("bar")}, kp) - require.Equal(t, 
"foo.bar", kp.String()) - }) + varsCopy := vars.Copy() + require.Equal(t, vars, varsCopy) } diff --git a/core/services/promreporter/prom_reporter.go b/core/services/promreporter/prom_reporter.go index da3ad9e2765..cf1e4a8b175 100644 --- a/core/services/promreporter/prom_reporter.go +++ b/core/services/promreporter/prom_reporter.go @@ -24,7 +24,7 @@ type ( db *sql.DB lggr logger.Logger backend PrometheusBackend - newHeads *utils.Mailbox + newHeads *utils.Mailbox[*evmtypes.Head] chStop chan struct{} wgDone sync.WaitGroup reportPeriod time.Duration @@ -103,7 +103,7 @@ func NewPromReporter(db *sql.DB, lggr logger.Logger, opts ...interface{}) *promR db: db, lggr: lggr.Named("PromReporter"), backend: backend, - newHeads: utils.NewMailbox(1), + newHeads: utils.NewMailbox[*evmtypes.Head](1), chStop: chStop, reportPeriod: period, } @@ -138,11 +138,10 @@ func (pr *promReporter) eventLoop() { for { select { case <-pr.newHeads.Notify(): - item, exists := pr.newHeads.Retrieve() + head, exists := pr.newHeads.Retrieve() if !exists { continue } - head := evmtypes.AsHead(item) pr.reportHeadMetrics(ctx, head) case <-time.After(pr.reportPeriod): if err := errors.Wrap(pr.reportPipelineRunStats(ctx), "reportPipelineRunStats failed"); err != nil { diff --git a/core/services/relay/delegate.go b/core/services/relay/delegate.go index 7675bf13b4e..12f0a37d6cc 100644 --- a/core/services/relay/delegate.go +++ b/core/services/relay/delegate.go @@ -170,18 +170,12 @@ func (d delegate) NewOCR2Provider(externalJobID uuid.UUID, s interface{}) (types return r.NewOCR2Provider(externalJobID, solana.OCR2Spec{ ID: spec.ID, IsBootstrap: spec.IsBootstrapPeer, - NodeEndpointHTTP: config.NodeEndpointHTTP, + ChainID: config.ChainID, ProgramID: programID, StateID: stateID, StoreProgramID: storeProgramID, TransmissionsID: transmissionsID, TransmissionSigner: transmissionSigner, - UsePreflight: config.UsePreflight, - Commitment: config.Commitment, - TxTimeout: config.TxTimeout, - PollingInterval: 
config.PollingInterval, - PollingCtxTimeout: config.PollingCtxTimeout, - StaleTimeout: config.StaleTimeout, }) case types.Terra: r, exists := d.relayers[types.Terra] diff --git a/core/services/relay/delegate_test.go b/core/services/relay/delegate_test.go index 62ee1594f1d..ad315550bee 100644 --- a/core/services/relay/delegate_test.go +++ b/core/services/relay/delegate_test.go @@ -12,11 +12,14 @@ import ( "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink-solana/pkg/solana" + solconfig "github.com/smartcontractkit/chainlink-solana/pkg/solana/config" + soldb "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" "github.com/smartcontractkit/chainlink-terra/pkg/terra" terradb "github.com/smartcontractkit/chainlink-terra/pkg/terra/db" "github.com/smartcontractkit/sqlx" chainsMock "github.com/smartcontractkit/chainlink/core/chains/evm/mocks" + solMock "github.com/smartcontractkit/chainlink/core/chains/solana/mocks" terraMock "github.com/smartcontractkit/chainlink/core/chains/terra/mocks" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/job" @@ -59,6 +62,15 @@ func TestNewOCR2Provider(t *testing.T) { terraChains := new(terraMock.ChainSet) terraChains.On("Chain", mock.Anything, "Chainlink-99").Return(terraChain, nil).Times(2) + // set up solana mocks + solChain := new(solMock.Chain) + solChain.On("Config").Return(solconfig.NewConfig(soldb.ChainCfg{}, lggr)) + solChain.On("TxManager").Return(new(solMock.TxManager)).Once() + solChain.On("Reader").Return(new(solMock.Reader), nil).Once() + + solChains := new(solMock.ChainSet) + solChains.On("Chain", mock.Anything, "Chainlink-99").Return(solChain, nil).Once() + d := relay.NewDelegate(keystore) // struct for testing multiple specs @@ -89,7 +101,7 @@ func TestNewOCR2Provider(t *testing.T) { } d.AddRelayer(relaytypes.EVM, evm.NewRelayer(&sqlx.DB{}, &chainsMock.ChainSet{}, lggr)) - d.AddRelayer(relaytypes.Solana, solana.NewRelayer(lggr)) + 
d.AddRelayer(relaytypes.Solana, solana.NewRelayer(lggr, solChains)) d.AddRelayer(relaytypes.Terra, terra.NewRelayer(lggr, terraChains)) for _, s := range specs { @@ -111,4 +123,6 @@ func TestNewOCR2Provider(t *testing.T) { solKey.AssertExpectations(t) terraChains.AssertExpectations(t) terraChain.AssertExpectations(t) + solChains.AssertExpectations(t) + solChain.AssertExpectations(t) } diff --git a/core/services/relay/evm/config_tracker.go b/core/services/relay/evm/config_tracker.go index 0e4409c1d5b..ee05f2cd706 100644 --- a/core/services/relay/evm/config_tracker.go +++ b/core/services/relay/evm/config_tracker.go @@ -13,10 +13,10 @@ import ( "github.com/pkg/errors" ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2/types" - "github.com/smartcontractkit/chainlink/core/chains" evmclient "github.com/smartcontractkit/chainlink/core/chains/evm/client" httypes "github.com/smartcontractkit/chainlink/core/chains/evm/headtracker/types" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/core/config" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/utils" ) @@ -28,7 +28,7 @@ type ConfigTracker struct { client evmclient.Client addr common.Address contractABI abi.ABI - chainType chains.ChainType + chainType config.ChainType latestBlockHeight int64 latestBlockHeightMu sync.RWMutex @@ -40,7 +40,7 @@ type ConfigTracker struct { } // NewConfigTracker builds a new config tracker -func NewConfigTracker(lggr logger.Logger, contractABI abi.ABI, client evmclient.Client, addr common.Address, chainType chains.ChainType, headBroadcaster httypes.HeadBroadcaster) *ConfigTracker { +func NewConfigTracker(lggr logger.Logger, contractABI abi.ABI, client evmclient.Client, addr common.Address, chainType config.ChainType, headBroadcaster httypes.HeadBroadcaster) *ConfigTracker { return &ConfigTracker{ client: client, addr: addr, diff --git 
a/core/services/relay/evm/request_round_db.go b/core/services/relay/evm/request_round_db.go index 3a8fbcd372a..781335addaa 100644 --- a/core/services/relay/evm/request_round_db.go +++ b/core/services/relay/evm/request_round_db.go @@ -12,8 +12,6 @@ import ( "github.com/smartcontractkit/chainlink/core/services/pg" ) -//go:generate mockery --name OCRContractTrackerDB --output ./mocks/ --case=underscore - // RequestRoundDB stores requested rounds for querying by the median plugin. type RequestRoundDB interface { SaveLatestRoundRequested(tx pg.Queryer, rr ocr2aggregator.OCR2AggregatorRoundRequested) error diff --git a/core/services/synchronization/explorer_client_test.go b/core/services/synchronization/explorer_client_test.go index 1dffd1bffff..e4f0b35ae2b 100644 --- a/core/services/synchronization/explorer_client_test.go +++ b/core/services/synchronization/explorer_client_test.go @@ -1,7 +1,6 @@ package synchronization_test import ( - "context" "net/http" "net/http/httptest" "net/url" @@ -28,13 +27,14 @@ func TestWebSocketClient_ReconnectLoop(t *testing.T) { require.NoError(t, explorerClient.Start(testutils.Context(t))) cltest.CallbackOrTimeout(t, "ws client connects", func() { <-wsserver.Connected - }, 1*time.Second) + }, testutils.WaitTimeout(t)) // reconnect after server disconnect wsserver.WriteCloseMessage() cltest.CallbackOrTimeout(t, "ws client reconnects", func() { + <-wsserver.Disconnected <-wsserver.Connected - }, 3*time.Second) + }, testutils.WaitTimeout(t)) require.NoError(t, explorerClient.Close()) } @@ -70,10 +70,10 @@ func TestWebSocketClient_Send_DefaultsToTextMessage(t *testing.T) { defer explorerClient.Close() expectation := `{"hello": "world"}` - explorerClient.Send(context.Background(), []byte(expectation)) + explorerClient.Send(testutils.Context(t), []byte(expectation)) cltest.CallbackOrTimeout(t, "receive stats", func() { require.Equal(t, expectation, <-wsserver.ReceivedText) - }, 1*time.Second) + }, testutils.WaitTimeout(t)) } func 
TestWebSocketClient_Send_TextMessage(t *testing.T) { @@ -85,10 +85,10 @@ func TestWebSocketClient_Send_TextMessage(t *testing.T) { defer explorerClient.Close() expectation := `{"hello": "world"}` - explorerClient.Send(context.Background(), []byte(expectation), synchronization.ExplorerTextMessage) + explorerClient.Send(testutils.Context(t), []byte(expectation), synchronization.ExplorerTextMessage) cltest.CallbackOrTimeout(t, "receive stats", func() { require.Equal(t, expectation, <-wsserver.ReceivedText) - }) + }, testutils.WaitTimeout(t)) } func TestWebSocketClient_Send_Binary(t *testing.T) { @@ -101,10 +101,10 @@ func TestWebSocketClient_Send_Binary(t *testing.T) { address := common.HexToAddress("0xabc123") addressBytes := address.Bytes() - explorerClient.Send(context.Background(), addressBytes, synchronization.ExplorerBinaryMessage) + explorerClient.Send(testutils.Context(t), addressBytes, synchronization.ExplorerBinaryMessage) cltest.CallbackOrTimeout(t, "receive stats", func() { require.Equal(t, addressBytes, <-wsserver.ReceivedBinary) - }) + }, testutils.WaitTimeout(t)) } func TestWebSocketClient_Send_Unsupported(t *testing.T) { @@ -115,7 +115,7 @@ func TestWebSocketClient_Send_Unsupported(t *testing.T) { require.NoError(t, explorerClient.Start(testutils.Context(t))) assert.PanicsWithValue(t, "send on explorer client received unsupported message type -1", func() { - explorerClient.Send(context.Background(), []byte(`{"hello": "world"}`), -1) + explorerClient.Send(testutils.Context(t), []byte(`{"hello": "world"}`), -1) }) require.NoError(t, explorerClient.Close()) } @@ -129,18 +129,18 @@ func TestWebSocketClient_Send_WithAck(t *testing.T) { defer explorerClient.Close() expectation := `{"hello": "world"}` - explorerClient.Send(context.Background(), []byte(expectation)) + explorerClient.Send(testutils.Context(t), []byte(expectation)) cltest.CallbackOrTimeout(t, "receive stats", func() { require.Equal(t, expectation, <-wsserver.ReceivedText) err := 
wsserver.Broadcast(`{"result": 200}`) assert.NoError(t, err) - }) + }, testutils.WaitTimeout(t)) cltest.CallbackOrTimeout(t, "receive response", func() { - response, err := explorerClient.Receive(context.Background()) + response, err := explorerClient.Receive(testutils.Context(t)) assert.NoError(t, err) assert.NotNil(t, response) - }) + }, testutils.WaitTimeout(t)) } func TestWebSocketClient_Send_WithAckTimeout(t *testing.T) { @@ -152,16 +152,15 @@ func TestWebSocketClient_Send_WithAckTimeout(t *testing.T) { defer explorerClient.Close() expectation := `{"hello": "world"}` - explorerClient.Send(context.Background(), []byte(expectation)) + explorerClient.Send(testutils.Context(t), []byte(expectation)) cltest.CallbackOrTimeout(t, "receive stats", func() { require.Equal(t, expectation, <-wsserver.ReceivedText) - }) + }, testutils.WaitTimeout(t)) cltest.CallbackOrTimeout(t, "receive response", func() { - _, err := explorerClient.Receive(context.Background(), 100*time.Millisecond) - assert.Error(t, err) - assert.Equal(t, err, synchronization.ErrReceiveTimeout) - }, 300*time.Millisecond) + _, err := explorerClient.Receive(testutils.Context(t), 100*time.Millisecond) + assert.ErrorIs(t, err, synchronization.ErrReceiveTimeout) + }, testutils.WaitTimeout(t)) } func TestWebSocketClient_Status_ConnectAndServerDisconnect(t *testing.T) { @@ -175,31 +174,39 @@ func TestWebSocketClient_Status_ConnectAndServerDisconnect(t *testing.T) { defer explorerClient.Close() cltest.CallbackOrTimeout(t, "ws client connects", func() { <-wsserver.Connected - }, 5*time.Second) + }, testutils.WaitTimeout(t)) gomega.NewWithT(t).Eventually(func() synchronization.ConnectionStatus { return explorerClient.Status() }).Should(gomega.Equal(synchronization.ConnectionStatusConnected)) + // this triggers ConnectionStatusError and then the client gets reconnected wsserver.WriteCloseMessage() - wsserver.Close() + cltest.CallbackOrTimeout(t, "ws client disconnects and reconnects", func() { + 
<-wsserver.Disconnected + <-wsserver.Connected + }, testutils.WaitTimeout(t)) + + // expecting the client to reconnect gomega.NewWithT(t).Eventually(func() synchronization.ConnectionStatus { return explorerClient.Status() - }).Should(gomega.Equal(synchronization.ConnectionStatusError)) + }).Should(gomega.Equal(synchronization.ConnectionStatusConnected)) + + require.Equal(t, 1, wsserver.ConnectionsCount()) } func TestWebSocketClient_Status_ConnectError(t *testing.T) { badURL, err := url.Parse("http://badhost.com") require.NoError(t, err) - errorexplorerClient := newTestExplorerClient(t, badURL) - require.NoError(t, errorexplorerClient.Start(testutils.Context(t))) - defer errorexplorerClient.Close() - time.Sleep(100 * time.Millisecond) - - assert.Equal(t, synchronization.ConnectionStatusError, errorexplorerClient.Status()) + errorExplorerClient := newTestExplorerClient(t, badURL) + require.NoError(t, errorExplorerClient.Start(testutils.Context(t))) + defer errorExplorerClient.Close() + gomega.NewWithT(t).Eventually(func() synchronization.ConnectionStatus { + return errorExplorerClient.Status() + }).Should(gomega.Equal(synchronization.ConnectionStatusError)) } func newTestExplorerClient(t *testing.T, wsURL *url.URL) synchronization.ExplorerClient { diff --git a/core/services/synchronization/mocks/telem_client.go b/core/services/synchronization/mocks/telem_client.go index a06df100ce1..96a6856db6b 100644 --- a/core/services/synchronization/mocks/telem_client.go +++ b/core/services/synchronization/mocks/telem_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/services/synchronization/telemetry_ingress_batch_client_test.go b/core/services/synchronization/telemetry_ingress_batch_client_test.go index 5bdf03fcee0..ed97e1ba0e0 100644 --- a/core/services/synchronization/telemetry_ingress_batch_client_test.go +++ b/core/services/synchronization/telemetry_ingress_batch_client_test.go @@ -1,7 +1,6 @@ package synchronization_test import ( - "context" "net/url" "testing" "time" @@ -42,17 +41,17 @@ func TestTelemetryIngressBatchClient_HappyPath(t *testing.T) { // Create telemetry payloads for different contracts telemPayload1 := synchronization.TelemPayload{ - Ctx: context.Background(), + Ctx: testutils.Context(t), Telemetry: []byte("Mock telem 1"), ContractID: "0x1", } telemPayload2 := synchronization.TelemPayload{ - Ctx: context.Background(), + Ctx: testutils.Context(t), Telemetry: []byte("Mock telem 2"), ContractID: "0x2", } telemPayload3 := synchronization.TelemPayload{ - Ctx: context.Background(), + Ctx: testutils.Context(t), Telemetry: []byte("Mock telem 3"), ContractID: "0x3", } @@ -94,9 +93,9 @@ func TestTelemetryIngressBatchClient_HappyPath(t *testing.T) { telemIngressClient.Send(telemPayload2) // Wait for the telemetry to be handled - g.Eventually(contractCounter1.Load, "5s").Should(gomega.Equal(uint32(3))) - g.Eventually(contractCounter2.Load, "5s").Should(gomega.Equal(uint32(2))) - g.Eventually(contractCounter3.Load, "5s").Should(gomega.Equal(uint32(1))) + g.Eventually(func() []uint32 { + return []uint32{contractCounter1.Load(), contractCounter2.Load(), contractCounter3.Load()} + }).Should(gomega.Equal([]uint32{3, 2, 1})) // Client should shut down telemIngressClient.Close() diff --git a/core/services/vrf/delegate.go b/core/services/vrf/delegate.go index 4bf65e97744..f3f0cae294b 100644 --- a/core/services/vrf/delegate.go +++ b/core/services/vrf/delegate.go @@ -4,7 +4,6 @@ import ( "encoding/hex" "math/big" "strings" - "sync" "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" 
@@ -13,7 +12,9 @@ import ( "github.com/smartcontractkit/sqlx" "github.com/smartcontractkit/chainlink/core/chains/evm" + "github.com/smartcontractkit/chainlink/core/chains/evm/log" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/aggregator_v3_interface" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/batch_vrf_coordinator_v2" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/solidity_vrf_coordinator_interface" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/vrf_coordinator_v2" "github.com/smartcontractkit/chainlink/core/logger" @@ -36,9 +37,10 @@ type Delegate struct { //go:generate mockery --name GethKeyStore --output mocks/ --case=underscore type GethKeyStore interface { - GetRoundRobinAddress(addresses ...common.Address) (common.Address, error) + GetRoundRobinAddress(chainID *big.Int, addresses ...common.Address) (common.Address, error) } +//go:generate mockery --name Config --output mocks/ --case=underscore type Config interface { MinIncomingConfirmations() uint32 EvmGasLimitDefault() uint64 @@ -92,6 +94,17 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { if err != nil { return nil, err } + + // If the batch coordinator address is not provided, we will fall back to non-batched + var batchCoordinatorV2 *batch_vrf_coordinator_v2.BatchVRFCoordinatorV2 + if jb.VRFSpec.BatchCoordinatorAddress != nil { + batchCoordinatorV2, err = batch_vrf_coordinator_v2.NewBatchVRFCoordinatorV2( + jb.VRFSpec.BatchCoordinatorAddress.Address(), chain.Client()) + if err != nil { + return nil, errors.Wrap(err, "create batch coordinator wrapper") + } + } + l := d.lggr.With( "jobID", jb.ID, "externalJobID", jb.ExternalJobID, @@ -110,27 +123,26 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { if err != nil { return nil, err } - return []job.ServiceCtx{&listenerV2{ - cfg: chain.Config(), - l: lV2, - ethClient: 
chain.Client(), - logBroadcaster: chain.LogBroadcaster(), - q: d.q, - coordinator: coordinatorV2, - aggregator: aggregator, - txm: chain.TxManager(), - pipelineRunner: d.pr, - gethks: d.ks.Eth(), - job: jb, - reqLogs: utils.NewHighCapacityMailbox(), - chStop: make(chan struct{}), - respCount: GetStartingResponseCountsV2(d.q, lV2, chain.Client().ChainID().Uint64(), chain.Config().EvmFinalityDepth()), - blockNumberToReqID: pairing.New(), - reqAdded: func() {}, - headBroadcaster: chain.HeadBroadcaster(), - wg: &sync.WaitGroup{}, - deduper: newLogDeduper(int(chain.Config().EvmFinalityDepth())), - }}, nil + + return []job.ServiceCtx{newListenerV2( + chain.Config(), + lV2, + chain.Client(), + chain.ID(), + chain.LogBroadcaster(), + d.q, + coordinatorV2, + batchCoordinatorV2, + aggregator, + chain.TxManager(), + d.pr, + d.ks.Eth(), + jb, + utils.NewHighCapacityMailbox[log.Broadcast](), + func() {}, + GetStartingResponseCountsV2(d.q, lV2, chain.Client().ChainID().Uint64(), chain.Config().EvmFinalityDepth()), + chain.HeadBroadcaster(), + newLogDeduper(int(chain.Config().EvmFinalityDepth())))}, nil } if _, ok := task.(*pipeline.VRFTask); ok { return []job.ServiceCtx{&listenerV1{ @@ -146,7 +158,7 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { job: jb, // Note the mailbox size effectively sets a limit on how many logs we can replay // in the event of a VRF outage. 
- reqLogs: utils.NewHighCapacityMailbox(), + reqLogs: utils.NewHighCapacityMailbox[log.Broadcast](), chStop: make(chan struct{}), waitOnStop: make(chan struct{}), newHead: make(chan struct{}, 1), diff --git a/core/services/vrf/delegate_test.go b/core/services/vrf/delegate_test.go index b245bfc6bdc..9a3d2162868 100644 --- a/core/services/vrf/delegate_test.go +++ b/core/services/vrf/delegate_test.go @@ -76,7 +76,7 @@ func buildVrfUni(t *testing.T, db *sqlx.DB, cfg *configtest.TestGeneralConfig) v require.NoError(t, ks.Unlock("p4SsW0rD1!@#_")) _, err := ks.Eth().Create(big.NewInt(0)) require.NoError(t, err) - submitter, err := ks.Eth().GetRoundRobinAddress() + submitter, err := ks.Eth().GetRoundRobinAddress(nil) require.NoError(t, err) vrfkey, err := ks.VRF().Create() require.NoError(t, err) diff --git a/core/services/vrf/integration_test.go b/core/services/vrf/integration_test.go index 533098e7017..1f26e2fb74d 100644 --- a/core/services/vrf/integration_test.go +++ b/core/services/vrf/integration_test.go @@ -43,7 +43,7 @@ func TestIntegration_VRF_JPV2(t *testing.T) { for _, tt := range tests { test := tt t.Run(test.name, func(t *testing.T) { - config, _ := heavyweight.FullTestDB(t, fmt.Sprintf("vrf_jpv2_%v", test.eip1559), true, true) + config, _ := heavyweight.FullTestDB(t, fmt.Sprintf("vrf_jpv2_%v", test.eip1559)) config.Overrides.GlobalEvmEIP1559DynamicFees = null.BoolFrom(test.eip1559) key1 := cltest.MustGenerateRandomKey(t) key2 := cltest.MustGenerateRandomKey(t) @@ -121,7 +121,7 @@ func TestIntegration_VRF_JPV2(t *testing.T) { } func TestIntegration_VRF_WithBHS(t *testing.T) { - config, _ := heavyweight.FullTestDB(t, "vrf_with_bhs", true, true) + config, _ := heavyweight.FullTestDB(t, "vrf_with_bhs") config.Overrides.GlobalEvmEIP1559DynamicFees = null.BoolFrom(true) key := cltest.MustGenerateRandomKey(t) cu := newVRFCoordinatorUniverse(t, key) diff --git a/core/services/vrf/integration_v2_test.go b/core/services/vrf/integration_v2_test.go index 
406c53387aa..f1ec9fcd7dd 100644 --- a/core/services/vrf/integration_v2_test.go +++ b/core/services/vrf/integration_v2_test.go @@ -32,6 +32,7 @@ import ( "github.com/smartcontractkit/chainlink/core/chains/evm/types" "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/internal/cltest/heavyweight" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/batch_vrf_coordinator_v2" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/blockhash_store" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/link_token_interface" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/mock_v3_aggregator_contract" @@ -75,6 +76,8 @@ type coordinatorV2Universe struct { rootContract *vrf_coordinator_v2.VRFCoordinatorV2 rootContractAddress common.Address + batchCoordinatorContract *batch_vrf_coordinator_v2.BatchVRFCoordinatorV2 + batchCoordinatorContractAddress common.Address linkContract *link_token_interface.LinkToken linkContractAddress common.Address bhsContract *blockhash_store.BlockhashStore @@ -162,7 +165,15 @@ func newVRFCoordinatorV2Universe(t *testing.T, key ethkey.KeyV2, numConsumers in coordinatorAddress, _, coordinatorContract, err := vrf_coordinator_v2.DeployVRFCoordinatorV2( neil, backend, linkAddress, bhsAddress, linkEthFeed /* linkEth*/) - require.NoError(t, err, "failed to deploy VRFCoordinator contract to simulated ethereum blockchain") + require.NoError(t, err, "failed to deploy VRFCoordinatorV2 contract to simulated ethereum blockchain") + backend.Commit() + + // Deploy batch VRF V2 coordinator + batchCoordinatorAddress, _, batchCoordinatorContract, err := + batch_vrf_coordinator_v2.DeployBatchVRFCoordinatorV2( + neil, backend, coordinatorAddress, + ) + require.NoError(t, err, "failed to deploy BatchVRFCoordinatorV2 contract to simulated ethereum blockchain") backend.Commit() // Create the VRF consumers. 
@@ -204,7 +215,7 @@ func newVRFCoordinatorV2Universe(t *testing.T, key ethkey.KeyV2, numConsumers in // Set the configuration on the coordinator. _, err = coordinatorContract.SetConfig(neil, uint16(1), // minRequestConfirmations - uint32(1e6), // gas limit + uint32(2.5e6), // gas limit uint32(60*60*24), // stalenessSeconds uint32(vrf.GasAfterPaymentCalculation), // gasAfterPaymentCalculation big.NewInt(1e16), // 0.01 eth per link fallbackLinkPrice @@ -228,6 +239,9 @@ func newVRFCoordinatorV2Universe(t *testing.T, key ethkey.KeyV2, numConsumers in consumerContracts: consumerContracts, consumerContractAddresses: consumerContractAddresses, + batchCoordinatorContract: batchCoordinatorContract, + batchCoordinatorContractAddress: batchCoordinatorAddress, + revertingConsumerContract: revertingConsumerContract, revertingConsumerContractAddress: revertingConsumerContractAddress, @@ -293,7 +307,13 @@ func subscribeVRF( return sub, subID } -func createVRFJobs(t *testing.T, fromKeys [][]ethkey.KeyV2, app *cltest.TestApplication, uni coordinatorV2Universe) (jobs []job.Job) { +func createVRFJobs( + t *testing.T, + fromKeys [][]ethkey.KeyV2, + app *cltest.TestApplication, + uni coordinatorV2Universe, + batchEnabled bool, +) (jobs []job.Job) { // Create separate jobs for each gas lane and register their keys for i, keys := range fromKeys { var keyStrs []string @@ -310,9 +330,13 @@ func createVRFJobs(t *testing.T, fromKeys [][]ethkey.KeyV2, app *cltest.TestAppl JobID: jid.String(), Name: fmt.Sprintf("vrf-primary-%d", i), CoordinatorAddress: uni.rootContractAddress.String(), + BatchCoordinatorAddress: uni.batchCoordinatorContractAddress.String(), + BatchFulfillmentEnabled: batchEnabled, MinIncomingConfirmations: incomingConfs, PublicKey: vrfkey.PublicKey.String(), FromAddresses: keyStrs, + BackoffInitialDelay: 10 * time.Millisecond, + BackoffMaxDelay: time.Second, V2: true, }).Toml() jb, err := vrf.ValidatedVRFSpec(s) @@ -355,9 +379,9 @@ func 
requestRandomnessAndAssertRandomWordsRequestedEvent( keyHash common.Hash, subID uint64, numWords uint32, + cbGasLimit uint32, uni coordinatorV2Universe, ) (*big.Int, uint64) { - cbGasLimit := uint32(500_000) minRequestConfirmations := uint16(2) _, err := vrfConsumerHandle.TestRequestRandomness( consumerOwner, @@ -482,8 +506,173 @@ func mine(t *testing.T, requestID *big.Int, subID uint64, uni coordinatorV2Unive }, cltest.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) } +func mineBatch(t *testing.T, requestIDs []*big.Int, subID uint64, uni coordinatorV2Universe, db *sqlx.DB) bool { + requestIDMap := map[string]bool{} + for _, requestID := range requestIDs { + requestIDMap[common.BytesToHash(requestID.Bytes()).String()] = false + } + return gomega.NewWithT(t).Eventually(func() bool { + uni.backend.Commit() + var txs []txmgr.EthTx + err := db.Select(&txs, ` + SELECT * FROM eth_txes + WHERE eth_txes.state = 'confirmed' + AND CAST(eth_txes.meta->>'SubId' AS NUMERIC) = $1 + `, subID) + require.NoError(t, err) + for _, tx := range txs { + meta, err := tx.GetMeta() + require.NoError(t, err) + t.Log("meta:", meta) + for _, requestID := range meta.RequestIDs { + if _, ok := requestIDMap[requestID.String()]; ok { + requestIDMap[requestID.String()] = true + } + } + } + foundAll := true + for _, found := range requestIDMap { + foundAll = foundAll && found + } + t.Log("requestIDMap:", requestIDMap) + return foundAll + }, cltest.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) +} + +func TestVRFV2Integration_SingleConsumer_HappyPath_BatchFulfillment(t *testing.T) { + config, db := heavyweight.FullTestDB(t, "vrfv2_singleconsumer_batch_happypath") + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + app := cltest.NewApplicationWithConfigAndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey) + config.Overrides.GlobalEvmGasLimitDefault = null.NewInt(5e6, true) + config.Overrides.GlobalMinIncomingConfirmations = 
null.IntFrom(2) + consumer := uni.vrfConsumers[0] + consumerContract := uni.consumerContracts[0] + consumerContractAddress := uni.consumerContractAddresses[0] + + // Create a subscription and fund with 5 LINK. + subID := subscribeAndAssertSubscriptionCreatedEvent(t, consumerContract, consumer, consumerContractAddress, big.NewInt(5e18), uni) + + // Create gas lane. + key1, err := app.KeyStore.Eth().Create(big.NewInt(1337)) + require.NoError(t, err) + sendEth(t, ownerKey, uni.backend, key1.Address.Address(), 10) + configureSimChain(t, app, map[string]types.ChainCfg{ + key1.Address.String(): { + EvmMaxGasPriceWei: utils.NewBig(big.NewInt(10e9)), // 10 gwei + }, + }, big.NewInt(10e9)) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job using key1 and key2 on the same gas lane. + jbs := createVRFJobs(t, [][]ethkey.KeyV2{{key1}}, app, uni, true) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + // Make some randomness requests. + numWords := uint32(2) + reqIDs := []*big.Int{} + for i := 0; i < 5; i++ { + requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, uni) + reqIDs = append(reqIDs, requestID) + } + + // Wait for fulfillment to be queued. + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 5 + }, cltest.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + mineBatch(t, reqIDs, subID, uni, db) + + for i, requestID := range reqIDs { + // Assert correct state of RandomWordsFulfilled event. + // The last request will be the successful one because of the way the example + // contract is written. + if i == (len(reqIDs) - 1) { + assertRandomWordsFulfilled(t, requestID, true, uni) + } else { + assertRandomWordsFulfilled(t, requestID, false, uni) + } + } + + // Assert correct number of random words sent by coordinator. 
+ assertNumRandomWords(t, consumerContract, numWords) +} + +func TestVRFV2Integration_SingleConsumer_HappyPath_BatchFulfillment_BigGasCallback(t *testing.T) { + config, db := heavyweight.FullTestDB(t, "vrfv2_singleconsumer_batch_bigcallback") + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + app := cltest.NewApplicationWithConfigAndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey) + config.Overrides.GlobalEvmGasLimitDefault = null.NewInt(5e6, true) + config.Overrides.GlobalMinIncomingConfirmations = null.IntFrom(2) + consumer := uni.vrfConsumers[0] + consumerContract := uni.consumerContracts[0] + consumerContractAddress := uni.consumerContractAddresses[0] + + // Create a subscription and fund with 5 LINK. + subID := subscribeAndAssertSubscriptionCreatedEvent(t, consumerContract, consumer, consumerContractAddress, big.NewInt(5e18), uni) + + // Create gas lane. + key1, err := app.KeyStore.Eth().Create(big.NewInt(1337)) + require.NoError(t, err) + sendEth(t, ownerKey, uni.backend, key1.Address.Address(), 10) + configureSimChain(t, app, map[string]types.ChainCfg{ + key1.Address.String(): { + EvmMaxGasPriceWei: utils.NewBig(big.NewInt(10e9)), // 10 gwei + }, + }, big.NewInt(10e9)) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job using key1 and key2 on the same gas lane. + jbs := createVRFJobs(t, [][]ethkey.KeyV2{{key1}}, app, uni, true) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + // Make some randomness requests with low max gas callback limits. + // These should all be included in the same batch. + numWords := uint32(2) + reqIDs := []*big.Int{} + for i := 0; i < 5; i++ { + requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 100_000, uni) + reqIDs = append(reqIDs, requestID) + } + + // Make one randomness request with the max callback gas limit. + // It should live in a batch on it's own. 
+ requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 2_500_000, uni) + reqIDs = append(reqIDs, requestID) + + // Wait for fulfillment to be queued. + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 6 + }, cltest.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + mineBatch(t, reqIDs, subID, uni, db) + + for i, requestID := range reqIDs { + // Assert correct state of RandomWordsFulfilled event. + // The last request will be the successful one because of the way the example + // contract is written. + if i == (len(reqIDs) - 1) { + assertRandomWordsFulfilled(t, requestID, true, uni) + } else { + assertRandomWordsFulfilled(t, requestID, false, uni) + } + } + + // Assert correct number of random words sent by coordinator. + assertNumRandomWords(t, consumerContract, numWords) +} + func TestVRFV2Integration_SingleConsumer_HappyPath(t *testing.T) { - config, db := heavyweight.FullTestDB(t, "vrfv2_singleconsumer_happypath", true, true) + config, db := heavyweight.FullTestDB(t, "vrfv2_singleconsumer_happypath") ownerKey := cltest.MustGenerateRandomKey(t) uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) app := cltest.NewApplicationWithConfigAndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey) @@ -514,12 +703,12 @@ func TestVRFV2Integration_SingleConsumer_HappyPath(t *testing.T) { require.NoError(t, app.Start(testutils.Context(t))) // Create VRF job using key1 and key2 on the same gas lane. - jbs := createVRFJobs(t, [][]ethkey.KeyV2{{key1, key2}}, app, uni) + jbs := createVRFJobs(t, [][]ethkey.KeyV2{{key1, key2}}, app, uni, false) keyHash := jbs[0].VRFSpec.PublicKey.MustHash() // Make the first randomness request. 
numWords := uint32(20) - requestID1, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, uni) + requestID1, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, uni) // Wait for fulfillment to be queued. gomega.NewGomegaWithT(t).Eventually(func() bool { @@ -537,7 +726,7 @@ func TestVRFV2Integration_SingleConsumer_HappyPath(t *testing.T) { assertRandomWordsFulfilled(t, requestID1, true, uni) // Make the second randomness request and assert fulfillment is successful - requestID2, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, uni) + requestID2, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, uni) gomega.NewGomegaWithT(t).Eventually(func() bool { uni.backend.Commit() runs, err := app.PipelineORM().GetAllRuns() @@ -564,7 +753,7 @@ func TestVRFV2Integration_SingleConsumer_HappyPath(t *testing.T) { } func TestVRFV2Integration_SingleConsumer_NeedsBlockhashStore(t *testing.T) { - config, db := heavyweight.FullTestDB(t, "vrfv2_needs_blockhash_store", true, true) + config, db := heavyweight.FullTestDB(t, "vrfv2_needs_blockhash_store") ownerKey := cltest.MustGenerateRandomKey(t) uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) app := cltest.NewApplicationWithConfigAndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey) @@ -599,7 +788,7 @@ func TestVRFV2Integration_SingleConsumer_NeedsBlockhashStore(t *testing.T) { }, big.NewInt(10e9)) // Create VRF job. - vrfJobs := createVRFJobs(t, [][]ethkey.KeyV2{{vrfKey}}, app, uni) + vrfJobs := createVRFJobs(t, [][]ethkey.KeyV2{{vrfKey}}, app, uni, false) keyHash := vrfJobs[0].VRFSpec.PublicKey.MustHash() _ = createAndStartBHSJob( @@ -608,7 +797,7 @@ func TestVRFV2Integration_SingleConsumer_NeedsBlockhashStore(t *testing.T) { // Make the randomness request. 
It will not yet succeed since it is underfunded. numWords := uint32(20) - requestID, requestBlock := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, uni) + requestID, requestBlock := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, uni) // Wait 101 blocks. for i := 0; i < 100; i++ { @@ -663,7 +852,7 @@ func TestVRFV2Integration_SingleConsumer_NeedsBlockhashStore(t *testing.T) { } func TestVRFV2Integration_SingleConsumer_NeedsTopUp(t *testing.T) { - config, db := heavyweight.FullTestDB(t, "vrfv2_singleconsumer_needstopup", true, true) + config, db := heavyweight.FullTestDB(t, "vrfv2_singleconsumer_needstopup") ownerKey := cltest.MustGenerateRandomKey(t) uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) app := cltest.NewApplicationWithConfigAndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey) @@ -688,11 +877,11 @@ func TestVRFV2Integration_SingleConsumer_NeedsTopUp(t *testing.T) { require.NoError(t, app.Start(testutils.Context(t))) // Create VRF job. - jbs := createVRFJobs(t, [][]ethkey.KeyV2{{key}}, app, uni) + jbs := createVRFJobs(t, [][]ethkey.KeyV2{{key}}, app, uni, false) keyHash := jbs[0].VRFSpec.PublicKey.MustHash() numWords := uint32(20) - requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, uni) + requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, uni) // Fulfillment will not be enqueued because subscriber doesn't have enough LINK. 
gomega.NewGomegaWithT(t).Consistently(func() bool { @@ -728,7 +917,7 @@ func TestVRFV2Integration_SingleConsumer_NeedsTopUp(t *testing.T) { } func TestVRFV2Integration_SingleConsumer_MultipleGasLanes(t *testing.T) { - config, db := heavyweight.FullTestDB(t, "vrfv2_singleconsumer_multiplegaslanes", true, true) + config, db := heavyweight.FullTestDB(t, "vrfv2_singleconsumer_multiplegaslanes") ownerKey := cltest.MustGenerateRandomKey(t) uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) app := cltest.NewApplicationWithConfigAndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey) @@ -760,12 +949,12 @@ func TestVRFV2Integration_SingleConsumer_MultipleGasLanes(t *testing.T) { require.NoError(t, app.Start(testutils.Context(t))) // Create VRF jobs. - jbs := createVRFJobs(t, [][]ethkey.KeyV2{{cheapKey}, {expensiveKey}}, app, uni) + jbs := createVRFJobs(t, [][]ethkey.KeyV2{{cheapKey}, {expensiveKey}}, app, uni, false) cheapHash := jbs[0].VRFSpec.PublicKey.MustHash() expensiveHash := jbs[1].VRFSpec.PublicKey.MustHash() numWords := uint32(20) - cheapRequestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, cheapHash, subID, numWords, uni) + cheapRequestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, cheapHash, subID, numWords, 500_000, uni) // Wait for fulfillment to be queued for cheap key hash. gomega.NewGomegaWithT(t).Eventually(func() bool { @@ -785,7 +974,7 @@ func TestVRFV2Integration_SingleConsumer_MultipleGasLanes(t *testing.T) { // Assert correct number of random words sent by coordinator. 
assertNumRandomWords(t, consumerContract, numWords) - expensiveRequestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, expensiveHash, subID, numWords, uni) + expensiveRequestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, expensiveHash, subID, numWords, 500_000, uni) // We should not have any new fulfillments until a top up. gomega.NewWithT(t).Consistently(func() bool { @@ -820,7 +1009,7 @@ func TestVRFV2Integration_SingleConsumer_MultipleGasLanes(t *testing.T) { } func TestVRFV2Integration_SingleConsumer_AlwaysRevertingCallback_StillFulfilled(t *testing.T) { - config, db := heavyweight.FullTestDB(t, "vrfv2_singleconsumer_alwaysrevertingcallback", true, true) + config, db := heavyweight.FullTestDB(t, "vrfv2_singleconsumer_alwaysrevertingcallback") ownerKey := cltest.MustGenerateRandomKey(t) uni := newVRFCoordinatorV2Universe(t, ownerKey, 0) app := cltest.NewApplicationWithConfigAndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey) @@ -845,12 +1034,12 @@ func TestVRFV2Integration_SingleConsumer_AlwaysRevertingCallback_StillFulfilled( require.NoError(t, app.Start(testutils.Context(t))) // Create VRF job. - jbs := createVRFJobs(t, [][]ethkey.KeyV2{{key}}, app, uni) + jbs := createVRFJobs(t, [][]ethkey.KeyV2{{key}}, app, uni, false) keyHash := jbs[0].VRFSpec.PublicKey.MustHash() // Make the randomness request. numWords := uint32(20) - requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, uni) + requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, uni) // Wait for fulfillment to be queued. 
gomega.NewGomegaWithT(t).Eventually(func() bool { @@ -1005,7 +1194,7 @@ func TestSimpleConsumerExample(t *testing.T) { } func TestIntegrationVRFV2(t *testing.T) { - config, _ := heavyweight.FullTestDB(t, "vrf_v2_integration", true, true) + config, _ := heavyweight.FullTestDB(t, "vrf_v2_integration") key := cltest.MustGenerateRandomKey(t) uni := newVRFCoordinatorV2Universe(t, key, 1) carol := uni.vrfConsumers[0] @@ -1015,7 +1204,7 @@ func TestIntegrationVRFV2(t *testing.T) { app := cltest.NewApplicationWithConfigAndKeyOnSimulatedBlockchain(t, config, uni.backend, key) config.Overrides.GlobalEvmGasLimitDefault = null.NewInt(0, false) config.Overrides.GlobalMinIncomingConfirmations = null.IntFrom(2) - keys, err := app.KeyStore.Eth().SendingKeys() + keys, err := app.KeyStore.Eth().SendingKeys(nil) // Reconfigure the sim chain with a default gas price of 1 gwei, // max gas limit of 2M and a key specific max 10 gwei price. @@ -1037,6 +1226,7 @@ func TestIntegrationVRFV2(t *testing.T) { JobID: jid.String(), Name: "vrf-primary", CoordinatorAddress: uni.rootContractAddress.String(), + BatchCoordinatorAddress: uni.batchCoordinatorContractAddress.String(), MinIncomingConfirmations: incomingConfs, PublicKey: vrfkey.PublicKey.String(), FromAddresses: []string{keys[0].Address.String()}, @@ -1195,7 +1385,7 @@ func TestIntegrationVRFV2(t *testing.T) { } func TestMaliciousConsumer(t *testing.T) { - config, _ := heavyweight.FullTestDB(t, "vrf_v2_integration_malicious", true, true) + config, _ := heavyweight.FullTestDB(t, "vrf_v2_integration_malicious") key := cltest.MustGenerateRandomKey(t) uni := newVRFCoordinatorV2Universe(t, key, 1) carol := uni.vrfConsumers[0] @@ -1218,6 +1408,7 @@ func TestMaliciousConsumer(t *testing.T) { JobID: jid.String(), Name: "vrf-primary", CoordinatorAddress: uni.rootContractAddress.String(), + BatchCoordinatorAddress: uni.batchCoordinatorContractAddress.String(), MinIncomingConfirmations: incomingConfs, PublicKey: vrfkey.PublicKey.String(), V2: true, 
@@ -1427,7 +1618,7 @@ func TestFulfillmentCost(t *testing.T) { } func TestStartingCountsV1(t *testing.T) { - cfg, db := heavyweight.FullTestDB(t, "vrf_test_starting_counts", true, false) + cfg, db := heavyweight.FullTestDBNoFixtures(t, "vrf_test_starting_counts") _, err := db.Exec(`INSERT INTO evm_chains (id, created_at, updated_at) VALUES (1337, NOW(), NOW())`) require.NoError(t, err) _, err = db.Exec(`INSERT INTO evm_heads (hash, number, parent_hash, created_at, timestamp, evm_chain_id) diff --git a/core/services/vrf/listener_v1.go b/core/services/vrf/listener_v1.go index 014314784c1..6b4b6e89b81 100644 --- a/core/services/vrf/listener_v1.go +++ b/core/services/vrf/listener_v1.go @@ -49,7 +49,7 @@ type listenerV1 struct { headBroadcaster httypes.HeadBroadcasterRegistry txm txmgr.TxManager gethks GethKeyStore - reqLogs *utils.Mailbox + reqLogs *utils.Mailbox[log.Broadcast] chStop chan struct{} waitOnStop chan struct{} newHead chan struct{} @@ -229,14 +229,10 @@ func (lsn *listenerV1) runLogListener(unsubscribes []func(), minConfs uint32) { case <-lsn.reqLogs.Notify(): // Process all the logs in the queue if one is added for { - i, exists := lsn.reqLogs.Retrieve() + lb, exists := lsn.reqLogs.Retrieve() if !exists { break } - lb, ok := i.(log.Broadcast) - if !ok { - panic(fmt.Sprintf("VRFListener: invariant violated, expected log.Broadcast got %T", i)) - } recovery.WrapRecover(lsn.l, func() { lsn.handleLog(lb, minConfs) }) @@ -407,26 +403,30 @@ func (lsn *listenerV1) ProcessRequest(req request) bool { "reqID", hex.EncodeToString(req.req.RequestID[:]), "reqTxHash", req.req.Raw.TxHash) return false - } else { - if run.HasErrors() || run.HasFatalErrors() { - lsn.l.Error("VRFV1 pipeline run failed with errors", - "reqID", hex.EncodeToString(req.req.RequestID[:]), - "keyHash", hex.EncodeToString(req.req.KeyHash[:]), - "reqTxHash", req.req.Raw.TxHash, - "runErrors", run.AllErrors.ToError(), - "runFatalErrors", run.FatalErrors.ToError(), - ) - return false - } else { - 
lsn.l.Debugw("Executed VRFV1 fulfillment run", - "reqID", hex.EncodeToString(req.req.RequestID[:]), - "keyHash", hex.EncodeToString(req.req.KeyHash[:]), - "reqTxHash", req.req.Raw.TxHash, - ) - incProcessedReqs(lsn.job.Name.ValueOrZero(), lsn.job.ExternalJobID, v1) - return true - } } + + // At this point the pipeline runner has completed the run of the pipeline, + // but it may have errored out. + if run.HasErrors() || run.HasFatalErrors() { + lsn.l.Error("VRFV1 pipeline run failed with errors", + "reqID", hex.EncodeToString(req.req.RequestID[:]), + "keyHash", hex.EncodeToString(req.req.KeyHash[:]), + "reqTxHash", req.req.Raw.TxHash, + "runErrors", run.AllErrors.ToError(), + "runFatalErrors", run.FatalErrors.ToError(), + ) + return false + } + + // At this point, the pipeline run executed successfully, and we mark + // the request as processed. + lsn.l.Debugw("Executed VRFV1 fulfillment run", + "reqID", hex.EncodeToString(req.req.RequestID[:]), + "keyHash", hex.EncodeToString(req.req.KeyHash[:]), + "reqTxHash", req.req.Raw.TxHash, + ) + incProcessedReqs(lsn.job.Name.ValueOrZero(), lsn.job.ExternalJobID, v1) + return true } // Close complies with job.Service diff --git a/core/services/vrf/listener_v2.go b/core/services/vrf/listener_v2.go index e90e441d85f..713ed3838ec 100644 --- a/core/services/vrf/listener_v2.go +++ b/core/services/vrf/listener_v2.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "fmt" + "math" "math/big" "sync" "time" @@ -11,9 +12,11 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" "github.com/pkg/errors" heaps "github.com/theodesp/go-heaps" "github.com/theodesp/go-heaps/pairing" + "go.uber.org/multierr" evmclient "github.com/smartcontractkit/chainlink/core/chains/evm/client" httypes "github.com/smartcontractkit/chainlink/core/chains/evm/headtracker/types" @@ -21,6 +24,7 @@ import ( 
"github.com/smartcontractkit/chainlink/core/chains/evm/txmgr" evmtypes "github.com/smartcontractkit/chainlink/core/chains/evm/types" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/aggregator_v3_interface" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/batch_vrf_coordinator_v2" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/vrf_coordinator_v2" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/null" @@ -29,11 +33,14 @@ import ( "github.com/smartcontractkit/chainlink/core/services/pg" "github.com/smartcontractkit/chainlink/core/services/pipeline" "github.com/smartcontractkit/chainlink/core/utils" + bigmath "github.com/smartcontractkit/chainlink/core/utils/big_math" ) var ( - _ log.Listener = &listenerV2{} - _ job.ServiceCtx = &listenerV2{} + _ log.Listener = &listenerV2{} + _ job.ServiceCtx = &listenerV2{} + coordinatorV2ABI = evmtypes.MustGetABI(vrf_coordinator_v2.VRFCoordinatorV2ABI) + batchCoordinatorV2ABI = evmtypes.MustGetABI(batch_vrf_coordinator_v2.BatchVRFCoordinatorV2ABI) ) const ( @@ -43,13 +50,82 @@ const ( 2*2100 + 20000 - // cold read oracle address and oracle balance and first time oracle balance update, note first time will be 20k, but 5k subsequently 4800 + // request delete refund (refunds happen after execution), note pre-london fork was 15k. See https://eips.ethereum.org/EIPS/eip-3529 6685 // Positive static costs of argument encoding etc. note that it varies by +/- x*12 for every x bytes of non-zero data in the proof. + + // BatchFulfillmentIterationGasCost is the cost of a single iteration of the batch coordinator's + // loop. This is used to determine the gas allowance for a batch fulfillment call. + BatchFulfillmentIterationGasCost = 52_000 + + // backoffFactor is the factor by which to increase the delay each time a request fails. 
+ backoffFactor = 1.3 ) +func newListenerV2( + cfg Config, + l logger.Logger, + ethClient evmclient.Client, + chainID *big.Int, + logBroadcaster log.Broadcaster, + q pg.Q, + coordinator vrf_coordinator_v2.VRFCoordinatorV2Interface, + batchCoordinator batch_vrf_coordinator_v2.BatchVRFCoordinatorV2Interface, + aggregator *aggregator_v3_interface.AggregatorV3Interface, + txm txmgr.TxManager, + pipelineRunner pipeline.Runner, + gethks keystore.Eth, + job job.Job, + reqLogs *utils.Mailbox[log.Broadcast], + reqAdded func(), + respCount map[string]uint64, + headBroadcaster httypes.HeadBroadcasterRegistry, + deduper *logDeduper, +) *listenerV2 { + return &listenerV2{ + cfg: cfg, + l: l, + ethClient: ethClient, + chainID: chainID, + logBroadcaster: logBroadcaster, + txm: txm, + coordinator: coordinator, + batchCoordinator: batchCoordinator, + pipelineRunner: pipelineRunner, + job: job, + q: q, + gethks: gethks, + reqLogs: reqLogs, + chStop: make(chan struct{}), + reqAdded: reqAdded, + respCount: respCount, + blockNumberToReqID: pairing.New(), + headBroadcaster: headBroadcaster, + latestHeadMu: sync.RWMutex{}, + wg: &sync.WaitGroup{}, + aggregator: aggregator, + deduper: deduper, + } +} + type pendingRequest struct { confirmedAtBlock uint64 req *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested lb log.Broadcast utcTimestamp time.Time + + // used for exponential backoff when retrying + attempts int + lastTry time.Time +} + +type vrfPipelineResult struct { + err error + maxLink *big.Int + juelsNeeded *big.Int + run pipeline.Run + payload string + gasLimit uint64 + req pendingRequest + proof vrf_coordinator_v2.VRFProof + reqCommitment vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment } type listenerV2 struct { @@ -57,14 +133,18 @@ type listenerV2 struct { cfg Config l logger.Logger ethClient evmclient.Client + chainID *big.Int logBroadcaster log.Broadcaster txm txmgr.TxManager - coordinator *vrf_coordinator_v2.VRFCoordinatorV2 + + coordinator 
vrf_coordinator_v2.VRFCoordinatorV2Interface + batchCoordinator batch_vrf_coordinator_v2.BatchVRFCoordinatorV2Interface + pipelineRunner pipeline.Runner job job.Job q pg.Q gethks keystore.Eth - reqLogs *utils.Mailbox + reqLogs *utils.Mailbox[log.Broadcast] chStop chan struct{} // We can keep these pending logs in memory because we // only mark them confirmed once we send a corresponding fulfillment transaction. @@ -91,7 +171,7 @@ type listenerV2 struct { wg *sync.WaitGroup // aggregator client to get link/eth feed prices from chain. - aggregator *aggregator_v3_interface.AggregatorV3Interface + aggregator aggregator_v3_interface.AggregatorV3InterfaceInterface // deduper prevents processing duplicate requests from the log broadcaster. deduper *logDeduper @@ -167,7 +247,7 @@ func (lsn *listenerV2) getAndRemoveConfirmedLogsBySub(latestHead uint64) map[uin var toProcess = make(map[uint64][]pendingRequest) var toKeep []pendingRequest for i := 0; i < len(lsn.reqs); i++ { - if r := lsn.reqs[i]; r.confirmedAtBlock <= latestHead { + if r := lsn.reqs[i]; lsn.ready(r, latestHead) { toProcess[r.req.SubId] = append(toProcess[r.req.SubId], r) } else { toKeep = append(toKeep, lsn.reqs[i]) @@ -177,6 +257,37 @@ func (lsn *listenerV2) getAndRemoveConfirmedLogsBySub(latestHead uint64) map[uin return toProcess } +func (lsn *listenerV2) ready(req pendingRequest, latestHead uint64) bool { + // Request is not eligible for fulfillment yet + if req.confirmedAtBlock > latestHead { + return false + } + + if lsn.job.VRFSpec.BackoffInitialDelay == 0 || req.attempts == 0 { + // Backoff is disabled, or this is the first try + return true + } + + return time.Now().UTC().After( + nextTry( + req.attempts, + lsn.job.VRFSpec.BackoffInitialDelay, + lsn.job.VRFSpec.BackoffMaxDelay, + req.lastTry)) +} + +func nextTry(retries int, initial, max time.Duration, last time.Time) time.Time { + expBackoffFactor := math.Pow(backoffFactor, float64(retries-1)) + + var delay time.Duration + if expBackoffFactor > 
float64(max/initial) { + delay = max + } else { + delay = time.Duration(float64(initial) * expBackoffFactor) + } + return last.Add(delay) +} + // Remove all entries 10000 blocks or older // to avoid a memory leak. func (lsn *listenerV2) pruneConfirmedRequestCounts() { @@ -209,7 +320,7 @@ func (lsn *listenerV2) pruneConfirmedRequestCounts() { // 2) the max gas price provides a very large buffer most of the time. // Its easier to optimistically assume it will go though and in the rare case of a reversion // we simply retry TODO: follow up where if we see a fulfillment revert, return log to the queue. -func (lsn *listenerV2) processPendingVRFRequests() { +func (lsn *listenerV2) processPendingVRFRequests(ctx context.Context) { confirmed := lsn.getAndRemoveConfirmedLogsBySub(lsn.getLatestHead()) processed := make(map[string]struct{}) start := time.Now() @@ -220,7 +331,21 @@ func (lsn *listenerV2) processPendingVRFRequests() { for _, subReqs := range confirmed { for _, req := range subReqs { if _, ok := processed[req.req.RequestId.String()]; !ok { + req.attempts++ + req.lastTry = time.Now().UTC() toKeep = append(toKeep, req) + if lsn.job.VRFSpec.BackoffInitialDelay != 0 { + lsn.l.Infow("Request failed, next retry will be delayed.", + "reqID", req.req.RequestId.String(), + "subID", req.req.SubId, + "attempts", req.attempts, + "lastTry", req.lastTry.String(), + "nextTry", nextTry( + req.attempts, + lsn.job.VRFSpec.BackoffInitialDelay, + lsn.job.VRFSpec.BackoffMaxDelay, + req.lastTry)) + } } } } @@ -228,28 +353,37 @@ func (lsn *listenerV2) processPendingVRFRequests() { // so we merged the new ones with the ones that need to be requeued. lsn.reqsMu.Lock() lsn.reqs = append(lsn.reqs, toKeep...) 
- lsn.reqsMu.Unlock() lsn.l.Infow("Finished processing pending requests", - "total processed", len(processed), - "total unprocessed", len(toKeep), + "totalProcessed", len(processed), + "totalFailed", len(toKeep), + "total", len(lsn.reqs), "time", time.Since(start).String()) + lsn.reqsMu.Unlock() // unlock here since len(lsn.reqs) is a read, to avoid a data race. }() // TODO: also probably want to order these by request time so we service oldest first // Get subscription balance. Note that outside of this request handler, this can only decrease while there // are no pending requests if len(confirmed) == 0 { - lsn.l.Infow("No pending requests") + lsn.l.Infow("No pending requests ready for processing") return } for subID, reqs := range confirmed { - sub, err := lsn.coordinator.GetSubscription(nil, subID) + sub, err := lsn.coordinator.GetSubscription(&bind.CallOpts{ + Context: ctx, + }, subID) if err != nil { lsn.l.Errorw("Unable to read subscription balance", "err", err) continue } + + if !lsn.shouldProcessSub(subID, sub, reqs) { + lsn.l.Warnw("Not processing sub", "subID", subID, "balance", sub.Balance) + continue + } + startBalance := sub.Balance - p := lsn.processRequestsPerSub(subID, startBalance, reqs) + p := lsn.processRequestsPerSub(ctx, subID, startBalance, reqs) for reqID := range p { processed[reqID] = struct{}{} } @@ -257,6 +391,49 @@ func (lsn *listenerV2) processPendingVRFRequests() { lsn.pruneConfirmedRequestCounts() } +func (lsn *listenerV2) shouldProcessSub(subID uint64, sub vrf_coordinator_v2.GetSubscription, reqs []pendingRequest) bool { + // This really shouldn't happen, but sanity check. + // No point in processing a sub if there are no requests to service. 
+ if len(reqs) == 0 { + return false + } + + vrfRequest := reqs[0].req + l := lsn.l.With( + "subID", subID, + "balance", sub.Balance, + "requestID", vrfRequest.RequestId.String(), + ) + + fromAddresses := lsn.fromAddresses() + if len(fromAddresses) == 0 { + l.Warn("Couldn't get next from address, processing sub anyway") + return true + } + + // NOTE: we are assuming that all keys have an identical max gas price. + // Otherwise, this is a misconfiguration of the node and/or job. + fromAddress := fromAddresses[0] + + gasPriceWei := lsn.cfg.KeySpecificMaxGasPriceWei(fromAddress) + + estimatedFee, err := lsn.estimateFeeJuels(reqs[0].req, gasPriceWei) + if err != nil { + l.Warnw("Couldn't estimate fee, processing sub anyway", "err", err) + return true + } + + if sub.Balance.Cmp(estimatedFee) < 0 { + l.Infow("Subscription is underfunded, not processing it's requests", + "estimatedFeeJuels", estimatedFee, + ) + return false + } + + // balance is sufficient for at least one request, good to process + return true +} + // MaybeSubtractReservedLink figures out how much LINK is reserved for other VRF requests that // have not been fully confirmed yet on-chain, and subtracts that from the given startBalance, // and returns that value if there are no errors. 
@@ -271,19 +448,20 @@ func MaybeSubtractReservedLink(l logger.Logger, q pg.Q, startBalance *big.Int, c GROUP BY meta->>'SubId'`, chainID, subID) if err != nil && !errors.Is(err, sql.ErrNoRows) { l.Errorw("Could not get reserved link", "err", err) - return startBalance, err + return nil, err } if reservedLink != "" { reservedLinkInt, success := big.NewInt(0).SetString(reservedLink, 10) if !success { l.Errorw("Error converting reserved link", "reservedLink", reservedLink) - return startBalance, errors.New("unable to convert returned link") + return nil, errors.New("unable to convert returned link") } - // Subtract the reserved link - return startBalance.Sub(startBalance, reservedLinkInt), nil + + return new(big.Int).Sub(startBalance, reservedLinkInt), nil } - return startBalance, nil + + return new(big.Int).Set(startBalance), nil } type fulfilledReqV2 struct { @@ -304,7 +482,8 @@ func (a fulfilledReqV2) Compare(b heaps.Item) int { } } -func (lsn *listenerV2) processRequestsPerSub( +func (lsn *listenerV2) processRequestsPerSubBatch( + ctx context.Context, subID uint64, startBalance *big.Int, reqs []pendingRequest, @@ -312,127 +491,390 @@ func (lsn *listenerV2) processRequestsPerSub( start := time.Now() var processed = make(map[string]struct{}) startBalanceNoReserveLink, err := MaybeSubtractReservedLink( - lsn.l, lsn.q, startBalance, lsn.ethClient.ChainID().Uint64(), subID) + lsn.l, lsn.q, startBalance, lsn.chainID.Uint64(), subID) if err != nil { lsn.l.Errorw("Couldn't get reserved LINK for subscription", "sub", reqs[0].req.SubId) return processed } - lggr := lsn.l.With( + // Base the max gas for a batch on the max gas limit for a single callback. + // Since the max gas limit for a single callback is usually quite large already, + // we probably don't want to exceed it too much so that we can reliably get + // batch fulfillments included, while also making sure that the biggest gas guzzler + // callbacks are included. 
+ config, err := lsn.coordinator.GetConfig(&bind.CallOpts{ + Context: ctx, + }) + if err != nil { + lsn.l.Errorw("Couldn't get config from coordinator", "err", err) + return processed + } + + // Add very conservative upper bound estimate on verification costs. + batchMaxGas := uint64(config.MaxGasLimit + 400_000) + + l := lsn.l.With( "subID", reqs[0].req.SubId, - "reqs", len(reqs), + "eligibleSubReqs", len(reqs), "startBalance", startBalance.String(), "startBalanceNoReservedLink", startBalanceNoReserveLink.String(), + "batchMaxGas", batchMaxGas, ) - lggr.Infow("Processing requests for subscription") - // Attempt to process every request, break if we run out of balance - for _, req := range reqs { - fromAddress, err := lsn.gethks.GetRoundRobinAddress(lsn.fromAddresses()...) + defer func() { + l.Infow("Finished processing for sub", + "endBalance", startBalanceNoReserveLink.String(), + "totalProcessed", len(processed), + "totalUnique", uniqueReqs(reqs), + "time", time.Since(start).String()) + }() + + l.Infow("Processing requests for subscription with batching") + + // Check for already consumed or expired reqs + unconsumed, processedReqs := lsn.getUnconsumed(l, reqs) + for _, reqID := range processedReqs { + processed[reqID] = struct{}{} + } + + // Process requests in chunks in order to kick off as many jobs + // as configured in parallel. Then we can combine into fulfillment + // batches afterwards. 
+ for chunkStart := 0; chunkStart < len(unconsumed); chunkStart += int(lsn.job.VRFSpec.ChunkSize) { + chunkEnd := chunkStart + int(lsn.job.VRFSpec.ChunkSize) + if chunkEnd > len(unconsumed) { + chunkEnd = len(unconsumed) + } + chunk := unconsumed[chunkStart:chunkEnd] + + var unfulfilled []pendingRequest + alreadyFulfilled, err := lsn.checkReqsFulfilled(ctx, l, chunk) + if errors.Is(err, context.Canceled) { + l.Errorw("Context canceled, stopping request processing", "err", err) + return processed + } else if err != nil { + l.Errorw("Error checking for already fulfilled requests, proceeding anyway", "err", err) + } + for i, a := range alreadyFulfilled { + if a { + lsn.markLogAsConsumed(chunk[i].lb) + processed[chunk[i].req.RequestId.String()] = struct{}{} + } else { + unfulfilled = append(unfulfilled, chunk[i]) + } + } + + fromAddress, err := lsn.gethks.GetRoundRobinAddress(lsn.chainID, lsn.fromAddresses()...) if err != nil { - lggr.Errorw("Couldn't get next from address", "err", err) + l.Errorw("Couldn't get next from address", "err", err) continue } maxGasPriceWei := lsn.cfg.KeySpecificMaxGasPriceWei(fromAddress) - vrfRequest := req.req - rlog := lggr.With( - "reqID", vrfRequest.RequestId.String(), - "txHash", vrfRequest.Raw.TxHash, - "maxGasPrice", maxGasPriceWei.String(), - "fromAddress", fromAddress) + pipelines := lsn.runPipelines(ctx, l, maxGasPriceWei, unfulfilled) + batches := newBatchFulfillments(batchMaxGas) + for _, p := range pipelines { + ll := l.With("reqID", p.req.req.RequestId.String(), + "txHash", p.req.req.Raw.TxHash, + "maxGasPrice", maxGasPriceWei.String(), + "fromAddress", fromAddress, + "juelsNeeded", p.juelsNeeded.String(), + "maxLink", p.maxLink.String(), + "gasLimit", p.gasLimit, + "attempts", p.req.attempts) - // This check to see if the log was consumed needs to be in the same - // goroutine as the mark consumed to avoid processing duplicates. 
- consumed, err := lsn.logBroadcaster.WasAlreadyConsumed(req.lb) - if err != nil { - // Do not process for now, retry on next iteration. - rlog.Errorw("Could not determine if log was already consumed", "error", err) - continue - } else if consumed { - processed[vrfRequest.RequestId.String()] = struct{}{} - continue + if p.err != nil { + if startBalanceNoReserveLink.Cmp(p.juelsNeeded) < 0 { + ll.Infow("Insufficient link balance to fulfill a request based on estimate, returning") + return processed + } + + ll.Errorw("Pipeline error", "err", p.err) + continue + } + + if startBalanceNoReserveLink.Cmp(p.maxLink) < 0 { + // Insufficient funds, have to wait for a user top up. + // Break out of the loop now and process what we are able to process + // in the constructed batches. + ll.Infow("Insufficient link balance to fulfill a request, breaking") + break + } + + batches.addRun(p) + + startBalanceNoReserveLink.Sub(startBalanceNoReserveLink, p.maxLink) } - // Check if we can ignore the request due to it's age. - if time.Now().UTC().Sub(req.utcTimestamp) >= lsn.job.VRFSpec.RequestTimeout { - rlog.Infow("Request too old, dropping it") - lsn.markLogAsConsumed(req.lb) - processed[vrfRequest.RequestId.String()] = struct{}{} - incDroppedReqs(lsn.job.Name.ValueOrZero(), lsn.job.ExternalJobID, v2, reasonAge) - continue + var processedRequestIDs []string + for _, batch := range batches.fulfillments { + l.Debugw("Processing batch", "batchSize", len(batch.proofs)) + p := lsn.processBatch(l, subID, fromAddress, startBalanceNoReserveLink, batchMaxGas, batch) + processedRequestIDs = append(processedRequestIDs, p...) 
} - // Check if the vrf req has already been fulfilled - // If so we just mark it completed - callback, err := lsn.coordinator.GetCommitment(nil, vrfRequest.RequestId) - if err != nil { - rlog.Errorw("Unable to check if already fulfilled, processing anyways", "err", err) - } else if utils.IsEmpty(callback[:]) { - // If seedAndBlockNumber is zero then the response has been fulfilled - // and we should skip it - rlog.Infow("Request already fulfilled", "callback", callback) - lsn.markLogAsConsumed(req.lb) - processed[vrfRequest.RequestId.String()] = struct{}{} - continue + for _, reqID := range processedRequestIDs { + processed[reqID] = struct{}{} + } + } + + return processed +} + +func (lsn *listenerV2) processRequestsPerSub( + ctx context.Context, + subID uint64, + startBalance *big.Int, + reqs []pendingRequest, +) map[string]struct{} { + if lsn.job.VRFSpec.BatchFulfillmentEnabled && lsn.batchCoordinator != nil { + return lsn.processRequestsPerSubBatch(ctx, subID, startBalance, reqs) + } + + start := time.Now() + var processed = make(map[string]struct{}) + startBalanceNoReserveLink, err := MaybeSubtractReservedLink( + lsn.l, lsn.q, startBalance, lsn.ethClient.ChainID().Uint64(), subID) + if err != nil { + lsn.l.Errorw("Couldn't get reserved LINK for subscription", "sub", reqs[0].req.SubId) + return processed + } + + l := lsn.l.With( + "subID", reqs[0].req.SubId, + "eligibleSubReqs", len(reqs), + "startBalance", startBalance.String(), + "startBalanceNoReservedLink", startBalanceNoReserveLink.String(), + ) + + defer func() { + l.Infow("Finished processing for sub", + "endBalance", startBalanceNoReserveLink.String(), + "totalProcessed", len(processed), + "totalUnique", uniqueReqs(reqs), + "time", time.Since(start).String()) + }() + + l.Infow("Processing requests for subscription") + + // Check for already consumed or expired reqs + unconsumed, processedReqs := lsn.getUnconsumed(l, reqs) + for _, reqID := range processedReqs { + processed[reqID] = struct{}{} + } + + // 
Process requests in chunks + for chunkStart := 0; chunkStart < len(unconsumed); chunkStart += int(lsn.job.VRFSpec.ChunkSize) { + chunkEnd := chunkStart + int(lsn.job.VRFSpec.ChunkSize) + if chunkEnd > len(unconsumed) { + chunkEnd = len(unconsumed) + } + chunk := unconsumed[chunkStart:chunkEnd] + + var unfulfilled []pendingRequest + alreadyFulfilled, err := lsn.checkReqsFulfilled(ctx, l, chunk) + if errors.Is(err, context.Canceled) { + l.Errorw("Context canceled, stopping request processing", "err", err) + return processed + } else if err != nil { + l.Errorw("Error checking for already fulfilled requests, proceeding anyway", "err", err) } - // Run the pipeline to determine the max link that could be billed at maxGasPrice. - // The ethcall will error if there is currently insufficient balance onchain. - maxLink, run, payload, gaslimit, err := lsn.getMaxLinkForFulfillment(maxGasPriceWei, req, rlog) + for i, a := range alreadyFulfilled { + if a { + lsn.markLogAsConsumed(chunk[i].lb) + processed[chunk[i].req.RequestId.String()] = struct{}{} + } else { + unfulfilled = append(unfulfilled, chunk[i]) + } + } + + fromAddress, err := lsn.gethks.GetRoundRobinAddress(lsn.chainID, lsn.fromAddresses()...) 
if err != nil { - rlog.Warnw("Unable to get max link for fulfillment, skipping request", "err", err) + l.Errorw("Couldn't get next from address", "err", err) continue } - if startBalance.Cmp(maxLink) < 0 { - // Insufficient funds, have to wait for a user top up - // leave it unprocessed for now - rlog.Infow("Insufficient link balance to fulfill a request, breaking", "maxLink", maxLink) - break - } - rlog.Infow("Enqueuing fulfillment") - // We have enough balance to service it, lets enqueue for txm - err = lsn.q.Transaction(func(tx pg.Queryer) error { - if err = lsn.pipelineRunner.InsertFinishedRun(&run, true, pg.WithQueryer(tx)); err != nil { - return err + maxGasPriceWei := lsn.cfg.KeySpecificMaxGasPriceWei(fromAddress) + + pipelines := lsn.runPipelines(ctx, l, maxGasPriceWei, unfulfilled) + for _, p := range pipelines { + ll := l.With("reqID", p.req.req.RequestId.String(), + "txHash", p.req.req.Raw.TxHash, + "maxGasPrice", maxGasPriceWei.String(), + "fromAddress", fromAddress, + "juelsNeeded", p.juelsNeeded.String(), + "maxLink", p.maxLink.String(), + "gasLimit", p.gasLimit, + "attempts", p.req.attempts) + + if p.err != nil { + if startBalanceNoReserveLink.Cmp(p.juelsNeeded) < 0 { + ll.Infow("Insufficient link balance to fulfill a request based on estimate, returning") + return processed + } + + ll.Errorw("Pipeline error", "err", p.err) + continue + } + + if startBalanceNoReserveLink.Cmp(p.maxLink) < 0 { + // Insufficient funds, have to wait for a user top up. 
Leave it unprocessed for now + ll.Infow("Insufficient link balance to fulfill a request, returning") + return processed } - if err = lsn.logBroadcaster.MarkConsumed(req.lb, pg.WithQueryer(tx)); err != nil { + + ll.Infow("Enqueuing fulfillment") + var ethTX txmgr.EthTx + err = lsn.q.Transaction(func(tx pg.Queryer) error { + if err = lsn.pipelineRunner.InsertFinishedRun(&p.run, true, pg.WithQueryer(tx)); err != nil { + return err + } + if err = lsn.logBroadcaster.MarkConsumed(p.req.lb, pg.WithQueryer(tx)); err != nil { + return err + } + + maxLinkString := p.maxLink.String() + ethTX, err = lsn.txm.CreateEthTransaction(txmgr.NewTx{ + FromAddress: fromAddress, + ToAddress: lsn.coordinator.Address(), + EncodedPayload: hexutil.MustDecode(p.payload), + GasLimit: p.gasLimit, + Meta: &txmgr.EthTxMeta{ + RequestID: common.BytesToHash(p.req.req.RequestId.Bytes()), + MaxLink: &maxLinkString, + SubID: &p.req.req.SubId, + }, + MinConfirmations: null.Uint32From(uint32(lsn.cfg.MinRequiredOutgoingConfirmations())), + Strategy: txmgr.NewSendEveryStrategy(), + Checker: txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeVRFV2, + VRFCoordinatorAddress: lsn.coordinator.Address(), + VRFRequestBlockNumber: new(big.Int).SetUint64(p.req.req.Raw.BlockNumber), + }, + }, pg.WithQueryer(tx), pg.WithParentCtx(ctx)) return err + }) + if err != nil { + ll.Errorw("Error enqueuing fulfillment, requeuing request", "err", err) + continue } - _, err = lsn.txm.CreateEthTransaction(txmgr.NewTx{ - FromAddress: fromAddress, - ToAddress: lsn.coordinator.Address(), - EncodedPayload: hexutil.MustDecode(payload), - GasLimit: gaslimit, - Meta: &txmgr.EthTxMeta{ - RequestID: common.BytesToHash(vrfRequest.RequestId.Bytes()), - MaxLink: maxLink.String(), - SubID: vrfRequest.SubId, - }, - MinConfirmations: null.Uint32From(uint32(lsn.cfg.MinRequiredOutgoingConfirmations())), - Strategy: txmgr.NewSendEveryStrategy(), - Checker: txmgr.TransmitCheckerSpec{ - CheckerType: txmgr.TransmitCheckerTypeVRFV2, 
- VRFCoordinatorAddress: lsn.coordinator.Address(), + ll.Infow("Enqueued fulfillment", "ethTxID", ethTX.ID) + + // If we successfully enqueued for the txm, subtract that balance + // And loop to attempt to enqueue another fulfillment + startBalanceNoReserveLink.Sub(startBalanceNoReserveLink, p.maxLink) + processed[p.req.req.RequestId.String()] = struct{}{} + incProcessedReqs(lsn.job.Name.ValueOrZero(), lsn.job.ExternalJobID, v2) + } + } + + return processed +} + +// checkReqsFulfilled returns a bool slice the same size of the given reqs slice +// where each slice element indicates whether that request was already fulfilled +// or not. +func (lsn *listenerV2) checkReqsFulfilled(ctx context.Context, l logger.Logger, reqs []pendingRequest) ([]bool, error) { + var ( + start = time.Now() + calls = make([]rpc.BatchElem, len(reqs)) + fulfilled = make([]bool, len(reqs)) + ) + + for i, req := range reqs { + payload, err := coordinatorV2ABI.Pack("getCommitment", req.req.RequestId) + if err != nil { + // This shouldn't happen + return fulfilled, errors.Wrap(err, "creating getCommitment payload") + } + + reqBlockNumber := new(big.Int).SetUint64(req.req.Raw.BlockNumber) + + // Subtract 5 since the newest block likely isn't indexed yet and will cause "header not + // found" errors. 
+ currBlock := new(big.Int).SetUint64(lsn.getLatestHead() - 5) + m := bigmath.Max(reqBlockNumber, currBlock) + + var result string + calls[i] = rpc.BatchElem{ + Method: "eth_call", + Args: []interface{}{ + map[string]interface{}{ + "to": lsn.coordinator.Address(), + "data": hexutil.Bytes(payload), }, - }, pg.WithQueryer(tx)) - return err - }) + // The block at which we want to make the call + hexutil.EncodeBig(m), + }, + Result: &result, + } + } + + err := lsn.ethClient.BatchCallContext(ctx, calls) + if err != nil { + return fulfilled, errors.Wrap(err, "making batch call") + } + + var errs error + for i, call := range calls { + if call.Error != nil { + errs = multierr.Append(errs, fmt.Errorf("checking request %s with hash %s: %w", + reqs[i].req.RequestId.String(), reqs[i].req.Raw.TxHash.String(), call.Error)) + continue + } + + rString, ok := call.Result.(*string) + if !ok { + errs = multierr.Append(errs, + fmt.Errorf("unexpected result %+v on request %s with hash %s", + call.Result, reqs[i].req.RequestId.String(), reqs[i].req.Raw.TxHash.String())) + continue + } + result, err := hexutil.Decode(*rString) if err != nil { - rlog.Errorw("Error enqueuing fulfillment, requeuing request", "err", err) + errs = multierr.Append(errs, + fmt.Errorf("decoding batch call result %+v %s request %s with hash %s: %w", + call.Result, *rString, reqs[i].req.RequestId.String(), reqs[i].req.Raw.TxHash.String(), err)) continue } - // If we successfully enqueued for the txm, subtract that balance - // And loop to attempt to enqueue another fulfillment - startBalanceNoReserveLink = startBalanceNoReserveLink.Sub(startBalanceNoReserveLink, maxLink) - processed[vrfRequest.RequestId.String()] = struct{}{} - incProcessedReqs(lsn.job.Name.ValueOrZero(), lsn.job.ExternalJobID, v2) - } - lggr.Infow("Finished processing for sub", - "total reqs", len(reqs), - "total processed", len(processed), - "total unique", uniqueReqs(reqs), - "time", time.Since(start).String()) - return processed + + if 
utils.IsEmpty(result) { + l.Infow("Request already fulfilled", + "reqID", reqs[i].req.RequestId.String(), + "attempts", reqs[i].attempts, + "txHash", reqs[i].req.Raw.TxHash) + fulfilled[i] = true + } + } + + l.Debugw("Done checking fulfillment status", + "numChecked", len(reqs), "time", time.Since(start).String()) + return fulfilled, errs +} + +func (lsn *listenerV2) runPipelines( + ctx context.Context, + l logger.Logger, + maxGasPriceWei *big.Int, + reqs []pendingRequest, +) []vrfPipelineResult { + var ( + start = time.Now() + results = make([]vrfPipelineResult, len(reqs)) + wg = sync.WaitGroup{} + ) + + for i, req := range reqs { + wg.Add(1) + go func(i int, req pendingRequest) { + defer wg.Done() + results[i] = lsn.simulateFulfillment(ctx, maxGasPriceWei, req, l) + }(i, req) + } + wg.Wait() + + l.Debugw("Finished running pipelines", + "count", len(reqs), "time", time.Since(start).String()) + return results } func (lsn *listenerV2) estimateFeeJuels( @@ -446,9 +888,7 @@ func (lsn *listenerV2) estimateFeeJuels( if err != nil { return nil, errors.Wrap(err, "get aggregator latestAnswer") } - // NOTE: no need to sanity check this as this is for logging purposes only - // and should not be used to determine whether a user has enough funds in actuality, - // we should always simulate for that. + juelsNeeded, err := EstimateFeeJuels( req.CallbackGasLimit, maxGasPriceWei, @@ -462,26 +902,27 @@ func (lsn *listenerV2) estimateFeeJuels( // Here we use the pipeline to parse the log, generate a vrf response // then simulate the transaction at the max gas price to determine its maximum link cost. 
-func (lsn *listenerV2) getMaxLinkForFulfillment( +func (lsn *listenerV2) simulateFulfillment( + ctx context.Context, maxGasPriceWei *big.Int, req pendingRequest, lg logger.Logger, -) (*big.Int, pipeline.Run, string, uint64, error) { +) vrfPipelineResult { + var ( + res = vrfPipelineResult{req: req} + err error + ) // estimate how much juels are needed so that we can log it if the simulation fails. - juelsNeeded, err := lsn.estimateFeeJuels(req.req, maxGasPriceWei) + res.juelsNeeded, err = lsn.estimateFeeJuels(req.req, maxGasPriceWei) if err != nil { // not critical, just log and continue lg.Warnw("unable to estimate juels needed for request, continuing anyway", "reqID", req.req.RequestId, "err", err, ) - juelsNeeded = big.NewInt(0) + res.juelsNeeded = big.NewInt(0) } - var ( - maxLink *big.Int - payload string - gaslimit uint64 - ) + vars := pipeline.NewVarsFrom(map[string]interface{}{ "jobSpec": map[string]interface{}{ "databaseID": lsn.job.ID, @@ -498,50 +939,56 @@ func (lsn *listenerV2) getMaxLinkForFulfillment( "logData": req.req.Raw.Data, }, }) - run, trrs, err := lsn.pipelineRunner.ExecuteRun(context.Background(), *lsn.job.PipelineSpec, vars, lg) + var trrs pipeline.TaskRunResults + res.run, trrs, err = lsn.pipelineRunner.ExecuteRun(ctx, *lsn.job.PipelineSpec, vars, lg) if err != nil { - lg.Errorw("Failed executing run", "err", err) - return maxLink, run, payload, gaslimit, err + res.err = errors.Wrap(err, "executing run") + return res } // The call task will fail if there are insufficient funds - if run.AllErrors.HasError() { - lg.Warnw("Simulation errored, possibly insufficient funds. 
Request will remain unprocessed until funds are available", - "err", run.AllErrors.ToError(), "max gas price", maxGasPriceWei, "reqID", req.req.RequestId, "juelsNeeded", juelsNeeded) - return maxLink, run, payload, gaslimit, errors.Wrap(run.AllErrors.ToError(), "simulation errored") + if res.run.AllErrors.HasError() { + res.err = errors.Wrap(res.run.AllErrors.ToError(), "Simulation errored, possibly insufficient funds. Request will remain unprocessed until funds are available") + return res } if len(trrs.FinalResult(lg).Values) != 1 { - lg.Errorw("Unexpected number of outputs", "expectedNumOutputs", 1, "actualNumOutputs", len(trrs.FinalResult(lg).Values)) - return maxLink, run, payload, gaslimit, errors.New("unexpected number of outputs") + res.err = errors.Errorf("unexpected number of outputs, expected 1, was %d", len(trrs.FinalResult(lg).Values)) + return res } + // Run succeeded, we expect a byte array representing the billing amount b, ok := trrs.FinalResult(lg).Values[0].([]uint8) if !ok { - lg.Errorw("Unexpected type, expected []uint8 final result") - return maxLink, run, payload, gaslimit, errors.New("expected []uint8 final result") + res.err = errors.New("expected []uint8 final result") + return res } - maxLink = utils.HexToBig(hexutil.Encode(b)[2:]) + res.maxLink = utils.HexToBig(hexutil.Encode(b)[2:]) for _, trr := range trrs { if trr.Task.Type() == pipeline.TaskTypeVRFV2 { m := trr.Result.Value.(map[string]interface{}) - payload = m["output"].(string) + res.payload = m["output"].(string) + res.proof = m["proof"].(vrf_coordinator_v2.VRFProof) + res.reqCommitment = m["requestCommitment"].(vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment) } + if trr.Task.Type() == pipeline.TaskTypeEstimateGasLimit { - gaslimit = trr.Result.Value.(uint64) + res.gasLimit = trr.Result.Value.(uint64) } } - return maxLink, run, payload, gaslimit, nil + return res } func (lsn *listenerV2) runRequestHandler(pollPeriod time.Duration, wg *sync.WaitGroup) { defer wg.Done() tick 
:= time.NewTicker(pollPeriod) defer tick.Stop() + ctx, cancel := utils.ContextFromChan(lsn.chStop) + defer cancel() for { select { case <-lsn.chStop: return case <-tick.C: - lsn.processPendingVRFRequests() + lsn.processPendingVRFRequests(ctx) } } } @@ -560,14 +1007,10 @@ func (lsn *listenerV2) runLogListener(unsubscribes []func(), minConfs uint32, wg case <-lsn.reqLogs.Notify(): // Process all the logs in the queue if one is added for { - i, exists := lsn.reqLogs.Retrieve() + lb, exists := lsn.reqLogs.Retrieve() if !exists { break } - lb, ok := i.(log.Broadcast) - if !ok { - panic(fmt.Sprintf("VRFListenerV2: invariant violated, expected log.Broadcast got %T", i)) - } lsn.handleLog(lb, minConfs) } } @@ -680,7 +1123,7 @@ func (lsn *listenerV2) HandleLog(lb log.Broadcast) { } } -// Job complies with log.Listener +// JobID complies with log.Listener func (lsn *listenerV2) JobID() int32 { return lsn.job.ID } diff --git a/core/services/vrf/listener_v2_test.go b/core/services/vrf/listener_v2_test.go index 9e89edc3d0c..d1328838494 100644 --- a/core/services/vrf/listener_v2_test.go +++ b/core/services/vrf/listener_v2_test.go @@ -3,22 +3,32 @@ package vrf import ( "math/big" "testing" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" uuid "github.com/satori/go.uuid" + "github.com/shopspring/decimal" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/smartcontractkit/chainlink/core/assets" + "github.com/smartcontractkit/chainlink/core/services/job" + "github.com/smartcontractkit/sqlx" + "github.com/smartcontractkit/chainlink/core/chains/evm/txmgr" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/aggregator_v3_interface" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/vrf_coordinator_v2" "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" 
"github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/keystore" + "github.com/smartcontractkit/chainlink/core/services/keystore/keys/ethkey" "github.com/smartcontractkit/chainlink/core/services/pg" + vrf_mocks "github.com/smartcontractkit/chainlink/core/services/vrf/mocks" "github.com/smartcontractkit/chainlink/core/testdata/testspecs" "github.com/smartcontractkit/chainlink/core/utils" - "github.com/smartcontractkit/sqlx" ) func addEthTx(t *testing.T, db *sqlx.DB, from common.Address, state txmgr.EthTxState, maxLink string, subID uint64) { @@ -34,8 +44,8 @@ func addEthTx(t *testing.T, db *sqlx.DB, from common.Address, state txmgr.EthTxS 0, // limit state, txmgr.EthTxMeta{ - MaxLink: maxLink, - SubID: subID, + MaxLink: &maxLink, + SubID: &subID, }, uuid.NullUUID{}, 1337, @@ -57,8 +67,8 @@ func addConfirmedEthTx(t *testing.T, db *sqlx.DB, from common.Address, maxLink s 0, // value 0, // limit txmgr.EthTxMeta{ - MaxLink: maxLink, - SubID: subID, + MaxLink: &maxLink, + SubID: &subID, }, uuid.NullUUID{}, 1337, @@ -169,3 +179,257 @@ func TestListener_GetConfirmedAt(t *testing.T) { }, uint32(nodeMinConfs)) require.Equal(t, uint64(200), confirmedAt) // log block number + # of confirmations } + +func TestListener_Backoff(t *testing.T) { + var tests = []struct { + name string + initial time.Duration + max time.Duration + last time.Duration + retries int + expected bool + }{ + { + name: "Backoff disabled, ready", + expected: true, + }, + { + name: "First try, ready", + initial: time.Minute, + max: time.Hour, + last: 0, + retries: 0, + expected: true, + }, + { + name: "Second try, not ready", + initial: time.Minute, + max: time.Hour, + last: 59 * time.Second, + retries: 1, + expected: false, + }, + { + name: "Second try, ready", + initial: time.Minute, + max: time.Hour, + last: 61 * time.Second, // Last try was over a minute ago + retries: 1, + expected: true, + }, + { + name: "Third try, not ready", + initial: time.Minute, + 
max: time.Hour, + last: 77 * time.Second, // Slightly less than backoffFactor * initial + retries: 2, + expected: false, + }, + { + name: "Third try, ready", + initial: time.Minute, + max: time.Hour, + last: 79 * time.Second, // Slightly more than backoffFactor * initial + retries: 2, + expected: true, + }, + { + name: "Max, not ready", + initial: time.Minute, + max: time.Hour, + last: 59 * time.Minute, // Slightly less than max + retries: 900, + expected: false, + }, + { + name: "Max, ready", + initial: time.Minute, + max: time.Hour, + last: 61 * time.Minute, // Slightly more than max + retries: 900, + expected: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + lsn := &listenerV2{job: job.Job{ + VRFSpec: &job.VRFSpec{ + BackoffInitialDelay: test.initial, + BackoffMaxDelay: test.max, + }, + }} + + req := pendingRequest{ + confirmedAtBlock: 5, + attempts: test.retries, + lastTry: time.Now().Add(-test.last), + } + + require.Equal(t, test.expected, lsn.ready(req, 10)) + }) + } +} + +func TestListener_ShouldProcessSub_NotEnoughBalance(t *testing.T) { + mockAggregator := &vrf_mocks.AggregatorV3Interface{} + mockAggregator.On("LatestRoundData", mock.Anything).Return( + aggregator_v3_interface.LatestRoundData{ + Answer: decimal.RequireFromString("9821673525377230000").BigInt(), + }, + nil, + ) + defer mockAggregator.AssertExpectations(t) + + cfg := &vrf_mocks.Config{} + cfg.On("KeySpecificMaxGasPriceWei", mock.Anything).Return( + assets.GWei(200), + ) + defer cfg.AssertExpectations(t) + + lsn := &listenerV2{ + job: job.Job{ + VRFSpec: &job.VRFSpec{ + FromAddresses: []ethkey.EIP55Address{ + ethkey.EIP55Address("0x7Bf4E7069d96eEce4f48F50A9768f8615A8cD6D8"), + }, + }, + }, + aggregator: mockAggregator, + l: logger.TestLogger(t), + chainID: big.NewInt(1337), + cfg: cfg, + } + subID := uint64(1) + sub := vrf_coordinator_v2.GetSubscription{ + Balance: assets.GWei(100), + } + shouldProcess := lsn.shouldProcessSub(subID, sub, 
[]pendingRequest{ + { + req: &vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{ + CallbackGasLimit: 1e6, + RequestId: big.NewInt(1), + }, + }, + }) + assert.False(t, shouldProcess) // estimated fee: 24435754189944131 juels, 100 GJuels not enough +} + +func TestListener_ShouldProcessSub_EnoughBalance(t *testing.T) { + mockAggregator := &vrf_mocks.AggregatorV3Interface{} + mockAggregator.On("LatestRoundData", mock.Anything).Return( + aggregator_v3_interface.LatestRoundData{ + Answer: decimal.RequireFromString("9821673525377230000").BigInt(), + }, + nil, + ) + defer mockAggregator.AssertExpectations(t) + + cfg := &vrf_mocks.Config{} + cfg.On("KeySpecificMaxGasPriceWei", mock.Anything).Return( + assets.GWei(200), + ) + defer cfg.AssertExpectations(t) + + lsn := &listenerV2{ + job: job.Job{ + VRFSpec: &job.VRFSpec{ + FromAddresses: []ethkey.EIP55Address{ + ethkey.EIP55Address("0x7Bf4E7069d96eEce4f48F50A9768f8615A8cD6D8"), + }, + }, + }, + aggregator: mockAggregator, + l: logger.TestLogger(t), + chainID: big.NewInt(1337), + cfg: cfg, + } + subID := uint64(1) + sub := vrf_coordinator_v2.GetSubscription{ + Balance: assets.Ether(1), + } + shouldProcess := lsn.shouldProcessSub(subID, sub, []pendingRequest{ + { + req: &vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{ + CallbackGasLimit: 1e6, + RequestId: big.NewInt(1), + }, + }, + }) + assert.True(t, shouldProcess) // estimated fee: 24435754189944131 juels, 1 LINK is enough. 
+} + +func TestListener_ShouldProcessSub_NoLinkEthPrice(t *testing.T) { + mockAggregator := &vrf_mocks.AggregatorV3Interface{} + mockAggregator.On("LatestRoundData", mock.Anything).Return( + aggregator_v3_interface.LatestRoundData{}, + errors.New("aggregator error"), + ) + defer mockAggregator.AssertExpectations(t) + + cfg := &vrf_mocks.Config{} + cfg.On("KeySpecificMaxGasPriceWei", mock.Anything).Return( + assets.GWei(200), + ) + defer cfg.AssertExpectations(t) + + lsn := &listenerV2{ + job: job.Job{ + VRFSpec: &job.VRFSpec{ + FromAddresses: []ethkey.EIP55Address{ + ethkey.EIP55Address("0x7Bf4E7069d96eEce4f48F50A9768f8615A8cD6D8"), + }, + }, + }, + aggregator: mockAggregator, + l: logger.TestLogger(t), + chainID: big.NewInt(1337), + cfg: cfg, + } + subID := uint64(1) + sub := vrf_coordinator_v2.GetSubscription{ + Balance: assets.Ether(1), + } + shouldProcess := lsn.shouldProcessSub(subID, sub, []pendingRequest{ + { + req: &vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{ + CallbackGasLimit: 1e6, + RequestId: big.NewInt(1), + }, + }, + }) + assert.True(t, shouldProcess) // no fee available, try to process it. +} + +func TestListener_ShouldProcessSub_NoFromAddresses(t *testing.T) { + mockAggregator := &vrf_mocks.AggregatorV3Interface{} + defer mockAggregator.AssertExpectations(t) + + cfg := &vrf_mocks.Config{} + defer cfg.AssertExpectations(t) + + lsn := &listenerV2{ + job: job.Job{ + VRFSpec: &job.VRFSpec{ + FromAddresses: []ethkey.EIP55Address{}, + }, + }, + aggregator: mockAggregator, + l: logger.TestLogger(t), + chainID: big.NewInt(1337), + cfg: cfg, + } + subID := uint64(1) + sub := vrf_coordinator_v2.GetSubscription{ + Balance: assets.Ether(1), + } + shouldProcess := lsn.shouldProcessSub(subID, sub, []pendingRequest{ + { + req: &vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{ + CallbackGasLimit: 1e6, + RequestId: big.NewInt(1), + }, + }, + }) + assert.True(t, shouldProcess) // no addresses, but try to process it. 
+} diff --git a/core/services/vrf/listener_v2_types.go b/core/services/vrf/listener_v2_types.go new file mode 100644 index 00000000000..20bc95fd227 --- /dev/null +++ b/core/services/vrf/listener_v2_types.go @@ -0,0 +1,222 @@ +package vrf + +import ( + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink/core/chains/evm/log" + "github.com/smartcontractkit/chainlink/core/chains/evm/txmgr" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/batch_vrf_coordinator_v2" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/null" + "github.com/smartcontractkit/chainlink/core/services/pg" + "github.com/smartcontractkit/chainlink/core/services/pipeline" + bigmath "github.com/smartcontractkit/chainlink/core/utils/big_math" +) + +// batchFulfillment contains all the information needed in order to +// perform a batch fulfillment operation on the batch VRF coordinator. +type batchFulfillment struct { + proofs []batch_vrf_coordinator_v2.VRFTypesProof + commitments []batch_vrf_coordinator_v2.VRFTypesRequestCommitment + totalGasLimit uint64 + runs []*pipeline.Run + reqIDs []*big.Int + lbs []log.Broadcast + maxLinks []interface{} +} + +func newBatchFulfillment(result vrfPipelineResult) *batchFulfillment { + return &batchFulfillment{ + proofs: []batch_vrf_coordinator_v2.VRFTypesProof{ + batch_vrf_coordinator_v2.VRFTypesProof(result.proof), + }, + commitments: []batch_vrf_coordinator_v2.VRFTypesRequestCommitment{ + batch_vrf_coordinator_v2.VRFTypesRequestCommitment(result.reqCommitment), + }, + totalGasLimit: result.gasLimit, + runs: []*pipeline.Run{ + &result.run, + }, + reqIDs: []*big.Int{ + result.req.req.RequestId, + }, + lbs: []log.Broadcast{ + result.req.lb, + }, + maxLinks: []interface{}{ + result.maxLink, + }, + } +} + +// batchFulfillments manages many batchFulfillment objects. 
+// It makes organizing many runs into batches that respect the +// batchGasLimit easy via the addRun method. +type batchFulfillments struct { + fulfillments []*batchFulfillment + batchGasLimit uint64 + currIndex int +} + +func newBatchFulfillments(batchGasLimit uint64) *batchFulfillments { + return &batchFulfillments{ + fulfillments: []*batchFulfillment{}, + batchGasLimit: batchGasLimit, + currIndex: 0, + } +} + +// addRun adds the given run to an existing batch, or creates a new +// batch if the batchGasLimit that has been configured was exceeded. +func (b *batchFulfillments) addRun(result vrfPipelineResult) { + if len(b.fulfillments) == 0 { + b.fulfillments = append(b.fulfillments, newBatchFulfillment(result)) + } else { + currBatch := b.fulfillments[b.currIndex] + if (currBatch.totalGasLimit + result.gasLimit) >= b.batchGasLimit { + // don't add to curr batch, add new batch and increment index + b.fulfillments = append(b.fulfillments, newBatchFulfillment(result)) + b.currIndex++ + } else { + // we're okay on gas, add to current batch + currBatch.proofs = append(currBatch.proofs, batch_vrf_coordinator_v2.VRFTypesProof(result.proof)) + currBatch.commitments = append(currBatch.commitments, batch_vrf_coordinator_v2.VRFTypesRequestCommitment(result.reqCommitment)) + currBatch.totalGasLimit += result.gasLimit + currBatch.runs = append(currBatch.runs, &result.run) + currBatch.reqIDs = append(currBatch.reqIDs, result.req.req.RequestId) + currBatch.lbs = append(currBatch.lbs, result.req.lb) + currBatch.maxLinks = append(currBatch.maxLinks, result.maxLink) + } + } +} + +func (lsn *listenerV2) processBatch( + l logger.Logger, + subID uint64, + fromAddress common.Address, + startBalanceNoReserveLink *big.Int, + maxCallbackGasLimit uint64, + batch *batchFulfillment, +) (processedRequestIDs []string) { + start := time.Now() + + // Enqueue a single batch tx for requests that we're able to fulfill based on whether + // they passed simulation or not. 
+ payload, err := batchCoordinatorV2ABI.Pack("fulfillRandomWords", batch.proofs, batch.commitments) + if err != nil { + // should never happen + l.Errorw("Failed to pack batch fulfillRandomWords payload", + "err", err, "proofs", batch.proofs, "commitments", batch.commitments) + return + } + + // Bump the total gas limit by a bit so that we account for the overhead of the batch + // contract's calling. + totalGasLimitBumped := batchFulfillmentGasEstimate( + uint64(len(batch.proofs)), + maxCallbackGasLimit, + lsn.job.VRFSpec.BatchFulfillmentGasMultiplier, + ) + ll := l.With("numRequestsInBatch", len(batch.reqIDs), + "requestIDs", batch.reqIDs, + "batchSumGasLimit", batch.totalGasLimit, + "linkBalance", startBalanceNoReserveLink, + "totalGasLimitBumped", totalGasLimitBumped, + "gasMultiplier", lsn.job.VRFSpec.BatchFulfillmentGasMultiplier, + ) + ll.Info("Enqueuing batch fulfillment") + var ethTX txmgr.EthTx + err = lsn.q.Transaction(func(tx pg.Queryer) error { + if err = lsn.pipelineRunner.InsertFinishedRuns(batch.runs, true, pg.WithQueryer(tx)); err != nil { + return errors.Wrap(err, "inserting finished pipeline runs") + } + + if err = lsn.logBroadcaster.MarkManyConsumed(batch.lbs, pg.WithQueryer(tx)); err != nil { + return errors.Wrap(err, "mark logs consumed") + } + + maxLinkStr := bigmath.Accumulate(batch.maxLinks).String() + reqIDHashes := []common.Hash{} + for _, reqID := range batch.reqIDs { + reqIDHashes = append(reqIDHashes, common.BytesToHash(reqID.Bytes())) + } + ethTX, err = lsn.txm.CreateEthTransaction(txmgr.NewTx{ + FromAddress: fromAddress, + ToAddress: lsn.batchCoordinator.Address(), + EncodedPayload: payload, + GasLimit: totalGasLimitBumped, + MinConfirmations: null.Uint32From(uint32(lsn.cfg.MinRequiredOutgoingConfirmations())), + Strategy: txmgr.NewSendEveryStrategy(), + Meta: &txmgr.EthTxMeta{ + RequestIDs: reqIDHashes, + MaxLink: &maxLinkStr, + SubID: &subID, + }, + }, pg.WithQueryer(tx)) + + return errors.Wrap(err, "create batch fulfillment eth 
transaction") + }) + if err != nil { + ll.Errorw("Error enqueuing batch fulfillments, requeuing requests", "err", err) + return + } + ll.Infow("Enqueued fulfillment", "ethTxID", ethTX.ID) + + // mark requests as processed since the fulfillment has been successfully enqueued + // to the txm. + for _, reqID := range batch.reqIDs { + processedRequestIDs = append(processedRequestIDs, reqID.String()) + incProcessedReqs(lsn.job.Name.ValueOrZero(), lsn.job.ExternalJobID, v2) + } + + ll.Infow("Successfully enqueued batch", "duration", time.Since(start)) + + return +} + +// getUnconsumed returns the requests in the given slice that are not expired +// and not marked consumed in the log broadcaster. +func (lsn *listenerV2) getUnconsumed(l logger.Logger, reqs []pendingRequest) (unconsumed []pendingRequest, processed []string) { + for _, req := range reqs { + // Check if we can ignore the request due to its age. + if time.Now().UTC().Sub(req.utcTimestamp) >= lsn.job.VRFSpec.RequestTimeout { + l.Infow("Request too old, dropping it", + "reqID", req.req.RequestId.String(), + "txHash", req.req.Raw.TxHash) + lsn.markLogAsConsumed(req.lb) + processed = append(processed, req.req.RequestId.String()) + incDroppedReqs(lsn.job.Name.ValueOrZero(), lsn.job.ExternalJobID, v2, reasonAge) + continue + } + + // This check to see if the log was consumed needs to be in the same + // goroutine as the mark consumed to avoid processing duplicates. + consumed, err := lsn.logBroadcaster.WasAlreadyConsumed(req.lb) + if err != nil { + // Do not process for now, retry on next iteration. 
+ l.Errorw("Could not determine if log was already consumed", + "reqID", req.req.RequestId.String(), + "txHash", req.req.Raw.TxHash, + "error", err) + } else if consumed { + processed = append(processed, req.req.RequestId.String()) + } else { + unconsumed = append(unconsumed, req) + } + } + return +} + +func batchFulfillmentGasEstimate( + batchSize uint64, + maxCallbackGasLimit uint64, + gasMultiplier float64, +) uint64 { + return uint64( + gasMultiplier * float64((maxCallbackGasLimit+400_000)+batchSize*BatchFulfillmentIterationGasCost), + ) +} diff --git a/core/services/vrf/listener_v2_types_test.go b/core/services/vrf/listener_v2_types_test.go new file mode 100644 index 00000000000..14fec694d40 --- /dev/null +++ b/core/services/vrf/listener_v2_types_test.go @@ -0,0 +1,45 @@ +package vrf + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/core/chains/evm/log/mocks" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/vrf_coordinator_v2" + "github.com/smartcontractkit/chainlink/core/services/pipeline" +) + +func Test_BatchFulfillments_AddRun(t *testing.T) { + batchLimit := uint64(2500) + bfs := newBatchFulfillments(batchLimit) + for i := 0; i < 4; i++ { + bfs.addRun(vrfPipelineResult{ + gasLimit: 500, + req: pendingRequest{ + req: &vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{ + RequestId: big.NewInt(1), + }, + lb: &mocks.Broadcast{}, + }, + run: pipeline.NewRun(pipeline.Spec{}, pipeline.Vars{}), + }) + require.Len(t, bfs.fulfillments, 1) + } + + require.Equal(t, uint64(2000), bfs.fulfillments[0].totalGasLimit) + + // This addition should create and add a new batch + bfs.addRun(vrfPipelineResult{ + gasLimit: 500, + req: pendingRequest{ + req: &vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{ + RequestId: big.NewInt(1), + }, + lb: &mocks.Broadcast{}, + }, + run: pipeline.NewRun(pipeline.Spec{}, pipeline.Vars{}), + }) + require.Len(t, bfs.fulfillments, 2) 
+} diff --git a/core/services/vrf/mocks/aggregator_v3_interface.go b/core/services/vrf/mocks/aggregator_v3_interface.go new file mode 100644 index 00000000000..182af61acc2 --- /dev/null +++ b/core/services/vrf/mocks/aggregator_v3_interface.go @@ -0,0 +1,143 @@ +// Code generated by mockery v2.10.1. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + aggregator_v3_interface "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/aggregator_v3_interface" + + bind "github.com/ethereum/go-ethereum/accounts/abi/bind" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" +) + +// AggregatorV3Interface is an autogenerated mock type for the AggregatorV3InterfaceInterface type +type AggregatorV3Interface struct { + mock.Mock +} + +// Address provides a mock function with given fields: +func (_m *AggregatorV3Interface) Address() common.Address { + ret := _m.Called() + + var r0 common.Address + if rf, ok := ret.Get(0).(func() common.Address); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + return r0 +} + +// Decimals provides a mock function with given fields: opts +func (_m *AggregatorV3Interface) Decimals(opts *bind.CallOpts) (uint8, error) { + ret := _m.Called(opts) + + var r0 uint8 + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint8); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint8) + } + + var r1 error + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Description provides a mock function with given fields: opts +func (_m *AggregatorV3Interface) Description(opts *bind.CallOpts) (string, error) { + ret := _m.Called(opts) + + var r0 string + if rf, ok := ret.Get(0).(func(*bind.CallOpts) string); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(string) + } + + var r1 error + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } 
else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRoundData provides a mock function with given fields: opts, _roundId +func (_m *AggregatorV3Interface) GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (aggregator_v3_interface.GetRoundData, error) { + ret := _m.Called(opts, _roundId) + + var r0 aggregator_v3_interface.GetRoundData + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) aggregator_v3_interface.GetRoundData); ok { + r0 = rf(opts, _roundId) + } else { + r0 = ret.Get(0).(aggregator_v3_interface.GetRoundData) + } + + var r1 error + if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int) error); ok { + r1 = rf(opts, _roundId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestRoundData provides a mock function with given fields: opts +func (_m *AggregatorV3Interface) LatestRoundData(opts *bind.CallOpts) (aggregator_v3_interface.LatestRoundData, error) { + ret := _m.Called(opts) + + var r0 aggregator_v3_interface.LatestRoundData + if rf, ok := ret.Get(0).(func(*bind.CallOpts) aggregator_v3_interface.LatestRoundData); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(aggregator_v3_interface.LatestRoundData) + } + + var r1 error + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Version provides a mock function with given fields: opts +func (_m *AggregatorV3Interface) Version(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + var r0 *big.Int + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/core/services/vrf/mocks/config.go b/core/services/vrf/mocks/config.go new file mode 100644 index 00000000000..a6ddb12f7ec --- /dev/null +++ 
b/core/services/vrf/mocks/config.go @@ -0,0 +1,73 @@ +// Code generated by mockery v2.10.1. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + mock "github.com/stretchr/testify/mock" +) + +// Config is an autogenerated mock type for the Config type +type Config struct { + mock.Mock +} + +// EvmGasLimitDefault provides a mock function with given fields: +func (_m *Config) EvmGasLimitDefault() uint64 { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// KeySpecificMaxGasPriceWei provides a mock function with given fields: addr +func (_m *Config) KeySpecificMaxGasPriceWei(addr common.Address) *big.Int { + ret := _m.Called(addr) + + var r0 *big.Int + if rf, ok := ret.Get(0).(func(common.Address) *big.Int); ok { + r0 = rf(addr) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + return r0 +} + +// MinIncomingConfirmations provides a mock function with given fields: +func (_m *Config) MinIncomingConfirmations() uint32 { + ret := _m.Called() + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// MinRequiredOutgoingConfirmations provides a mock function with given fields: +func (_m *Config) MinRequiredOutgoingConfirmations() uint64 { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} diff --git a/core/services/vrf/mocks/geth_key_store.go b/core/services/vrf/mocks/geth_key_store.go index ebd1a97a75d..b10746d3785 100644 --- a/core/services/vrf/mocks/geth_key_store.go +++ b/core/services/vrf/mocks/geth_key_store.go @@ -1,8 +1,10 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks import ( + big "math/big" + common "github.com/ethereum/go-ethereum/common" mock "github.com/stretchr/testify/mock" ) @@ -12,19 +14,20 @@ type GethKeyStore struct { mock.Mock } -// GetRoundRobinAddress provides a mock function with given fields: addresses -func (_m *GethKeyStore) GetRoundRobinAddress(addresses ...common.Address) (common.Address, error) { +// GetRoundRobinAddress provides a mock function with given fields: chainID, addresses +func (_m *GethKeyStore) GetRoundRobinAddress(chainID *big.Int, addresses ...common.Address) (common.Address, error) { _va := make([]interface{}, len(addresses)) for _i := range addresses { _va[_i] = addresses[_i] } var _ca []interface{} + _ca = append(_ca, chainID) _ca = append(_ca, _va...) ret := _m.Called(_ca...) var r0 common.Address - if rf, ok := ret.Get(0).(func(...common.Address) common.Address); ok { - r0 = rf(addresses...) + if rf, ok := ret.Get(0).(func(*big.Int, ...common.Address) common.Address); ok { + r0 = rf(chainID, addresses...) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(common.Address) @@ -32,8 +35,8 @@ func (_m *GethKeyStore) GetRoundRobinAddress(addresses ...common.Address) (commo } var r1 error - if rf, ok := ret.Get(1).(func(...common.Address) error); ok { - r1 = rf(addresses...) + if rf, ok := ret.Get(1).(func(*big.Int, ...common.Address) error); ok { + r1 = rf(chainID, addresses...) 
} else { r1 = ret.Error(1) } diff --git a/core/services/vrf/validate.go b/core/services/vrf/validate.go index 1b2cc00d669..e5afcc35eca 100644 --- a/core/services/vrf/validate.go +++ b/core/services/vrf/validate.go @@ -2,6 +2,7 @@ package vrf import ( "bytes" + "fmt" "time" "github.com/pelletier/go-toml" @@ -57,6 +58,24 @@ func ValidatedVRFSpec(tomlString string) (job.Job, error) { if spec.RequestTimeout == 0 { spec.RequestTimeout = 24 * time.Hour } + + if spec.BatchFulfillmentEnabled && spec.BatchCoordinatorAddress == nil { + return jb, errors.Wrap(ErrKeyNotSet, "batch coordinator address must be provided if batchFulfillmentEnabled = true") + } + + if spec.BatchFulfillmentGasMultiplier <= 0 { + spec.BatchFulfillmentGasMultiplier = 1.15 + } + + if spec.ChunkSize == 0 { + spec.ChunkSize = 20 + } + + if spec.BackoffMaxDelay < spec.BackoffInitialDelay { + return jb, fmt.Errorf("backoff max delay (%s) cannot be less than backoff initial delay (%s)", + spec.BackoffMaxDelay.String(), spec.BackoffInitialDelay.String()) + } + var foundVRFTask bool for _, t := range jb.Pipeline.Tasks { if t.Type() == pipeline.TaskTypeVRF || t.Type() == pipeline.TaskTypeVRFV2 { diff --git a/core/services/vrf/validate_test.go b/core/services/vrf/validate_test.go index 2a8c7646916..9a5f89ce718 100644 --- a/core/services/vrf/validate_test.go +++ b/core/services/vrf/validate_test.go @@ -26,6 +26,9 @@ minIncomingConfirmations = 10 publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" requestTimeout = "168h" # 7 days +chunkSize = 25 +backoffInitialDelay = "1m" +backoffMaxDelay = "2h" observationSource = """ decode_log [type=ethabidecodelog abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" @@ -52,6 +55,9 @@ decode_log->vrf->encode_tx->submit_tx assert.Equal(t, "0xB3b7874F13387D44a3398D298B075B7A3505D8d4", 
s.VRFSpec.CoordinatorAddress.String()) assert.Equal(t, "0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179800", s.VRFSpec.PublicKey.String()) require.Equal(t, 168*time.Hour, s.VRFSpec.RequestTimeout) + require.Equal(t, time.Minute, s.VRFSpec.BackoffInitialDelay) + require.Equal(t, 2*time.Hour, s.VRFSpec.BackoffMaxDelay) + require.EqualValues(t, 25, s.VRFSpec.ChunkSize) }, }, { @@ -318,6 +324,111 @@ decode_log->vrf->encode_tx->submit_tx require.Equal(t, 7*24*time.Hour, os.VRFSpec.RequestTimeout) }, }, + { + name: "batch fulfillment enabled, no batch coordinator address", + toml: ` + type = "vrf" + schemaVersion = 1 + minIncomingConfirmations = 10 + requestedConfsDelay = 10 + batchFulfillmentEnabled = true + publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" + coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" + externalJobID = "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46" + observationSource = """ + decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] + vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] + encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] + submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] + decode_log->vrf->encode_tx->submit_tx + """ + `, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "batch fulfillment enabled, batch coordinator address provided", + toml: ` + type = "vrf" + schemaVersion = 1 + minIncomingConfirmations = 10 + requestedConfsDelay = 10 + batchFulfillmentEnabled 
= true + batchCoordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" + publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" + coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" + externalJobID = "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46" + observationSource = """ + decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] + vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] + encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] + submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] + decode_log->vrf->encode_tx->submit_tx + """ + `, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + require.Equal(t, "0xB3b7874F13387D44a3398D298B075B7A3505D8d4", os.VRFSpec.BatchCoordinatorAddress.String()) + }, + }, + { + name: "initial delay must be <= max delay, invalid", + toml: ` +type = "vrf" +schemaVersion = 1 +minIncomingConfirmations = 10 +publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" +coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" +requestTimeout = "168h" # 7 days +chunkSize = 25 +backoffInitialDelay = "1h" +backoffMaxDelay = "30m" +observationSource = """ +decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + 
requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] +submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] +decode_log->vrf->encode_tx->submit_tx +""" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.Error(t, err) + }, + }, } for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { diff --git a/core/services/webhook/mocks/external_initiator_manager.go b/core/services/webhook/mocks/external_initiator_manager.go index 24dee6c257d..eb3092fe6aa 100644 --- a/core/services/webhook/mocks/external_initiator_manager.go +++ b/core/services/webhook/mocks/external_initiator_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/services/webhook/mocks/http_client.go b/core/services/webhook/mocks/http_client.go index 1676baa851c..74d600d46c1 100644 --- a/core/services/webhook/mocks/http_client.go +++ b/core/services/webhook/mocks/http_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/sessions/mocks/orm.go b/core/sessions/mocks/orm.go index 9bd7dffcacf..d442d231bbc 100644 --- a/core/sessions/mocks/orm.go +++ b/core/sessions/mocks/orm.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. 
package mocks diff --git a/core/static/static.go b/core/static/static.go index 7b640390417..301b1400027 100644 --- a/core/static/static.go +++ b/core/static/static.go @@ -69,10 +69,3 @@ func SetConsumerName(uri *url.URL, name string, id *uuid.UUID) { q.Set("application_name", applicationName) uri.RawQuery = q.Encode() } - -//nolint -const ( - EvmMaxInFlightTransactionsWarningLabel = `WARNING: If this happens a lot, you may need to increase ETH_MAX_IN_FLIGHT_TRANSACTIONS to boost your node's transaction throughput, however you do this at your own risk. You MUST first ensure your ethereum node is configured not to ever evict local transactions that exceed this number otherwise the node can get permanently stuck` - EvmMaxQueuedTransactionsLabel = `WARNING: Hitting ETH_MAX_QUEUED_TRANSACTIONS is a sanity limit and should never happen under normal operation. This error is very unlikely to be a problem with Chainlink, and instead more likely to be caused by a problem with your eth node's connectivity. Check your eth node: it may not be broadcasting transactions to the network, or it might be overloaded and evicting Chainlink's transactions from its mempool. 
Increasing ETH_MAX_QUEUED_TRANSACTIONS is almost certainly not the correct action to take here unless you ABSOLUTELY know what you are doing, and will probably make things worse` - EthNodeConnectivityProblemLabel = `WARNING: If this happens a lot, it may be a sign that your eth node has a connectivity problem, and your transactions are not making it to any miners` -) diff --git a/core/store/migrate/migrate_test.go b/core/store/migrate/migrate_test.go index 145bd7846be..a7c12e3a1f9 100644 --- a/core/store/migrate/migrate_test.go +++ b/core/store/migrate/migrate_test.go @@ -60,7 +60,7 @@ func getOCR2Spec100() OffchainReporting2OracleSpec100 { } func TestMigrate_0100_BootstrapConfigs(t *testing.T) { - _, db := heavyweight.FullTestDB(t, migrationDir, false, false) + _, db := heavyweight.FullTestDBEmpty(t, migrationDir) lggr := logger.TestLogger(t) cfg := configtest.NewTestGeneralConfig(t) err := goose.UpTo(db.DB, migrationDir, 99) @@ -330,7 +330,7 @@ ON jobs.offchainreporting2_oracle_spec_id = ocr2.id` } func TestMigrate_101_GenericOCR2(t *testing.T) { - _, db := heavyweight.FullTestDB(t, migrationDir, false, false) + _, db := heavyweight.FullTestDBEmpty(t, migrationDir) err := goose.UpTo(db.DB, migrationDir, 100) require.NoError(t, err) diff --git a/core/store/migrate/migrations/0108_upgrade_keepers_tx_meta.sql b/core/store/migrate/migrations/0108_upgrade_keepers_tx_meta.sql new file mode 100644 index 00000000000..bfed0cfd965 --- /dev/null +++ b/core/store/migrate/migrations/0108_upgrade_keepers_tx_meta.sql @@ -0,0 +1,63 @@ +-- +goose Up +UPDATE pipeline_specs +SET dot_dag_source = 'encode_check_upkeep_tx [type=ethabiencode + abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] +check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + gasPrice="$(jobSpec.gasPrice)" + 
gasTipCap="$(jobSpec.gasTipCap)" + gasFeeCap="$(jobSpec.gasFeeCap)" + data="$(encode_check_upkeep_tx)"] +decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] +encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] +perform_upkeep_tx [type=ethtx + minConfirmations=0 + to="$(jobSpec.contractAddress)" + from="[$(jobSpec.fromAddress)]" + evmChainID="$(jobSpec.evmChainID)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID),\"upkeepID\":$(jobSpec.upkeepID)}"] +encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx' +WHERE id IN ( + SELECT pipeline_spec_id + FROM jobs + WHERE type = 'keeper' AND schema_version = 3 +); + +-- +goose Down +UPDATE pipeline_specs +SET dot_dag_source = 'encode_check_upkeep_tx [type=ethabiencode + abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] +check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + data="$(encode_check_upkeep_tx)"] +decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] +encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] +perform_upkeep_tx [type=ethtx + minConfirmations=0 + to="$(jobSpec.contractAddress)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID)}"] 
+encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx' +WHERE id IN ( + SELECT pipeline_spec_id + FROM jobs + WHERE type = 'keeper' AND schema_version = 3 +); diff --git a/core/store/migrate/migrations/0109_solana_chains_nodes.sql b/core/store/migrate/migrations/0109_solana_chains_nodes.sql new file mode 100644 index 00000000000..6339f4e5ed4 --- /dev/null +++ b/core/store/migrate/migrations/0109_solana_chains_nodes.sql @@ -0,0 +1,26 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE solana_chains ( + id text PRIMARY KEY, + cfg jsonb NOT NULL DEFAULT '{}', + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + enabled BOOL DEFAULT TRUE NOT NULL +); +CREATE TABLE solana_nodes ( + id serial PRIMARY KEY, + name varchar(255) NOT NULL CHECK (name != ''), + solana_chain_id text NOT NULL REFERENCES solana_chains (id) ON DELETE CASCADE, + solana_url text CHECK (solana_url != ''), + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL +); +CREATE INDEX idx_nodes_solana_chain_id ON solana_nodes (solana_chain_id); +CREATE UNIQUE INDEX idx_solana_nodes_unique_name ON solana_nodes (lower(name)); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TABLE solana_nodes; +DROP TABLE solana_chains; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0110_add_vrf_chunk_size.sql b/core/store/migrate/migrations/0110_add_vrf_chunk_size.sql new file mode 100644 index 00000000000..4458f51a01a --- /dev/null +++ b/core/store/migrate/migrations/0110_add_vrf_chunk_size.sql @@ -0,0 +1,5 @@ +-- +goose Up +ALTER TABLE vrf_specs ADD COLUMN chunk_size bigint NOT NULL DEFAULT 20; + +-- +goose Down +ALTER TABLE vrf_specs DROP COLUMN chunk_size; diff --git a/core/store/migrate/migrations/0111_terra_msgs_state_started.sql b/core/store/migrate/migrations/0111_terra_msgs_state_started.sql new file mode 100644 index 00000000000..4b92ccb4b06 --- /dev/null +++ 
b/core/store/migrate/migrations/0111_terra_msgs_state_started.sql @@ -0,0 +1,46 @@ +-- +goose Up +-- +goose StatementBegin + +CREATE INDEX idx_terra_msgs_terra_chain_id_contract_id_state ON terra_msgs (terra_chain_id, contract_id, state); + +CREATE OR REPLACE FUNCTION check_terra_msg_state_transition() RETURNS TRIGGER AS $$ +DECLARE +state_transition_map jsonb := json_build_object( + 'unstarted', json_build_object('errored', true, 'started', true), + 'started', json_build_object('errored', true, 'broadcasted', true), + 'broadcasted', json_build_object('errored', true, 'confirmed', true)); +BEGIN + IF NOT state_transition_map ? OLD.state THEN + RAISE EXCEPTION 'Invalid from state %. Valid from states %', OLD.state, state_transition_map; +END IF; + IF NOT state_transition_map->OLD.state ? NEW.state THEN + RAISE EXCEPTION 'Invalid state transition from % to %. Valid to states %', OLD.state, NEW.state, state_transition_map->OLD.state; +END IF; +RETURN NEW; +END +$$ LANGUAGE plpgsql; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +CREATE OR REPLACE FUNCTION check_terra_msg_state_transition() RETURNS TRIGGER AS $$ +DECLARE +state_transition_map jsonb := json_build_object( + 'unstarted', json_build_object('errored', true, 'broadcasted', true), + 'broadcasted', json_build_object('errored', true, 'confirmed', true)); +BEGIN + IF NOT state_transition_map ? OLD.state THEN + RAISE EXCEPTION 'Invalid from state %. Valid from states %', OLD.state, state_transition_map; +END IF; + IF NOT state_transition_map->OLD.state ? NEW.state THEN + RAISE EXCEPTION 'Invalid state transition from % to %. 
Valid to states %', OLD.state, NEW.state, state_transition_map->OLD.state; +END IF; +RETURN NEW; +END +$$ LANGUAGE plpgsql; + +DROP INDEX idx_terra_msgs_terra_chain_id_contract_id_state; + +-- +goose StatementEnd \ No newline at end of file diff --git a/core/store/migrate/migrations/0112_vrf_batch_coordinator_address.sql b/core/store/migrate/migrations/0112_vrf_batch_coordinator_address.sql new file mode 100644 index 00000000000..c2c3f94ecf2 --- /dev/null +++ b/core/store/migrate/migrations/0112_vrf_batch_coordinator_address.sql @@ -0,0 +1,9 @@ +-- +goose Up +ALTER TABLE vrf_specs ADD COLUMN batch_coordinator_address bytea, + ADD COLUMN batch_fulfillment_enabled bool NOT NULL DEFAULT false, + ADD COLUMN batch_fulfillment_gas_multiplier double precision NOT NULL DEFAULT 1.15; + +-- +goose Down +ALTER TABLE vrf_specs DROP COLUMN batch_coordinator_address, + DROP COLUMN batch_fulfillment_enabled, + DROP COLUMN batch_fulfillment_gas_multiplier; diff --git a/core/store/migrate/migrations/0113_vrf_v2_backoff_columns.sql b/core/store/migrate/migrations/0113_vrf_v2_backoff_columns.sql new file mode 100644 index 00000000000..5bc0813b33c --- /dev/null +++ b/core/store/migrate/migrations/0113_vrf_v2_backoff_columns.sql @@ -0,0 +1,17 @@ +-- +goose Up +ALTER TABLE vrf_specs + ADD COLUMN "backoff_initial_delay" BIGINT + CHECK (backoff_initial_delay >= 0) + DEFAULT 0 + NOT NULL; + +ALTER TABLE vrf_specs + ADD COLUMN "backoff_max_delay" BIGINT + CHECK (backoff_max_delay >= 0) + DEFAULT 0 + NOT NULL; + +-- +goose Down +ALTER TABLE vrf_specs DROP COLUMN "backoff_initial_delay"; + +ALTER TABLE vrf_specs DROP COLUMN "backoff_max_delay"; diff --git a/core/store/migrate/migrations/0114_add_last_keeper_id_to_upkeep_table.sql b/core/store/migrate/migrations/0114_add_last_keeper_id_to_upkeep_table.sql new file mode 100644 index 00000000000..3f13ce98cbf --- /dev/null +++ b/core/store/migrate/migrations/0114_add_last_keeper_id_to_upkeep_table.sql @@ -0,0 +1,11 @@ +-- +goose Up +ALTER TABLE 
upkeep_registrations + ADD COLUMN IF NOT EXISTS last_keeper_index integer DEFAULT NULL; +ALTER TABLE keeper_registries + ADD COLUMN IF NOT EXISTS keeper_index_map jsonb DEFAULT NULL; + +-- +goose Down +ALTER TABLE upkeep_registrations + DROP COLUMN IF EXISTS last_keeper_index; +ALTER TABLE keeper_registries + DROP COLUMN IF EXISTS keeper_index_map; diff --git a/core/store/migrate/migrations/0115_log_poller.sql b/core/store/migrate/migrations/0115_log_poller.sql new file mode 100644 index 00000000000..ff29310df08 --- /dev/null +++ b/core/store/migrate/migrations/0115_log_poller.sql @@ -0,0 +1,32 @@ +-- +goose Up +CREATE TABLE logs ( + evm_chain_id numeric(78,0) NOT NULL REFERENCES evm_chains (id) DEFERRABLE, + log_index bigint NOT NULL, + block_hash bytea NOT NULL, + block_number bigint NOT NULL CHECK (block_number > 0), + address bytea NOT NULL, + event_sig bytea NOT NULL, + topics bytea[] NOT NULL, + tx_hash bytea NOT NULL, + data bytea NOT NULL, + created_at timestamptz NOT NULL, + PRIMARY KEY (block_hash, log_index, evm_chain_id) +); + +-- Hot path query - clients searching for their logs. +CREATE INDEX logs_idx ON logs(evm_chain_id, block_number, address, event_sig); + +CREATE TABLE log_poller_blocks ( + evm_chain_id numeric(78,0) NOT NULL REFERENCES evm_chains (id) DEFERRABLE, + block_hash bytea NOT NULL, + block_number bigint NOT NULL CHECK (block_number > 0), + created_at timestamptz NOT NULL, + -- Only permit one block_number at a time + -- i.e. 
the poller is only ever aware of the canonical branch + PRIMARY KEY (block_number, evm_chain_id) +); + +-- +goose Down +DROP INDEX logs_idx; +DROP TABLE logs; +DROP TABLE log_poller_blocks; diff --git a/core/store/models/solana/common.go b/core/store/models/solana/common.go new file mode 100644 index 00000000000..2ef0379252c --- /dev/null +++ b/core/store/models/solana/common.go @@ -0,0 +1,11 @@ +package solana + +import "github.com/gagliardetto/solana-go" + +type SendRequest struct { + From solana.PublicKey `json:"from"` + To solana.PublicKey `json:"to"` + Amount uint64 `json:"amount"` + SolanaChainID string `json:"solanaChainID"` + AllowHigherAmounts bool `json:"allowHigherAmounts"` +} diff --git a/core/testdata/testspecs/v2_specs.go b/core/testdata/testspecs/v2_specs.go index 02b7f1ce00f..f8e4f6df3ab 100644 --- a/core/testdata/testspecs/v2_specs.go +++ b/core/testdata/testspecs/v2_specs.go @@ -2,6 +2,7 @@ package testspecs import ( "fmt" + "strconv" "strings" "time" @@ -22,6 +23,18 @@ ds_parse [type=jsonparse path="data,price"]; ds_multiply [type=multiply times=100]; ds -> ds_parse -> ds_multiply; """ +` + CronSpecDotSep = ` +type = "cron" +schemaVersion = 1 +schedule = "CRON_TZ=UTC * 0 0 1 1 *" +externalJobID = "123e4567-e89b-12d3-a456-426655440013" +observationSource = """ +ds [type=http method=GET url="https://chain.link/ETH-USD"]; +ds_parse [type=jsonparse path="data.price" separator="."]; +ds_multiply [type=multiply times=100]; +ds -> ds_parse -> ds_multiply; +""" ` DirectRequestSpecNoExternalJobID = ` type = "directrequest" @@ -97,26 +110,21 @@ answer1 [type=median index=0]; schemaVersion = 1 name = "local testing job" contractID = "VT3AvPr2nyE9Kr7ydDXVvgvJXyBr9tHA5hd6a1GBGBx" -isBootstrapPeer = false p2pBootstrapPeers = [] relay = "solana" +pluginType = "median" transmitterID = "8AuzafoGEz92Z3WGFfKuEh2Ca794U3McLJBy7tfmDynK" observationSource = """ """ +[pluginConfig] juelsPerFeeCoinSource = """ """ [relayConfig] -nodeEndpointHTTP = "http://127.0.0.1:8899" 
ocr2ProgramID = "CF13pnKGJ1WJZeEgVAtFdUi4MMndXm9hneiHs8azUaZt" storeProgramID = "A7Jh2nb1hZHwqEofm4N8SXbKTj82rx7KUfjParQXUyMQ" transmissionsID = "J6RRmA39u8ZBwrMvRPrJA3LMdg73trb6Qhfo8vjSeadg" -usePreflight = true -commitment = "processed" -txTimeout = "1m" -pollingInterval = "2s" -pollingCtxTimeout = "4s" -staleTimeout = "30s"` +chainID = "Chainlink-99"` OCR2TerraSpecMinimal = `type = "offchainreporting2" schemaVersion = 1 name = "local testing job" @@ -228,7 +236,7 @@ perform_upkeep_tx [type=ethtx evmChainID="$(jobSpec.evmChainID)" data="$(encode_perform_upkeep_tx)" gasLimit="$(jobSpec.performUpkeepGasLimit)" - txMeta="{\\"jobID\\":$(jobSpec.jobID)}"] + txMeta="{\\"jobID\\":$(jobSpec.jobID),\\"upkeepID\\":$(jobSpec.upkeepID)}"] encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx """ ` @@ -239,16 +247,22 @@ encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_pe } type VRFSpecParams struct { - JobID string - Name string - CoordinatorAddress string - MinIncomingConfirmations int - FromAddresses []string - PublicKey string - ObservationSource string - RequestedConfsDelay int - RequestTimeout time.Duration - V2 bool + JobID string + Name string + CoordinatorAddress string + BatchCoordinatorAddress string + BatchFulfillmentEnabled bool + BatchFulfillmentGasMultiplier float64 + MinIncomingConfirmations int + FromAddresses []string + PublicKey string + ObservationSource string + RequestedConfsDelay int + RequestTimeout time.Duration + V2 bool + ChunkSize int + BackoffInitialDelay time.Duration + BackoffMaxDelay time.Duration } type VRFSpec struct { @@ -273,6 +287,14 @@ func GenerateVRFSpec(params VRFSpecParams) VRFSpec { if params.CoordinatorAddress != "" { coordinatorAddress = params.CoordinatorAddress } + batchCoordinatorAddress := "0x5C7B1d96CA3132576A84423f624C2c492f668Fea" + if params.BatchCoordinatorAddress != "" { + batchCoordinatorAddress = params.BatchCoordinatorAddress + } 
+ batchFulfillmentGasMultiplier := 1.0 + if params.BatchFulfillmentGasMultiplier >= 1.0 { + batchFulfillmentGasMultiplier = params.BatchFulfillmentGasMultiplier + } confirmations := 6 if params.MinIncomingConfirmations != 0 { confirmations = params.MinIncomingConfirmations @@ -285,6 +307,10 @@ func GenerateVRFSpec(params VRFSpecParams) VRFSpec { if params.PublicKey != "" { publicKey = params.PublicKey } + chunkSize := 20 + if params.ChunkSize != 0 { + chunkSize = params.ChunkSize + } observationSource := fmt.Sprintf(` decode_log [type=ethabidecodelog abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" @@ -340,16 +366,25 @@ type = "vrf" schemaVersion = 1 name = "%s" coordinatorAddress = "%s" +batchCoordinatorAddress = "%s" +batchFulfillmentEnabled = %v +batchFulfillmentGasMultiplier = %s minIncomingConfirmations = %d requestedConfsDelay = %d requestTimeout = "%s" publicKey = "%s" +chunkSize = %d +backoffInitialDelay = "%s" +backoffMaxDelay = "%s" observationSource = """ %s """ ` - toml := fmt.Sprintf(template, jobID, name, coordinatorAddress, confirmations, params.RequestedConfsDelay, - requestTimeout.String(), publicKey, observationSource) + toml := fmt.Sprintf(template, + jobID, name, coordinatorAddress, batchCoordinatorAddress, + params.BatchFulfillmentEnabled, strconv.FormatFloat(batchFulfillmentGasMultiplier, 'f', 2, 64), + confirmations, params.RequestedConfsDelay, requestTimeout.String(), publicKey, chunkSize, + params.BackoffInitialDelay.String(), params.BackoffMaxDelay.String(), observationSource) if len(params.FromAddresses) != 0 { var addresses []string for _, address := range params.FromAddresses { @@ -362,11 +397,16 @@ observationSource = """ JobID: jobID, Name: name, CoordinatorAddress: coordinatorAddress, + BatchCoordinatorAddress: batchCoordinatorAddress, + BatchFulfillmentEnabled: params.BatchFulfillmentEnabled, MinIncomingConfirmations: confirmations, PublicKey: publicKey, 
ObservationSource: observationSource, RequestedConfsDelay: params.RequestedConfsDelay, RequestTimeout: requestTimeout, + ChunkSize: chunkSize, + BackoffInitialDelay: params.BackoffInitialDelay, + BackoffMaxDelay: params.BackoffMaxDelay, }, toml: toml} } @@ -374,8 +414,10 @@ type OCRSpecParams struct { JobID string Name string TransmitterAddress string + ContractAddress string DS1BridgeName string DS2BridgeName string + EVMChainID string } type OCRSpec struct { @@ -396,6 +438,10 @@ func GenerateOCRSpec(params OCRSpecParams) OCRSpec { if params.TransmitterAddress != "" { transmitterAddress = params.TransmitterAddress } + contractAddress := "0x613a38AC1659769640aaE063C651F48E0250454C" + if params.ContractAddress != "" { + contractAddress = params.ContractAddress + } name := "web oracle spec" if params.Name != "" { name = params.Name @@ -408,11 +454,17 @@ func GenerateOCRSpec(params OCRSpecParams) OCRSpec { if params.DS2BridgeName != "" { ds2BridgeName = params.DS2BridgeName } + // set to empty so it defaults to the default evm chain id + evmChainID := "0" + if params.EVMChainID != "" { + evmChainID = params.EVMChainID + } template := ` type = "offchainreporting" schemaVersion = 1 name = "%s" -contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +contractAddress = "%s" +evmChainID = %s p2pPeerID = "12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X" externalJobID = "%s" p2pBootstrapPeers = [ @@ -451,7 +503,7 @@ observationSource = """ TransmitterAddress: transmitterAddress, DS1BridgeName: ds1BridgeName, DS2BridgeName: ds2BridgeName, - }, toml: fmt.Sprintf(template, name, jobID, transmitterAddress, ds1BridgeName, ds2BridgeName)} + }, toml: fmt.Sprintf(template, name, contractAddress, evmChainID, jobID, transmitterAddress, ds1BridgeName, ds2BridgeName)} } type WebhookSpecParams struct { diff --git a/core/utils/big_math/big_math.go b/core/utils/big_math/big_math.go index 77f4808cec2..c896ab31def 100644 --- a/core/utils/big_math/big_math.go +++ 
b/core/utils/big_math/big_math.go @@ -35,6 +35,27 @@ func Mod(dividend, divisor interface{}) *big.Int { return I().Mod(bnIfy(dividend // Sub performs subtraction with the given values after coercing them to big.Int, or panics if it cannot. func Sub(minuend, subtrahend interface{}) *big.Int { return I().Sub(bnIfy(minuend), bnIfy(subtrahend)) } +// Max returns the maximum of the two given values after coercing them to big.Int, +// or panics if it cannot. +func Max(x, y interface{}) *big.Int { + xBig := bnIfy(x) + yBig := bnIfy(y) + if xBig.Cmp(yBig) == 1 { + return xBig + } + return yBig +} + +// Accumulate returns the sum of the given slice after coercing all elements +// to a big.Int, or panics if it cannot. +func Accumulate(s []interface{}) (r *big.Int) { + r = big.NewInt(0) + for _, e := range s { + r.Add(r, bnIfy(e)) + } + return +} + func bnIfy(val interface{}) *big.Int { switch v := val.(type) { case uint: diff --git a/core/utils/big_math/big_math_test.go b/core/utils/big_math/big_math_test.go new file mode 100644 index 00000000000..ddf5e0a955e --- /dev/null +++ b/core/utils/big_math/big_math_test.go @@ -0,0 +1,55 @@ +package bigmath + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMax(t *testing.T) { + testCases := []struct { + x interface{} + y interface{} + expected *big.Int + }{ + { + x: int32(1), + y: int32(2), + expected: big.NewInt(2), + }, + { + x: big.NewInt(1), + y: big.NewInt(2), + expected: big.NewInt(2), + }, + { + x: float64(1.0), + y: float64(2.0), + expected: big.NewInt(2), + }, + { + x: "1", + y: "2", + expected: big.NewInt(2), + }, + { + x: uint(1), + y: uint(2), + expected: big.NewInt(2), + }, + } + for _, testCase := range testCases { + m := Max(testCase.x, testCase.y) + require.Equal(t, 0, testCase.expected.Cmp(m)) + } +} + +func TestAccumulate(t *testing.T) { + s := []interface{}{1, 2, 3, 4, 5} + expected := big.NewInt(15) + require.Equal(t, expected, Accumulate(s)) + s = []interface{}{} + 
expected = big.NewInt(0) + require.Equal(t, expected, Accumulate(s)) +} diff --git a/core/utils/hex.go b/core/utils/hex.go deleted file mode 100644 index c6d781c02aa..00000000000 --- a/core/utils/hex.go +++ /dev/null @@ -1,12 +0,0 @@ -package utils - -import ( - "encoding/hex" - "strings" -) - -// IsHexBytes returns true if the given bytes array is basically HEX encoded value. -func IsHexBytes(arr []byte) bool { - _, err := hex.DecodeString(strings.TrimPrefix(string(arr), "0x")) - return err == nil -} diff --git a/core/utils/hex_test.go b/core/utils/hex_test.go deleted file mode 100644 index 660b1d32d03..00000000000 --- a/core/utils/hex_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package utils - -import ( - "encoding/hex" - "testing" -) - -func TestIsHexBytes(t *testing.T) { - type args struct { - arr []byte - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "hex string with prefix", - args: args{ - arr: []byte("0x" + hex.EncodeToString([]byte(`test`))), - }, - want: true, - }, - { - name: "hex string without prefix", - args: args{ - arr: []byte(hex.EncodeToString([]byte(`test`))), - }, - want: true, - }, - { - name: "not a hex string", - args: args{ - arr: []byte(`123 not hex`), - }, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := IsHexBytes(tt.args.arr); got != tt.want { - t.Errorf("IsHexBytes() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/core/utils/mailbox.go b/core/utils/mailbox.go index c5ea8d96d1d..002b7eb2aa5 100644 --- a/core/utils/mailbox.go +++ b/core/utils/mailbox.go @@ -8,10 +8,10 @@ import ( // a mutual exclusive lock, // a queue of interfaces, // and a queue capacity. -type Mailbox struct { +type Mailbox[T any] struct { chNotify chan struct{} mu sync.Mutex - queue []interface{} + queue []T // capacity - number of items the mailbox can buffer // NOTE: if the capacity is 1, it's possible that an empty Retrieve may occur after a notification. 
@@ -20,34 +20,34 @@ type Mailbox struct { // NewHighCapacityMailbox create a new mailbox with a capacity // that is better able to handle e.g. large log replays -func NewHighCapacityMailbox() *Mailbox { - return NewMailbox(100000) +func NewHighCapacityMailbox[T any]() *Mailbox[T] { + return NewMailbox[T](100000) } // NewMailbox creates a new mailbox instance -func NewMailbox(capacity uint64) *Mailbox { +func NewMailbox[T any](capacity uint64) *Mailbox[T] { queueCap := capacity if queueCap == 0 { queueCap = 100 } - return &Mailbox{ + return &Mailbox[T]{ chNotify: make(chan struct{}, 1), - queue: make([]interface{}, 0, queueCap), + queue: make([]T, 0, queueCap), capacity: capacity, } } // Notify returns the contents of the notify channel -func (m *Mailbox) Notify() chan struct{} { +func (m *Mailbox[T]) Notify() chan struct{} { return m.chNotify } -// Deliver appends an interface to the queue -func (m *Mailbox) Deliver(x interface{}) (wasOverCapacity bool) { +// Deliver appends to the queue +func (m *Mailbox[T]) Deliver(x T) (wasOverCapacity bool) { m.mu.Lock() defer m.mu.Unlock() - m.queue = append([]interface{}{x}, m.queue...) + m.queue = append([]T{x}, m.queue...) if uint64(len(m.queue)) > m.capacity && m.capacity > 0 { m.queue = m.queue[:len(m.queue)-1] wasOverCapacity = true @@ -60,26 +60,27 @@ func (m *Mailbox) Deliver(x interface{}) (wasOverCapacity bool) { return } -// Retrieve fetches an interface from the queue -func (m *Mailbox) Retrieve() (interface{}, bool) { +// Retrieve fetches from the queue +func (m *Mailbox[T]) Retrieve() (t T, ok bool) { m.mu.Lock() defer m.mu.Unlock() if len(m.queue) == 0 { - return nil, false + return } - x := m.queue[len(m.queue)-1] + t = m.queue[len(m.queue)-1] m.queue = m.queue[:len(m.queue)-1] - return x, true + ok = true + return } // RetrieveLatestAndClear returns the latest value (or nil), and clears the queue. 
-func (m *Mailbox) RetrieveLatestAndClear() interface{} { +func (m *Mailbox[T]) RetrieveLatestAndClear() (t T) { m.mu.Lock() defer m.mu.Unlock() if len(m.queue) == 0 { - return nil + return } - x := m.queue[0] + t = m.queue[0] m.queue = nil - return x + return } diff --git a/core/utils/mailbox_test.go b/core/utils/mailbox_test.go index 6f63d518337..f7deaf45562 100644 --- a/core/utils/mailbox_test.go +++ b/core/utils/mailbox_test.go @@ -14,7 +14,7 @@ func TestMailbox(t *testing.T) { ) const capacity = 10 - m := utils.NewMailbox(capacity) + m := utils.NewMailbox[int](capacity) // Queue deliveries for i, d := range toDeliver { @@ -37,7 +37,7 @@ func TestMailbox(t *testing.T) { if !exists { break } - recvd = append(recvd, x.(int)) + recvd = append(recvd, x) } } }() @@ -49,7 +49,7 @@ func TestMailbox(t *testing.T) { } func TestMailbox_NoEmptyReceivesWhenCapacityIsTwo(t *testing.T) { - m := utils.NewMailbox(2) + m := utils.NewMailbox[int](2) var ( recvd []int @@ -64,7 +64,7 @@ func TestMailbox_NoEmptyReceivesWhenCapacityIsTwo(t *testing.T) { if !exists { emptyReceives = append(emptyReceives, recvd[len(recvd)-1]) } else { - recvd = append(recvd, x.(int)) + recvd = append(recvd, x) } } }() diff --git a/core/utils/mocks/disk_stats_provider.go b/core/utils/mocks/disk_stats_provider.go index a0c45b037df..502acdc3b12 100644 --- a/core/utils/mocks/disk_stats_provider.go +++ b/core/utils/mocks/disk_stats_provider.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.8.0. DO NOT EDIT. +// Code generated by mockery v2.10.1. DO NOT EDIT. package mocks diff --git a/core/utils/utils.go b/core/utils/utils.go index d18d4cfe783..4b02f9b6ba4 100644 --- a/core/utils/utils.go +++ b/core/utils/utils.go @@ -487,85 +487,84 @@ func (da *dependentAwaiter) DependentReady() { } // BoundedQueue is a FIFO queue that discards older items when it reaches its capacity. 
-type BoundedQueue struct { - capacity uint - items []interface{} - mu *sync.RWMutex +type BoundedQueue[T any] struct { + capacity int + items []T + mu sync.RWMutex } // NewBoundedQueue creates a new BoundedQueue instance -func NewBoundedQueue(capacity uint) *BoundedQueue { - return &BoundedQueue{ - capacity: capacity, - mu: &sync.RWMutex{}, - } +func NewBoundedQueue[T any](capacity int) *BoundedQueue[T] { + var bq BoundedQueue[T] + bq.capacity = capacity + return &bq } // Add appends items to a BoundedQueue -func (q *BoundedQueue) Add(x interface{}) { +func (q *BoundedQueue[T]) Add(x T) { q.mu.Lock() defer q.mu.Unlock() q.items = append(q.items, x) - if uint(len(q.items)) > q.capacity { - excess := uint(len(q.items)) - q.capacity + if len(q.items) > q.capacity { + excess := len(q.items) - q.capacity q.items = q.items[excess:] } } // Take pulls the first item from the array and removes it -func (q *BoundedQueue) Take() interface{} { +func (q *BoundedQueue[T]) Take() (t T) { q.mu.Lock() defer q.mu.Unlock() if len(q.items) == 0 { - return nil + return } - x := q.items[0] + t = q.items[0] q.items = q.items[1:] - return x + return } // Empty check is a BoundedQueue is empty -func (q *BoundedQueue) Empty() bool { +func (q *BoundedQueue[T]) Empty() bool { q.mu.RLock() defer q.mu.RUnlock() return len(q.items) == 0 } // Full checks if a BoundedQueue is over capacity. 
-func (q *BoundedQueue) Full() bool { +func (q *BoundedQueue[T]) Full() bool { q.mu.RLock() defer q.mu.RUnlock() - return uint(len(q.items)) >= q.capacity + return len(q.items) >= q.capacity } // BoundedPriorityQueue stores a series of BoundedQueues // with associated priorities and capacities -type BoundedPriorityQueue struct { - queues map[uint]*BoundedQueue +type BoundedPriorityQueue[T any] struct { + queues map[uint]*BoundedQueue[T] priorities []uint - capacities map[uint]uint - mu *sync.RWMutex + capacities map[uint]int + mu sync.RWMutex } // NewBoundedPriorityQueue creates a new BoundedPriorityQueue -func NewBoundedPriorityQueue(capacities map[uint]uint) *BoundedPriorityQueue { - queues := make(map[uint]*BoundedQueue) +func NewBoundedPriorityQueue[T any](capacities map[uint]int) *BoundedPriorityQueue[T] { + queues := make(map[uint]*BoundedQueue[T]) var priorities []uint for priority, capacity := range capacities { priorities = append(priorities, priority) - queues[priority] = NewBoundedQueue(capacity) + queues[priority] = NewBoundedQueue[T](capacity) } sort.Slice(priorities, func(i, j int) bool { return priorities[i] < priorities[j] }) - return &BoundedPriorityQueue{ + bpq := BoundedPriorityQueue[T]{ queues: queues, priorities: priorities, capacities: capacities, - mu: &sync.RWMutex{}, } + return &bpq } // Add pushes an item into a subque within a BoundedPriorityQueue -func (q *BoundedPriorityQueue) Add(priority uint, x interface{}) { +func (q *BoundedPriorityQueue[T]) Add(priority uint, x T) { q.mu.Lock() defer q.mu.Unlock() @@ -578,7 +577,7 @@ func (q *BoundedPriorityQueue) Add(priority uint, x interface{}) { } // Take takes from the BoundedPriorityQueue's subque -func (q *BoundedPriorityQueue) Take() interface{} { +func (q *BoundedPriorityQueue[T]) Take() (t T) { q.mu.Lock() defer q.mu.Unlock() @@ -589,12 +588,12 @@ func (q *BoundedPriorityQueue) Take() interface{} { } return queue.Take() } - return nil + return } // Empty checks the BoundedPriorityQueue 
// if all subqueues are empty -func (q *BoundedPriorityQueue) Empty() bool { +func (q *BoundedPriorityQueue[T]) Empty() bool { q.mu.RLock() defer q.mu.RUnlock() @@ -1005,3 +1004,13 @@ func BoxOutput(errorMsgTemplate string, errorMsgValues ...interface{}) string { return "\n" + output + "↗" + strings.Repeat("↑", internalLength) + "↖" + // bottom line "\n\n" } + +// AllEqual returns true iff all the provided elements are equal to each other. +func AllEqual[T comparable](elems ...T) bool { + for i := 1; i < len(elems); i++ { + if elems[i] != elems[0] { + return false + } + } + return true +} diff --git a/core/utils/utils_test.go b/core/utils/utils_test.go index 6830f2c3f08..34e63939a01 100644 --- a/core/utils/utils_test.go +++ b/core/utils/utils_test.go @@ -289,7 +289,7 @@ func TestDependentAwaiter(t *testing.T) { func TestBoundedQueue(t *testing.T) { t.Parallel() - q := utils.NewBoundedQueue(3) + q := utils.NewBoundedQueue[int](3) require.True(t, q.Empty()) require.False(t, q.Full()) @@ -297,11 +297,10 @@ func TestBoundedQueue(t *testing.T) { require.False(t, q.Empty()) require.False(t, q.Full()) - x := q.Take().(int) + x := q.Take() require.Equal(t, 1, x) - iface := q.Take() - require.Nil(t, iface) + require.Zero(t, q.Take()) require.True(t, q.Empty()) require.False(t, q.Full()) @@ -311,17 +310,17 @@ func TestBoundedQueue(t *testing.T) { q.Add(4) require.True(t, q.Full()) - x = q.Take().(int) + x = q.Take() require.Equal(t, 2, x) require.False(t, q.Empty()) require.False(t, q.Full()) - x = q.Take().(int) + x = q.Take() require.Equal(t, 3, x) require.False(t, q.Empty()) require.False(t, q.Full()) - x = q.Take().(int) + x = q.Take() require.Equal(t, 4, x) require.True(t, q.Empty()) require.False(t, q.Full()) @@ -330,7 +329,7 @@ func TestBoundedQueue(t *testing.T) { func TestBoundedPriorityQueue(t *testing.T) { t.Parallel() - q := utils.NewBoundedPriorityQueue(map[uint]uint{ + q := utils.NewBoundedPriorityQueue[int](map[uint]int{ 1: 3, 2: 1, }) @@ -339,12 +338,11 @@ 
func TestBoundedPriorityQueue(t *testing.T) { q.Add(1, 1) require.False(t, q.Empty()) - x := q.Take().(int) + x := q.Take() require.Equal(t, 1, x) require.True(t, q.Empty()) - iface := q.Take() - require.Nil(t, iface) + require.Zero(t, q.Take()) require.True(t, q.Empty()) q.Add(2, 1) @@ -352,34 +350,32 @@ func TestBoundedPriorityQueue(t *testing.T) { q.Add(1, 3) q.Add(1, 4) - x = q.Take().(int) + x = q.Take() require.Equal(t, 2, x) require.False(t, q.Empty()) - x = q.Take().(int) + x = q.Take() require.Equal(t, 3, x) require.False(t, q.Empty()) - x = q.Take().(int) + x = q.Take() require.Equal(t, 4, x) require.False(t, q.Empty()) - x = q.Take().(int) + x = q.Take() require.Equal(t, 1, x) require.True(t, q.Empty()) - iface = q.Take() - require.Nil(t, iface) + require.Zero(t, q.Take()) q.Add(2, 1) q.Add(2, 2) - x = q.Take().(int) + x = q.Take() require.Equal(t, 2, x) require.True(t, q.Empty()) - iface = q.Take() - require.Nil(t, iface) + require.Zero(t, q.Take()) } func TestEVMBytesToUint64(t *testing.T) { @@ -474,3 +470,9 @@ func Test_StartStopOnce_MultipleStartNoBlock(t *testing.T) { require.Equal(t, 3, <-ch) // 3 arrives before 2 because it returns immediately require.Equal(t, 2, <-ch) } + +func TestAllEqual(t *testing.T) { + require.False(t, utils.AllEqual(1, 2, 3, 4, 5)) + require.True(t, utils.AllEqual(1, 1, 1, 1, 1)) + require.False(t, utils.AllEqual(1, 1, 1, 2, 1, 1, 1)) +} diff --git a/core/web/bridge_types_controller.go b/core/web/bridge_types_controller.go index a29c24241c4..af41963844a 100644 --- a/core/web/bridge_types_controller.go +++ b/core/web/bridge_types_controller.go @@ -31,9 +31,8 @@ func ValidateBridgeTypeNotExist(bt *bridges.BridgeTypeRequest, orm bridges.ORM) return fe.CoerceEmptyToNil() } -// ValidateBridgeType checks that the bridge type doesn't have a duplicate -// or invalid name or invalid url -func ValidateBridgeType(bt *bridges.BridgeTypeRequest, orm bridges.ORM) error { +// ValidateBridgeType checks that the bridge type has the 
required field with valid values. +func ValidateBridgeType(bt *bridges.BridgeTypeRequest) error { fe := models.NewJSONAPIErrors() if len(bt.Name.String()) < 1 { fe.Add("No name specified") @@ -70,11 +69,11 @@ func (btc *BridgeTypesController) Create(c *gin.Context) { jsonAPIError(c, http.StatusInternalServerError, err) return } - orm := btc.App.BridgeORM() - if e := ValidateBridgeType(btr, orm); e != nil { + if e := ValidateBridgeType(btr); e != nil { jsonAPIError(c, http.StatusBadRequest, e) return } + orm := btc.App.BridgeORM() if e := ValidateBridgeTypeNotExist(btr, orm); e != nil { jsonAPIError(c, http.StatusBadRequest, e) return @@ -162,7 +161,7 @@ func (btc *BridgeTypesController) Update(c *gin.Context) { jsonAPIError(c, http.StatusUnprocessableEntity, err) return } - if err := ValidateBridgeType(btr, orm); err != nil { + if err := ValidateBridgeType(btr); err != nil { jsonAPIError(c, http.StatusBadRequest, err) return } diff --git a/core/web/bridge_types_controller_test.go b/core/web/bridge_types_controller_test.go index 29ace518b3f..08845542d88 100644 --- a/core/web/bridge_types_controller_test.go +++ b/core/web/bridge_types_controller_test.go @@ -23,10 +23,6 @@ import ( func TestValidateBridgeType(t *testing.T) { t.Parallel() - db := pgtest.NewSqlxDB(t) - cfg := cltest.NewTestGeneralConfig(t) - orm := bridges.NewORM(db, logger.TestLogger(t), cfg) - tests := []struct { description string request bridges.BridgeTypeRequest @@ -108,7 +104,7 @@ func TestValidateBridgeType(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { - result := web.ValidateBridgeType(&test.request, orm) + result := web.ValidateBridgeType(&test.request) assert.Equal(t, test.want, result) }) } diff --git a/core/web/eth_keys_controller.go b/core/web/eth_keys_controller.go index 9300a0389cf..b7e12d62eee 100644 --- a/core/web/eth_keys_controller.go +++ b/core/web/eth_keys_controller.go @@ -35,7 +35,7 @@ func (ekc *ETHKeysController) Index(c *gin.Context) 
{ if ekc.App.GetConfig().Dev() { keys, err = ethKeyStore.GetAll() } else { - keys, err = ethKeyStore.SendingKeys() + keys, err = ethKeyStore.SendingKeys(nil) } if err != nil { err = errors.Errorf("error getting unlocked keys: %v", err) diff --git a/core/web/evm_forwarders_controller_test.go b/core/web/evm_forwarders_controller_test.go index a26f09ed28c..09049fbe0fb 100644 --- a/core/web/evm_forwarders_controller_test.go +++ b/core/web/evm_forwarders_controller_test.go @@ -43,10 +43,10 @@ func Test_EVMForwardersController_Create(t *testing.T) { controller := setupEVMForwardersControllerTest(t) // Setting up chain - chainId := *utils.NewBigI(42) + chainId := testutils.NewRandomEVMChainID() chaincfg := types.ChainCfg{} chainSet := controller.app.GetChains().EVM - dbChain, err := chainSet.ORM().CreateChain(utils.Big(*chainId.ToInt()), chaincfg) + dbChain, err := chainSet.ORM().CreateChain(utils.Big(*chainId), chaincfg) require.NoError(t, err) // Build EVMForwarderRequest @@ -75,10 +75,10 @@ func Test_EVMForwardersController_Index(t *testing.T) { controller := setupEVMForwardersControllerTest(t) // Setting up chain - chainId := *utils.NewBigI(42) + chainId := testutils.NewRandomEVMChainID() chaincfg := types.ChainCfg{} chainSet := controller.app.GetChains().EVM - dbChain, err := chainSet.ORM().CreateChain(utils.Big(*chainId.ToInt()), chaincfg) + dbChain, err := chainSet.ORM().CreateChain(utils.Big(*chainId), chaincfg) require.NoError(t, err) // Build EVMForwarderRequest diff --git a/core/web/evm_nodes_controller.go b/core/web/evm_nodes_controller.go index 67e2e22f463..ee06a0c3705 100644 --- a/core/web/evm_nodes_controller.go +++ b/core/web/evm_nodes_controller.go @@ -58,7 +58,13 @@ func (nc *EVMNodesController) Create(c *gin.Context) { return } - node, err := nc.App.EVMORM().CreateNode(request) + node, err := nc.App.EVMORM().CreateNode(types.Node{ + Name: request.Name, + EVMChainID: request.EVMChainID, + WSURL: request.WSURL, + HTTPURL: request.HTTPURL, + SendOnly: 
request.SendOnly, + }) if err != nil { jsonAPIError(c, http.StatusBadRequest, err) @@ -70,13 +76,13 @@ func (nc *EVMNodesController) Create(c *gin.Context) { // Delete removes an EVM node. func (nc *EVMNodesController) Delete(c *gin.Context) { - id, err := strconv.ParseInt(c.Param("ID"), 10, 64) + id, err := strconv.ParseInt(c.Param("ID"), 10, 32) if err != nil { jsonAPIError(c, http.StatusUnprocessableEntity, err) return } - err = nc.App.EVMORM().DeleteNode(id) + err = nc.App.EVMORM().DeleteNode(int32(id)) if err != nil { jsonAPIError(c, http.StatusInternalServerError, err) diff --git a/core/web/jobs_controller_test.go b/core/web/jobs_controller_test.go index e8de1586f4b..826d1e28d6b 100644 --- a/core/web/jobs_controller_test.go +++ b/core/web/jobs_controller_test.go @@ -223,6 +223,23 @@ func TestJobController_Create_HappyPath(t *testing.T) { require.Equal(t, "CRON_TZ=UTC * 0 0 1 1 *", jb.CronSpec.CronSchedule) }, }, + { + name: "cron-dot-separator", + toml: testspecs.CronSpecDotSep, + assertion: func(t *testing.T, r *http.Response) { + require.Equal(t, http.StatusOK, r.StatusCode) + resource := presenters.JobResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, r), &resource) + assert.NoError(t, err) + + jb, err := jorm.FindJob(context.Background(), mustInt32FromString(t, resource.ID)) + require.NoError(t, err) + require.NotNil(t, jb.CronSpec) + + assert.NotNil(t, resource.PipelineSpec.DotDAGSource) + require.Equal(t, "CRON_TZ=UTC * 0 0 1 1 *", jb.CronSpec.CronSchedule) + }, + }, { name: "directrequest", toml: testspecs.DirectRequestSpec, diff --git a/core/web/loader/loader_test.go b/core/web/loader/loader_test.go index d0de11e86c1..ab987edf7d9 100644 --- a/core/web/loader/loader_test.go +++ b/core/web/loader/loader_test.go @@ -16,6 +16,7 @@ import ( txmgrMocks "github.com/smartcontractkit/chainlink/core/chains/evm/txmgr/mocks" "github.com/smartcontractkit/chainlink/core/chains/evm/types" coremocks 
"github.com/smartcontractkit/chainlink/core/internal/mocks" + "github.com/smartcontractkit/chainlink/core/internal/testutils/evmtest" "github.com/smartcontractkit/chainlink/core/services/chainlink" "github.com/smartcontractkit/chainlink/core/services/feeds" feedsMocks "github.com/smartcontractkit/chainlink/core/services/feeds/mocks" @@ -28,12 +29,11 @@ import ( func TestLoader_Chains(t *testing.T) { t.Parallel() - evmORM := &evmmocks.ORM{} app := &coremocks.Application{} ctx := InjectDataloader(context.Background(), app) defer t.Cleanup(func() { - mock.AssertExpectationsForObjects(t, app, evmORM) + mock.AssertExpectationsForObjects(t, app) }) id := utils.Big{} @@ -56,11 +56,7 @@ func TestLoader_Chains(t *testing.T) { ID: id2, Enabled: true, } - - evmORM.On("GetChainsByIDs", []utils.Big{id2, id, chainId3}).Return([]types.Chain{ - chain, - chain2, - }, nil) + evmORM := evmtest.NewMockORM([]types.Chain{chain, chain2}, nil) app.On("EVMORM").Return(evmORM) batcher := chainBatcher{app} diff --git a/core/web/presenters/job.go b/core/web/presenters/job.go index d8972401d48..b3be468c1e7 100644 --- a/core/web/presenters/job.go +++ b/core/web/presenters/job.go @@ -276,18 +276,27 @@ func NewCronSpec(spec *job.CronSpec) *CronSpec { } type VRFSpec struct { - CoordinatorAddress ethkey.EIP55Address `json:"coordinatorAddress"` - PublicKey secp256k1.PublicKey `json:"publicKey"` - FromAddresses []ethkey.EIP55Address `json:"fromAddresses"` - PollPeriod models.Duration `json:"pollPeriod"` - MinIncomingConfirmations uint32 `json:"confirmations"` - CreatedAt time.Time `json:"createdAt"` - UpdatedAt time.Time `json:"updatedAt"` - EVMChainID *utils.Big `json:"evmChainID"` + BatchCoordinatorAddress *ethkey.EIP55Address `json:"batchCoordinatorAddress"` + BatchFulfillmentEnabled bool `json:"batchFulfillmentEnabled"` + BatchFulfillmentGasMultiplier float64 `json:"batchFulfillmentGasMultiplier"` + CoordinatorAddress ethkey.EIP55Address `json:"coordinatorAddress"` + PublicKey secp256k1.PublicKey 
`json:"publicKey"` + FromAddresses []ethkey.EIP55Address `json:"fromAddresses"` + PollPeriod models.Duration `json:"pollPeriod"` + MinIncomingConfirmations uint32 `json:"confirmations"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + EVMChainID *utils.Big `json:"evmChainID"` + ChunkSize uint32 `json:"chunkSize"` + RequestTimeout models.Duration `json:"requestTimeout"` + BackoffInitialDelay models.Duration `json:"backoffInitialDelay"` + BackoffMaxDelay models.Duration `json:"backoffMaxDelay"` } func NewVRFSpec(spec *job.VRFSpec) *VRFSpec { return &VRFSpec{ + BatchCoordinatorAddress: spec.BatchCoordinatorAddress, + BatchFulfillmentEnabled: spec.BatchFulfillmentEnabled, CoordinatorAddress: spec.CoordinatorAddress, PublicKey: spec.PublicKey, FromAddresses: spec.FromAddresses, @@ -296,6 +305,10 @@ func NewVRFSpec(spec *job.VRFSpec) *VRFSpec { CreatedAt: spec.CreatedAt, UpdatedAt: spec.UpdatedAt, EVMChainID: spec.EVMChainID, + ChunkSize: spec.ChunkSize, + RequestTimeout: models.MustMakeDuration(spec.RequestTimeout), + BackoffInitialDelay: models.MustMakeDuration(spec.BackoffInitialDelay), + BackoffMaxDelay: models.MustMakeDuration(spec.BackoffMaxDelay), } } diff --git a/core/web/presenters/solana_chain.go b/core/web/presenters/solana_chain.go new file mode 100644 index 00000000000..bc3e918bd99 --- /dev/null +++ b/core/web/presenters/solana_chain.go @@ -0,0 +1,61 @@ +package presenters + +import ( + "time" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + + "github.com/smartcontractkit/chainlink/core/chains/solana" +) + +// SolanaChainResource is an Solana chain JSONAPI resource. 
+type SolanaChainResource struct {
+	JAID
+	Enabled   bool        `json:"enabled"`
+	Config    db.ChainCfg `json:"config"`
+	CreatedAt time.Time   `json:"createdAt"`
+	UpdatedAt time.Time   `json:"updatedAt"`
+}
+
+// GetName implements the api2go EntityNamer interface
+func (r SolanaChainResource) GetName() string {
+	return "solana_chain"
+}
+
+// NewSolanaChainResource returns a new SolanaChainResource for chain.
+func NewSolanaChainResource(chain solana.Chain) SolanaChainResource {
+	return SolanaChainResource{
+		JAID:      NewJAID(chain.ID),
+		Config:    chain.Cfg,
+		Enabled:   chain.Enabled,
+		CreatedAt: chain.CreatedAt,
+		UpdatedAt: chain.UpdatedAt,
+	}
+}
+
+// SolanaNodeResource is a Solana node JSONAPI resource.
+type SolanaNodeResource struct {
+	JAID
+	Name          string    `json:"name"`
+	SolanaChainID string    `json:"solanaChainID"`
+	SolanaURL     string    `json:"solanaURL"`
+	CreatedAt     time.Time `json:"createdAt"`
+	UpdatedAt     time.Time `json:"updatedAt"`
+}
+
+// GetName implements the api2go EntityNamer interface
+func (r SolanaNodeResource) GetName() string {
+	return "solana_node"
+}
+
+// NewSolanaNodeResource returns a new SolanaNodeResource for node.
+func NewSolanaNodeResource(node db.Node) SolanaNodeResource {
+	return SolanaNodeResource{
+		JAID:          NewJAIDInt32(node.ID),
+		Name:          node.Name,
+		SolanaChainID: node.SolanaChainID,
+		SolanaURL:     node.SolanaURL,
+		CreatedAt:     node.CreatedAt,
+		UpdatedAt:     node.UpdatedAt,
+	}
+}
diff --git a/core/web/presenters/solana_msg.go b/core/web/presenters/solana_msg.go
new file mode 100644
index 00000000000..b7330754e38
--- /dev/null
+++ b/core/web/presenters/solana_msg.go
@@ -0,0 +1,23 @@
+package presenters
+
+// SolanaMsgResource represents a Solana message JSONAPI resource.
+type SolanaMsgResource struct {
+	JAID
+	ChainID string
+	From    string `json:"from"`
+	To      string `json:"to"`
+	Amount  uint64 `json:"amount"`
+}
+
+// GetName implements the api2go EntityNamer interface
+func (SolanaMsgResource) GetName() string {
+	return "solana_messages"
+}
+
+// NewSolanaMsgResource returns a new partial SolanaMsgResource.
+func NewSolanaMsgResource(id string, chainID string) SolanaMsgResource {
+	return SolanaMsgResource{
+		JAID:    NewJAID(id),
+		ChainID: chainID,
+	}
+}
diff --git a/core/web/presenters/terra_chain.go b/core/web/presenters/terra_chain.go
index eb2a687f8d2..da25a28f7ba 100644
--- a/core/web/presenters/terra_chain.go
+++ b/core/web/presenters/terra_chain.go
@@ -4,6 +4,8 @@ import (
 	"time"
 
 	"github.com/smartcontractkit/chainlink-terra/pkg/terra/db"
+
+	"github.com/smartcontractkit/chainlink/core/chains/terra/types"
 )
 
 // TerraChainResource is an Terra chain JSONAPI resource.
@@ -21,7 +23,7 @@ func (r TerraChainResource) GetName() string {
 }
 
 // NewTerraChainResource returns a new TerraChainResource for chain.
-func NewTerraChainResource(chain db.Chain) TerraChainResource { +func NewTerraChainResource(chain types.Chain) TerraChainResource { return TerraChainResource{ JAID: NewJAID(chain.ID), Config: chain.Cfg, diff --git a/core/web/resolver/config_test.go b/core/web/resolver/config_test.go index 77cfcb62b0f..0ec6295d8a0 100644 --- a/core/web/resolver/config_test.go +++ b/core/web/resolver/config_test.go @@ -80,6 +80,8 @@ func TestResolver_Config(t *testing.T) { KeeperMaximumGracePeriod: null.IntFrom(1), KeeperRegistrySyncInterval: nil, KeeperRegistrySyncUpkeepQueueSize: null.IntFrom(1), + KeeperTurnLookBack: null.IntFrom(0), + KeeperTurnFlagEnabled: null.BoolFrom(true), LogLevel: &logLevel, DefaultLogLevel: nil, LogFileDir: null.StringFrom("foo"), @@ -270,6 +272,14 @@ func TestResolver_Config(t *testing.T) { "key": "KEEPER_CHECK_UPKEEP_GAS_PRICE_FEATURE_ENABLED", "value": "false" }, + { + "key": "KEEPER_TURN_LOOK_BACK", + "value": "0" + }, + { + "key": "KEEPER_TURN_FLAG_ENABLED", + "value": "true" + }, { "key": "LEASE_LOCK_DURATION", "value": "10s" diff --git a/core/web/resolver/eth_key_test.go b/core/web/resolver/eth_key_test.go index 03e15da2605..a39653a50da 100644 --- a/core/web/resolver/eth_key_test.go +++ b/core/web/resolver/eth_key_test.go @@ -97,14 +97,7 @@ func TestResolver_ETHKeys(t *testing.T) { f.Mocks.chain.On("Config").Return(f.Mocks.scfg) f.Mocks.chainSet.On("Get", states[0].EVMChainID.ToInt()).Return(f.Mocks.chain, nil) f.Mocks.chainSet.On("Get", states[1].EVMChainID.ToInt()).Return(f.Mocks.chain, nil) - f.Mocks.evmORM.On("GetChainsByIDs", []utils.Big{chainID, chainID2}).Return([]types.Chain{ - { - ID: chainID, - }, - { - ID: chainID2, - }, - }, nil) + f.Mocks.evmORM.PutChains(types.Chain{ID: chainID}, types.Chain{ID: chainID2}) f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) f.App.On("GetKeyStore").Return(f.Mocks.keystore) f.App.On("EVMORM").Return(f.Mocks.evmORM) @@ -161,7 +154,7 @@ func TestResolver_ETHKeys(t *testing.T) { 
f.Mocks.cfg.On("Dev").Return(false) f.App.On("GetConfig").Return(f.Mocks.cfg) - f.Mocks.ethKs.On("SendingKeys").Return(keys, nil) + f.Mocks.ethKs.On("SendingKeys", (*big.Int)(nil)).Return(keys, nil) f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(states, nil) f.Mocks.ethKs.On("Get", keys[0].Address.Hex()).Return(keys[0], nil) f.Mocks.ethClient.On("GetLINKBalance", linkAddr, address.Address()).Return(assets.NewLinkFromJuels(12), nil) @@ -172,11 +165,7 @@ func TestResolver_ETHKeys(t *testing.T) { f.Mocks.scfg.On("KeySpecificMaxGasPriceWei", keys[0].Address.Address()).Return(big.NewInt(1)) f.Mocks.chain.On("Config").Return(f.Mocks.scfg) f.Mocks.chainSet.On("Get", states[0].EVMChainID.ToInt()).Return(f.Mocks.chain, nil) - f.Mocks.evmORM.On("GetChainsByIDs", []utils.Big{chainID}).Return([]types.Chain{ - { - ID: chainID, - }, - }, nil) + f.Mocks.evmORM.PutChains(types.Chain{ID: chainID}) f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) f.App.On("GetKeyStore").Return(f.Mocks.keystore) f.App.On("EVMORM").Return(f.Mocks.evmORM) @@ -220,15 +209,11 @@ func TestResolver_ETHKeys(t *testing.T) { f.Mocks.cfg.On("Dev").Return(false) f.App.On("GetConfig").Return(f.Mocks.cfg) - f.Mocks.ethKs.On("SendingKeys").Return(keys, nil) + f.Mocks.ethKs.On("SendingKeys", (*big.Int)(nil)).Return(keys, nil) f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(states, nil) f.Mocks.ethKs.On("Get", keys[0].Address.Hex()).Return(keys[0], nil) f.Mocks.chainSet.On("Get", states[0].EVMChainID.ToInt()).Return(f.Mocks.chain, evm.ErrNoChains) - f.Mocks.evmORM.On("GetChainsByIDs", []utils.Big{chainID}).Return([]types.Chain{ - { - ID: chainID, - }, - }, nil) + f.Mocks.evmORM.PutChains(types.Chain{ID: chainID}) f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) f.App.On("GetKeyStore").Return(f.Mocks.keystore) f.App.On("EVMORM").Return(f.Mocks.evmORM) @@ -282,7 +267,7 @@ func TestResolver_ETHKeys(t *testing.T) { before: func(f *gqlTestFramework) { f.Mocks.cfg.On("Dev").Return(false) 
f.App.On("GetConfig").Return(f.Mocks.cfg) - f.Mocks.ethKs.On("SendingKeys").Return(nil, gError) + f.Mocks.ethKs.On("SendingKeys", (*big.Int)(nil)).Return(nil, gError) f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) f.App.On("GetKeyStore").Return(f.Mocks.keystore) }, @@ -303,7 +288,7 @@ func TestResolver_ETHKeys(t *testing.T) { before: func(f *gqlTestFramework) { f.Mocks.cfg.On("Dev").Return(false) f.App.On("GetConfig").Return(f.Mocks.cfg) - f.Mocks.ethKs.On("SendingKeys").Return(keys, nil) + f.Mocks.ethKs.On("SendingKeys", (*big.Int)(nil)).Return(keys, nil) f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(nil, gError) f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) f.App.On("GetKeyStore").Return(f.Mocks.keystore) @@ -335,7 +320,7 @@ func TestResolver_ETHKeys(t *testing.T) { f.Mocks.cfg.On("Dev").Return(false) f.App.On("GetConfig").Return(f.Mocks.cfg) - f.Mocks.ethKs.On("SendingKeys").Return(keys, nil) + f.Mocks.ethKs.On("SendingKeys", (*big.Int)(nil)).Return(keys, nil) f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(states, nil) f.Mocks.ethKs.On("Get", keys[0].Address.Hex()).Return(ethkey.KeyV2{}, gError) f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) @@ -368,7 +353,7 @@ func TestResolver_ETHKeys(t *testing.T) { f.Mocks.cfg.On("Dev").Return(false) f.App.On("GetConfig").Return(f.Mocks.cfg) - f.Mocks.ethKs.On("SendingKeys").Return(keys, nil) + f.Mocks.ethKs.On("SendingKeys", (*big.Int)(nil)).Return(keys, nil) f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(states, nil) f.Mocks.ethKs.On("Get", keys[0].Address.Hex()).Return(ethkey.KeyV2{}, nil) f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) @@ -405,7 +390,7 @@ func TestResolver_ETHKeys(t *testing.T) { f.Mocks.cfg.On("Dev").Return(false) f.App.On("GetConfig").Return(f.Mocks.cfg) - f.Mocks.ethKs.On("SendingKeys").Return(keys, nil) + f.Mocks.ethKs.On("SendingKeys", (*big.Int)(nil)).Return(keys, nil) f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(states, nil) f.Mocks.ethKs.On("Get", 
keys[0].Address.Hex()).Return(keys[0], nil) f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) @@ -419,11 +404,7 @@ func TestResolver_ETHKeys(t *testing.T) { f.App.On("GetChains").Return(chainlink.Chains{EVM: f.Mocks.chainSet}) f.Mocks.scfg.On("KeySpecificMaxGasPriceWei", keys[0].Address.Address()).Return(big.NewInt(1)) f.Mocks.chain.On("Config").Return(f.Mocks.scfg) - f.Mocks.evmORM.On("GetChainsByIDs", []utils.Big{chainID}).Return([]types.Chain{ - { - ID: chainID, - }, - }, nil) + f.Mocks.evmORM.PutChains(types.Chain{ID: chainID}) f.App.On("EVMORM").Return(f.Mocks.evmORM) }, query: query, @@ -465,7 +446,7 @@ func TestResolver_ETHKeys(t *testing.T) { f.Mocks.cfg.On("Dev").Return(false) f.App.On("GetConfig").Return(f.Mocks.cfg) - f.Mocks.ethKs.On("SendingKeys").Return(keys, nil) + f.Mocks.ethKs.On("SendingKeys", (*big.Int)(nil)).Return(keys, nil) f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(states, nil) f.Mocks.ethKs.On("Get", keys[0].Address.Hex()).Return(keys[0], nil) f.Mocks.ethClient.On("GetLINKBalance", linkAddr, address.Address()).Return(assets.NewLinkFromJuels(12), nil) @@ -475,11 +456,7 @@ func TestResolver_ETHKeys(t *testing.T) { f.Mocks.scfg.On("KeySpecificMaxGasPriceWei", keys[0].Address.Address()).Return(big.NewInt(1)) f.Mocks.chain.On("Config").Return(f.Mocks.scfg) f.Mocks.chainSet.On("Get", states[0].EVMChainID.ToInt()).Return(f.Mocks.chain, nil) - f.Mocks.evmORM.On("GetChainsByIDs", []utils.Big{chainID}).Return([]types.Chain{ - { - ID: chainID, - }, - }, nil) + f.Mocks.evmORM.PutChains(types.Chain{ID: chainID}) f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) f.App.On("GetKeyStore").Return(f.Mocks.keystore) f.App.On("EVMORM").Return(f.Mocks.evmORM) diff --git a/core/web/resolver/eth_transaction_test.go b/core/web/resolver/eth_transaction_test.go index 9e24d3919ee..0056db840c9 100644 --- a/core/web/resolver/eth_transaction_test.go +++ b/core/web/resolver/eth_transaction_test.go @@ -80,11 +80,7 @@ func TestResolver_EthTransaction(t *testing.T) { }, 
}, nil) f.App.On("TxmORM").Return(f.Mocks.txmORM) - f.Mocks.evmORM.On("GetChainsByIDs", []utils.Big{chainID}).Return([]types.Chain{ - { - ID: chainID, - }, - }, nil) + f.Mocks.evmORM.PutChains(types.Chain{ID: chainID}) f.App.On("EVMORM").Return(f.Mocks.evmORM) }, query: query, @@ -140,11 +136,7 @@ func TestResolver_EthTransaction(t *testing.T) { }, }, nil) f.App.On("TxmORM").Return(f.Mocks.txmORM) - f.Mocks.evmORM.On("GetChainsByIDs", []utils.Big{chainID}).Return([]types.Chain{ - { - ID: chainID, - }, - }, nil) + f.Mocks.evmORM.PutChains(types.Chain{ID: chainID}) f.App.On("EVMORM").Return(f.Mocks.evmORM) }, query: query, diff --git a/core/web/resolver/evm_chain_test.go b/core/web/resolver/evm_chain_test.go index 7676c4308dc..1a9e46a14a6 100644 --- a/core/web/resolver/evm_chain_test.go +++ b/core/web/resolver/evm_chain_test.go @@ -74,29 +74,27 @@ func TestResolver_Chains(t *testing.T) { require.NoError(t, err) f.App.On("EVMORM").Return(f.Mocks.evmORM) - f.Mocks.evmORM.On("Chains", PageDefaultOffset, PageDefaultLimit).Return([]types.Chain{ - { - ID: chainID, - Enabled: true, - CreatedAt: f.Timestamp(), - Cfg: types.ChainCfg{ - BlockHistoryEstimatorBlockDelay: null.IntFrom(1), - EthTxReaperThreshold: &threshold, - EthTxResendAfterThreshold: &threshold, - EvmEIP1559DynamicFees: null.BoolFrom(true), - EvmGasLimitMultiplier: null.FloatFrom(1.23), - GasEstimatorMode: null.StringFrom("BlockHistory"), - ChainType: null.StringFrom("optimism"), - LinkContractAddress: null.StringFrom(linkContractAddress), - KeySpecific: map[string]types.ChainCfg{ - "test-address": { - BlockHistoryEstimatorBlockDelay: null.IntFrom(0), - EvmEIP1559DynamicFees: null.BoolFrom(false), - }, + f.Mocks.evmORM.PutChains(types.Chain{ + ID: chainID, + Enabled: true, + CreatedAt: f.Timestamp(), + Cfg: types.ChainCfg{ + BlockHistoryEstimatorBlockDelay: null.IntFrom(1), + EthTxReaperThreshold: &threshold, + EthTxResendAfterThreshold: &threshold, + EvmEIP1559DynamicFees: null.BoolFrom(true), + 
EvmGasLimitMultiplier: null.FloatFrom(1.23), + GasEstimatorMode: null.StringFrom("BlockHistory"), + ChainType: null.StringFrom("optimism"), + LinkContractAddress: null.StringFrom(linkContractAddress), + KeySpecific: map[string]types.ChainCfg{ + "test-address": { + BlockHistoryEstimatorBlockDelay: null.IntFrom(0), + EvmEIP1559DynamicFees: null.BoolFrom(false), }, }, }, - }, 1, nil) + }) f.App.On("GetChains").Return(chainlink.Chains{EVM: f.Mocks.chainSet}) f.Mocks.chainSet.On("GetNodesByChainIDs", mock.Anything, []utils.Big{chainID}). Return([]types.Node{ @@ -197,7 +195,7 @@ func TestResolver_Chain(t *testing.T) { f.App.On("EVMORM").Return(f.Mocks.evmORM) f.App.On("GetChains").Return(chainlink.Chains{EVM: f.Mocks.chainSet}) - f.Mocks.evmORM.On("Chain", chainID).Return(types.Chain{ + f.Mocks.evmORM.PutChains(types.Chain{ ID: chainID, Enabled: true, CreatedAt: f.Timestamp(), @@ -216,7 +214,7 @@ func TestResolver_Chain(t *testing.T) { }, }, }, - }, nil) + }) f.Mocks.chainSet.On("GetNodesByChainIDs", mock.Anything, []utils.Big{chainID}). 
Return([]types.Node{ { @@ -259,7 +257,6 @@ func TestResolver_Chain(t *testing.T) { authenticated: true, before: func(f *gqlTestFramework) { f.App.On("EVMORM").Return(f.Mocks.evmORM) - f.Mocks.evmORM.On("Chain", chainID).Return(types.Chain{}, sql.ErrNoRows) }, query: query, result: ` @@ -516,9 +513,7 @@ func TestResolver_DeleteChain(t *testing.T) { name: "success", authenticated: true, before: func(f *gqlTestFramework) { - f.Mocks.evmORM.On("Chain", chainID).Return(types.Chain{ - ID: chainID, - }, nil) + f.Mocks.evmORM.PutChains(types.Chain{ID: chainID}) f.Mocks.chainSet.On("Remove", chainID.ToInt()).Return(nil) f.App.On("EVMORM").Return(f.Mocks.evmORM) f.App.On("GetChains").Return(chainlink.Chains{EVM: f.Mocks.chainSet}) @@ -538,7 +533,6 @@ func TestResolver_DeleteChain(t *testing.T) { name: "not found error", authenticated: true, before: func(f *gqlTestFramework) { - f.Mocks.evmORM.On("Chain", chainID).Return(types.Chain{}, sql.ErrNoRows) f.App.On("EVMORM").Return(f.Mocks.evmORM) }, query: mutation, @@ -555,9 +549,7 @@ func TestResolver_DeleteChain(t *testing.T) { name: "generic error on delete", authenticated: true, before: func(f *gqlTestFramework) { - f.Mocks.evmORM.On("Chain", chainID).Return(types.Chain{ - ID: chainID, - }, nil) + f.Mocks.evmORM.PutChains(types.Chain{ID: chainID}) f.Mocks.chainSet.On("Remove", chainID.ToInt()).Return(gError) f.App.On("EVMORM").Return(f.Mocks.evmORM) f.App.On("GetChains").Return(chainlink.Chains{EVM: f.Mocks.chainSet}) diff --git a/core/web/resolver/mutation.go b/core/web/resolver/mutation.go index ce7cff4fbf8..12f55d23bd8 100644 --- a/core/web/resolver/mutation.go +++ b/core/web/resolver/mutation.go @@ -362,7 +362,13 @@ func (r *Resolver) CreateNode(ctx context.Context, args struct { return nil, err } - node, err := r.App.EVMORM().CreateNode(*args.Input) + node, err := r.App.EVMORM().CreateNode(types.Node{ + Name: args.Input.Name, + EVMChainID: args.Input.EVMChainID, + WSURL: args.Input.WSURL, + HTTPURL: args.Input.HTTPURL, + 
SendOnly: args.Input.SendOnly, + }) if err != nil { return nil, err } @@ -391,7 +397,7 @@ func (r *Resolver) DeleteNode(ctx context.Context, args struct { return nil, err } - err = r.App.EVMORM().DeleteNode(int64(id)) + err = r.App.EVMORM().DeleteNode(id) if err != nil { if errors.Is(err, sql.ErrNoRows) { // Sending the SQL error as the expected error to happen diff --git a/core/web/resolver/node_test.go b/core/web/resolver/node_test.go index c84d8969843..46cde4a4f9d 100644 --- a/core/web/resolver/node_test.go +++ b/core/web/resolver/node_test.go @@ -59,11 +59,7 @@ func TestResolver_Nodes(t *testing.T) { }, }, 1, nil) f.App.On("EVMORM").Return(f.Mocks.evmORM) - f.Mocks.evmORM.On("GetChainsByIDs", []utils.Big{chainID}).Return([]types.Chain{ - { - ID: chainID, - }, - }, nil) + f.Mocks.evmORM.PutChains(types.Chain{ID: chainID}) }, query: query, result: ` @@ -214,17 +210,7 @@ func Test_CreateNodeMutation(t *testing.T) { authenticated: true, before: func(f *gqlTestFramework) { f.App.On("EVMORM").Return(f.Mocks.evmORM) - f.Mocks.evmORM.On("CreateNode", createNodeInput).Return(types.Node{ - ID: int32(1), - Name: createNodeInput.Name, - EVMChainID: createNodeInput.EVMChainID, - WSURL: createNodeInput.WSURL, - HTTPURL: createNodeInput.HTTPURL, - SendOnly: createNodeInput.SendOnly, - }, nil) - f.Mocks.evmORM.On("GetChainsByIDs", []utils.Big{createNodeInput.EVMChainID}).Return([]types.Chain{ - {ID: *utils.NewBigI(1), Enabled: true}, - }, nil) + f.Mocks.evmORM.PutChains(types.Chain{ID: *utils.NewBigI(1), Enabled: true}) }, query: mutation, variables: input, @@ -303,7 +289,7 @@ func Test_DeleteNodeMutation(t *testing.T) { before: func(f *gqlTestFramework) { f.App.On("EVMORM").Return(f.Mocks.evmORM) f.Mocks.chainSet.On("GetNode", mock.Anything, fakeID).Return(fakeNode, nil) - f.Mocks.evmORM.On("DeleteNode", int64(2)).Return(nil) + f.Mocks.evmORM.AddNodes(types.Node{ID: 2}) f.App.On("GetChains").Return(chainlink.Chains{EVM: f.Mocks.chainSet}) }, query: mutation, @@ -333,7 +319,6 
@@ func Test_DeleteNodeMutation(t *testing.T) { before: func(f *gqlTestFramework) { f.App.On("EVMORM").Return(f.Mocks.evmORM) f.Mocks.chainSet.On("GetNode", mock.Anything, fakeID).Return(fakeNode, nil) - f.Mocks.evmORM.On("DeleteNode", int64(2)).Return(sql.ErrNoRows) f.App.On("GetChains").Return(chainlink.Chains{EVM: f.Mocks.chainSet}) }, query: mutation, diff --git a/core/web/resolver/query.go b/core/web/resolver/query.go index 2175201cb55..d5d0c5f3a56 100644 --- a/core/web/resolver/query.go +++ b/core/web/resolver/query.go @@ -390,7 +390,7 @@ func (r *Resolver) ETHKeys(ctx context.Context) (*ETHKeysPayloadResolver, error) if r.App.GetConfig().Dev() { keys, err = ks.GetAll() } else { - keys, err = ks.SendingKeys() + keys, err = ks.SendingKeys(nil) } if err != nil { return nil, fmt.Errorf("error getting unlocked keys: %v", err) @@ -428,12 +428,10 @@ func (r *Resolver) ETHKeys(ctx context.Context) (*ETHKeysPayloadResolver, error) chain: chain, }) } - // Put funding keys to the end sort.SliceStable(ethKeys, func(i, j int) bool { - return !ethKeys[i].state.IsFunding && ethKeys[j].state.IsFunding + return !states[i].IsFunding && states[j].IsFunding }) - return NewETHKeysPayload(ethKeys), nil } diff --git a/core/web/resolver/resolver_test.go b/core/web/resolver/resolver_test.go index 2854a233bc8..b1e56de5d59 100644 --- a/core/web/resolver/resolver_test.go +++ b/core/web/resolver/resolver_test.go @@ -16,6 +16,7 @@ import ( txmgrMocks "github.com/smartcontractkit/chainlink/core/chains/evm/txmgr/mocks" configMocks "github.com/smartcontractkit/chainlink/core/config/mocks" coremocks "github.com/smartcontractkit/chainlink/core/internal/mocks" + "github.com/smartcontractkit/chainlink/core/internal/testutils/evmtest" feedsMocks "github.com/smartcontractkit/chainlink/core/services/feeds/mocks" jobORMMocks "github.com/smartcontractkit/chainlink/core/services/job/mocks" keystoreMocks "github.com/smartcontractkit/chainlink/core/services/keystore/mocks" @@ -30,7 +31,7 @@ import ( 
type mocks struct { bridgeORM *bridgeORMMocks.ORM - evmORM *evmORMMocks.ORM + evmORM *evmtest.MockORM jobORM *jobORMMocks.ORM sessionsORM *sessionsMocks.ORM pipelineORM *pipelineMocks.ORM @@ -87,7 +88,7 @@ func setupFramework(t *testing.T) *gqlTestFramework { // Note - If you add a new mock make sure you assert it's expectation below. m := &mocks{ bridgeORM: &bridgeORMMocks.ORM{}, - evmORM: &evmORMMocks.ORM{}, + evmORM: evmtest.NewMockORM(nil, nil), jobORM: &jobORMMocks.ORM{}, feedsSvc: &feedsMocks.Service{}, sessionsORM: &sessionsMocks.ORM{}, @@ -115,7 +116,6 @@ func setupFramework(t *testing.T) *gqlTestFramework { mock.AssertExpectationsForObjects(t, app, m.bridgeORM, - m.evmORM, m.jobORM, m.sessionsORM, m.pipelineORM, diff --git a/core/web/resolver/spec.go b/core/web/resolver/spec.go index 0c3405b1cae..1643cd80a6f 100644 --- a/core/web/resolver/spec.go +++ b/core/web/resolver/spec.go @@ -151,8 +151,8 @@ func (r *DirectRequestSpecResolver) MinIncomingConfirmationsEnv() bool { return r.spec.MinIncomingConfirmationsEnv } -// MinContractPayment resolves the spec's evm chain id. -func (r *DirectRequestSpecResolver) MinContractPayment() string { +// MinContractPaymentLinkJuels resolves the spec's evm chain id. +func (r *DirectRequestSpecResolver) MinContractPaymentLinkJuels() string { return r.spec.MinContractPayment.String() } @@ -653,6 +653,40 @@ func (r *VRFSpecResolver) RequestTimeout() string { return r.spec.RequestTimeout.String() } +// BatchCoordinatorAddress resolves the spec's batch coordinator address. +func (r *VRFSpecResolver) BatchCoordinatorAddress() *string { + if r.spec.BatchCoordinatorAddress == nil { + return nil + } + addr := r.spec.BatchCoordinatorAddress.String() + return &addr +} + +// BatchFulfillmentEnabled resolves the spec's batch fulfillment enabled flag. 
+func (r *VRFSpecResolver) BatchFulfillmentEnabled() bool { + return r.spec.BatchFulfillmentEnabled +} + +// BatchFulfillmentGasMultiplier resolves the spec's batch fulfillment gas multiplier. +func (r *VRFSpecResolver) BatchFulfillmentGasMultiplier() float64 { + return r.spec.BatchFulfillmentGasMultiplier +} + +// ChunkSize resolves the spec's chunk size. +func (r *VRFSpecResolver) ChunkSize() int32 { + return int32(r.spec.ChunkSize) +} + +// BackoffInitialDelay resolves the spec's backoff initial delay. +func (r *VRFSpecResolver) BackoffInitialDelay() string { + return r.spec.BackoffInitialDelay.String() +} + +// BackoffMaxDelay resolves the spec's backoff max delay. +func (r *VRFSpecResolver) BackoffMaxDelay() string { + return r.spec.BackoffMaxDelay.String() +} + type WebhookSpecResolver struct { spec job.WebhookSpec } diff --git a/core/web/resolver/spec_test.go b/core/web/resolver/spec_test.go index 1a1ad659c99..c20caf17af7 100644 --- a/core/web/resolver/spec_test.go +++ b/core/web/resolver/spec_test.go @@ -112,7 +112,7 @@ func TestResolver_DirectRequestSpec(t *testing.T) { evmChainID minIncomingConfirmations minIncomingConfirmationsEnv - minContractPayment + minContractPaymentLinkJuels requesters } } @@ -130,7 +130,7 @@ func TestResolver_DirectRequestSpec(t *testing.T) { "evmChainID": "42", "minIncomingConfirmations": 1, "minIncomingConfirmationsEnv": true, - "minContractPayment": "1000", + "minContractPaymentLinkJuels": "1000", "requesters": ["0x3cCad4715152693fE3BC4460591e3D3Fbd071b42"] } } @@ -582,6 +582,9 @@ func TestResolver_VRFSpec(t *testing.T) { coordinatorAddress, err := ethkey.NewEIP55Address("0x613a38AC1659769640aaE063C651F48E0250454C") require.NoError(t, err) + batchCoordinatorAddress, err := ethkey.NewEIP55Address("0x0ad9FE7a58216242a8475ca92F222b0640E26B63") + require.NoError(t, err) + fromAddress1, err := ethkey.NewEIP55Address("0x3cCad4715152693fE3BC4460591e3D3Fbd071b42") require.NoError(t, err) @@ -600,15 +603,21 @@ func 
TestResolver_VRFSpec(t *testing.T) { f.Mocks.jobORM.On("FindJobTx", id).Return(job.Job{ Type: job.VRF, VRFSpec: &job.VRFSpec{ - MinIncomingConfirmations: 1, - CoordinatorAddress: coordinatorAddress, - CreatedAt: f.Timestamp(), - EVMChainID: utils.NewBigI(42), - FromAddresses: []ethkey.EIP55Address{fromAddress1, fromAddress2}, - PollPeriod: 1 * time.Minute, - PublicKey: pubKey, - RequestedConfsDelay: 10, - RequestTimeout: 24 * time.Hour, + BatchCoordinatorAddress: &batchCoordinatorAddress, + BatchFulfillmentEnabled: true, + MinIncomingConfirmations: 1, + CoordinatorAddress: coordinatorAddress, + CreatedAt: f.Timestamp(), + EVMChainID: utils.NewBigI(42), + FromAddresses: []ethkey.EIP55Address{fromAddress1, fromAddress2}, + PollPeriod: 1 * time.Minute, + PublicKey: pubKey, + RequestedConfsDelay: 10, + RequestTimeout: 24 * time.Hour, + ChunkSize: 25, + BatchFulfillmentGasMultiplier: 1, + BackoffInitialDelay: time.Minute, + BackoffMaxDelay: time.Hour, }, }, nil) }, @@ -628,6 +637,12 @@ func TestResolver_VRFSpec(t *testing.T) { publicKey requestedConfsDelay requestTimeout + batchCoordinatorAddress + batchFulfillmentEnabled + batchFulfillmentGasMultiplier + chunkSize + backoffInitialDelay + backoffMaxDelay } } } @@ -647,7 +662,13 @@ func TestResolver_VRFSpec(t *testing.T) { "pollPeriod": "1m0s", "publicKey": "0x9dc09a0f898f3b5e8047204e7ce7e44b587920932f08431e29c9bf6923b8450a01", "requestedConfsDelay": 10, - "requestTimeout": "24h0m0s" + "requestTimeout": "24h0m0s", + "batchCoordinatorAddress": "0x0ad9FE7a58216242a8475ca92F222b0640E26B63", + "batchFulfillmentEnabled": true, + "batchFulfillmentGasMultiplier": 1, + "chunkSize": 25, + "backoffInitialDelay": "1m0s", + "backoffMaxDelay": "1h0m0s" } } } diff --git a/core/web/router.go b/core/web/router.go index ef3f5c99d14..6f109fbf5bf 100644 --- a/core/web/router.go +++ b/core/web/router.go @@ -23,7 +23,7 @@ import ( limits "github.com/gin-contrib/size" "github.com/gin-gonic/contrib/sessions" "github.com/gin-gonic/gin" - 
graphql "github.com/graph-gophers/graphql-go" + "github.com/graph-gophers/graphql-go" "github.com/graph-gophers/graphql-go/relay" "github.com/pkg/errors" "github.com/ulule/limiter" @@ -257,6 +257,8 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) { authv2.POST("/transfers/evm", ets.Create) tts := TerraTransfersController{app} authv2.POST("/transfers/terra", tts.Create) + sts := SolanaTransfersController{app} + authv2.POST("/transfers/solana", sts.Create) cc := ConfigController{app} authv2.GET("/config", cc.Show) @@ -366,6 +368,13 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) { authv2.PATCH("/chains/evm/:ID", echc.Update) authv2.DELETE("/chains/evm/:ID", echc.Delete) + schc := SolanaChainsController{app} + authv2.GET("/chains/solana", paginatedRequest(schc.Index)) + authv2.POST("/chains/solana", schc.Create) + authv2.GET("/chains/solana/:ID", schc.Show) + authv2.PATCH("/chains/solana/:ID", schc.Update) + authv2.DELETE("/chains/solana/:ID", schc.Delete) + tchc := TerraChainsController{app} authv2.GET("/chains/terra", paginatedRequest(tchc.Index)) authv2.POST("/chains/terra", tchc.Create) @@ -389,6 +398,12 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) { authv2.POST("/nodes/evm/forwarders", efc.Create) authv2.DELETE("/nodes/evm/forwarders/:fwdID", efc.Delete) + snc := SolanaNodesController{app} + authv2.GET("/nodes/solana", paginatedRequest(snc.Index)) + authv2.GET("/chains/solana/:ID/nodes", paginatedRequest(snc.Index)) + authv2.POST("/nodes/solana", snc.Create) + authv2.DELETE("/nodes/solana/:ID", snc.Delete) + tnc := TerraNodesController{app} authv2.GET("/nodes/terra", paginatedRequest(tnc.Index)) authv2.GET("/chains/terra/:ID/nodes", paginatedRequest(tnc.Index)) diff --git a/core/web/schema/type/spec.graphql b/core/web/schema/type/spec.graphql index e63e0dc67fc..eae7b618359 100644 --- a/core/web/schema/type/spec.graphql +++ b/core/web/schema/type/spec.graphql @@ -21,7 +21,7 @@ type DirectRequestSpec { evmChainID: 
String minIncomingConfirmations: Int! minIncomingConfirmationsEnv: Boolean! - minContractPayment: String! + minContractPaymentLinkJuels: String! requesters: [String!] } @@ -101,6 +101,12 @@ type VRFSpec { publicKey: String! requestedConfsDelay: Int! requestTimeout: String! + batchCoordinatorAddress: String + batchFulfillmentEnabled: Boolean! + batchFulfillmentGasMultiplier: Float! + chunkSize: Int! + backoffInitialDelay: String! + backoffMaxDelay: String! } type WebhookSpec { @@ -130,4 +136,4 @@ type BootstrapSpec { contractConfigTrackerPollInterval: String contractConfigConfirmations: Int createdAt: Time! -} \ No newline at end of file +} diff --git a/core/web/solana_chains_controller.go b/core/web/solana_chains_controller.go new file mode 100644 index 00000000000..c9de3465d5f --- /dev/null +++ b/core/web/solana_chains_controller.go @@ -0,0 +1,138 @@ +package web + +import ( + "database/sql" + "net/http" + + "github.com/gin-gonic/gin" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + + "github.com/smartcontractkit/chainlink/core/services/chainlink" + "github.com/smartcontractkit/chainlink/core/web/presenters" +) + +// SolanaChainsController manages Solana chains. +type SolanaChainsController struct { + App chainlink.Application +} + +// Index lists Solana chains. 
+func (cc *SolanaChainsController) Index(c *gin.Context, size, page, offset int) { + solanaChains := cc.App.GetChains().Solana + if solanaChains == nil { + jsonAPIError(c, http.StatusBadRequest, ErrSolanaNotEnabled) + return + } + chains, count, err := solanaChains.ORM().Chains(offset, size) + + if err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + + var resources []presenters.SolanaChainResource + for _, chain := range chains { + resources = append(resources, presenters.NewSolanaChainResource(chain)) + } + + paginatedResponse(c, "solana_chain", size, page, resources, count, err) +} + +// CreateSolanaChainRequest is a JSONAPI request for creating a Solana chain. +type CreateSolanaChainRequest struct { + ID string `json:"chainID"` + Config db.ChainCfg `json:"config"` +} + +// Show gets a Solana chain by chain id. +func (cc *SolanaChainsController) Show(c *gin.Context) { + solanaChains := cc.App.GetChains().Solana + if solanaChains == nil { + jsonAPIError(c, http.StatusBadRequest, ErrSolanaNotEnabled) + return + } + chain, err := solanaChains.ORM().Chain(c.Param("ID")) + if err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + + jsonAPIResponse(c, presenters.NewSolanaChainResource(chain), "solana_chain") +} + +// Create adds a new Solana chain. 
+func (cc *SolanaChainsController) Create(c *gin.Context) { + solanaChains := cc.App.GetChains().Solana + if solanaChains == nil { + jsonAPIError(c, http.StatusBadRequest, ErrSolanaNotEnabled) + return + } + + request := &CreateSolanaChainRequest{} + + if err := c.ShouldBindJSON(&request); err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + chain, err := solanaChains.Add(c.Request.Context(), request.ID, request.Config) + + if err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + + jsonAPIResponseWithStatus(c, presenters.NewSolanaChainResource(chain), "solana_chain", http.StatusCreated) +} + +// UpdateSolanaChainRequest is a JSONAPI request for updating a Solana chain. +type UpdateSolanaChainRequest struct { + Enabled bool `json:"enabled"` + Config db.ChainCfg `json:"config"` +} + +// Update configures an existing Solana chain. +func (cc *SolanaChainsController) Update(c *gin.Context) { + solanaChains := cc.App.GetChains().Solana + if solanaChains == nil { + jsonAPIError(c, http.StatusBadRequest, ErrSolanaNotEnabled) + return + } + + var request UpdateSolanaChainRequest + if err := c.ShouldBindJSON(&request); err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + chain, err := solanaChains.Configure(c.Request.Context(), c.Param("ID"), request.Enabled, request.Config) + + if errors.Is(err, sql.ErrNoRows) { + jsonAPIError(c, http.StatusNotFound, err) + return + } else if err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + + jsonAPIResponse(c, presenters.NewSolanaChainResource(chain), "solana_chain") +} + +// Delete removes a Solana chain. 
+func (cc *SolanaChainsController) Delete(c *gin.Context) { + solanaChains := cc.App.GetChains().Solana + if solanaChains == nil { + jsonAPIError(c, http.StatusBadRequest, ErrSolanaNotEnabled) + return + } + err := solanaChains.Remove(c.Param("ID")) + + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + jsonAPIResponseWithStatus(c, nil, "solana_chain", http.StatusNoContent) +} diff --git a/core/web/solana_chains_controller_test.go b/core/web/solana_chains_controller_test.go new file mode 100644 index 00000000000..246e5ff6f0d --- /dev/null +++ b/core/web/solana_chains_controller_test.go @@ -0,0 +1,373 @@ +package web_test + +import ( + "bytes" + "database/sql" + "encoding/json" + "fmt" + "math/rand" + "net/http" + "testing" + "time" + + "github.com/gagliardetto/solana-go/rpc" + "github.com/manyminds/api2go/jsonapi" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + + "github.com/smartcontractkit/chainlink/core/internal/cltest" + "github.com/smartcontractkit/chainlink/core/internal/testutils" + "github.com/smartcontractkit/chainlink/core/internal/testutils/solanatest" + "github.com/smartcontractkit/chainlink/core/store/models" + "github.com/smartcontractkit/chainlink/core/web" + "github.com/smartcontractkit/chainlink/core/web/presenters" +) + +func Test_SolanaChainsController_Create(t *testing.T) { + t.Parallel() + + controller := setupSolanaChainsControllerTest(t) + + newChainId := fmt.Sprintf("Chainlinktest-%d", rand.Int31n(999999)) + + second := models.MustMakeDuration(time.Second) + minute := models.MustMakeDuration(time.Minute) + hour := models.MustMakeDuration(time.Hour) + body, err := json.Marshal(web.CreateSolanaChainRequest{ + ID: newChainId, + Config: db.ChainCfg{ + BalancePollPeriod: &second, + ConfirmPollPeriod: &minute, + OCR2CachePollPeriod: &minute, + OCR2CacheTTL: 
&second, + TxTimeout: &hour, + SkipPreflight: null.BoolFrom(false), + Commitment: null.StringFrom(string(rpc.CommitmentRecent)), + }, + }) + require.NoError(t, err) + + resp, cleanup := controller.client.Post("/v2/chains/solana", bytes.NewReader(body)) + t.Cleanup(cleanup) + require.Equal(t, http.StatusCreated, resp.StatusCode) + + chainSet := controller.app.GetChains().Solana + dbChain, err := chainSet.ORM().Chain(newChainId) + require.NoError(t, err) + + resource := presenters.SolanaChainResource{} + err = web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, resp), &resource) + require.NoError(t, err) + + assert.Equal(t, resource.ID, dbChain.ID) + assert.Equal(t, resource.Config.BalancePollPeriod, dbChain.Cfg.BalancePollPeriod) + assert.Equal(t, resource.Config.ConfirmPollPeriod, dbChain.Cfg.ConfirmPollPeriod) + assert.Equal(t, resource.Config.OCR2CachePollPeriod, dbChain.Cfg.OCR2CachePollPeriod) + assert.Equal(t, resource.Config.OCR2CacheTTL, dbChain.Cfg.OCR2CacheTTL) + assert.Equal(t, resource.Config.TxTimeout, dbChain.Cfg.TxTimeout) + assert.Equal(t, resource.Config.SkipPreflight, dbChain.Cfg.SkipPreflight) + assert.Equal(t, resource.Config.Commitment, dbChain.Cfg.Commitment) +} + +func Test_SolanaChainsController_Show(t *testing.T) { + t.Parallel() + + const validId = "Chainlink-12" + + hour := models.MustMakeDuration(time.Hour) + testCases := []struct { + name string + inputId string + wantStatusCode int + want func(t *testing.T, app *cltest.TestApplication) *db.Chain + }{ + { + inputId: validId, + name: "success", + want: func(t *testing.T, app *cltest.TestApplication) *db.Chain { + newChainConfig := db.ChainCfg{ + SkipPreflight: null.BoolFrom(false), + TxTimeout: &hour, + } + + chain := db.Chain{ + ID: validId, + Enabled: true, + Cfg: newChainConfig, + } + solanatest.MustInsertChain(t, app.GetSqlxDB(), &chain) + + return &chain + }, + wantStatusCode: http.StatusOK, + }, + { + inputId: "234", + name: "not found", + want: func(t *testing.T, app 
*cltest.TestApplication) *db.Chain { + return nil + }, + wantStatusCode: http.StatusBadRequest, + }, + } + + for _, testCase := range testCases { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + controller := setupSolanaChainsControllerTest(t) + + wantedResult := tc.want(t, controller.app) + resp, cleanup := controller.client.Get( + fmt.Sprintf("/v2/chains/solana/%s", tc.inputId), + ) + t.Cleanup(cleanup) + require.Equal(t, tc.wantStatusCode, resp.StatusCode) + + if wantedResult != nil { + resource1 := presenters.SolanaChainResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, resp), &resource1) + require.NoError(t, err) + + assert.Equal(t, resource1.ID, wantedResult.ID) + assert.Equal(t, resource1.Config.SkipPreflight, wantedResult.Cfg.SkipPreflight) + assert.Equal(t, resource1.Config.TxTimeout, wantedResult.Cfg.TxTimeout) + } + }) + } +} + +func Test_SolanaChainsController_Index(t *testing.T) { + t.Parallel() + + controller := setupSolanaChainsControllerTest(t) + + hour := models.MustMakeDuration(time.Hour) + newChains := []web.CreateSolanaChainRequest{ + { + ID: fmt.Sprintf("ChainlinktestA-%d", rand.Int31n(999999)), + Config: db.ChainCfg{ + TxTimeout: &hour, + }, + }, + { + ID: fmt.Sprintf("ChainlinktestB-%d", rand.Int31n(999999)), + Config: db.ChainCfg{ + SkipPreflight: null.BoolFrom(false), + }, + }, + } + + for _, newChain := range newChains { + ch := newChain + solanatest.MustInsertChain(t, controller.app.GetSqlxDB(), &db.Chain{ + ID: ch.ID, + Enabled: true, + Cfg: ch.Config, + }) + } + + badResp, cleanup := controller.client.Get("/v2/chains/solana?size=asd") + t.Cleanup(cleanup) + require.Equal(t, http.StatusUnprocessableEntity, badResp.StatusCode) + + resp, cleanup := controller.client.Get("/v2/chains/solana?size=1") + t.Cleanup(cleanup) + require.Equal(t, http.StatusOK, resp.StatusCode) + + body := cltest.ParseResponseBody(t, resp) + + metaCount, err := cltest.ParseJSONAPIResponseMetaCount(body) + 
require.NoError(t, err) + require.Equal(t, len(newChains), metaCount) + + var links jsonapi.Links + + chains := []presenters.SolanaChainResource{} + err = web.ParsePaginatedResponse(body, &chains, &links) + assert.NoError(t, err) + assert.NotEmpty(t, links["next"].Href) + assert.Empty(t, links["prev"].Href) + + assert.Len(t, links, 1) + assert.Equal(t, newChains[0].ID, chains[0].ID) + assert.Equal(t, newChains[0].Config.SkipPreflight, chains[0].Config.SkipPreflight) + assert.Equal(t, newChains[0].Config.TxTimeout, chains[0].Config.TxTimeout) + + resp, cleanup = controller.client.Get(links["next"].Href) + t.Cleanup(cleanup) + require.Equal(t, http.StatusOK, resp.StatusCode) + + chains = []presenters.SolanaChainResource{} + err = web.ParsePaginatedResponse(cltest.ParseResponseBody(t, resp), &chains, &links) + assert.NoError(t, err) + assert.Empty(t, links["next"].Href) + assert.NotEmpty(t, links["prev"].Href) + + assert.Len(t, links, 1) + assert.Equal(t, newChains[1].ID, chains[0].ID) + assert.Equal(t, newChains[1].Config.SkipPreflight, chains[0].Config.SkipPreflight) + assert.Equal(t, newChains[1].Config.TxTimeout, chains[0].Config.TxTimeout) +} + +func Test_SolanaChainsController_Update(t *testing.T) { + t.Parallel() + + hour := models.MustMakeDuration(time.Hour) + chainUpdate := web.UpdateSolanaChainRequest{ + Enabled: true, + Config: db.ChainCfg{ + SkipPreflight: null.BoolFrom(false), + TxTimeout: &hour, + }, + } + + validId := fmt.Sprintf("Chainlinktest-%d", rand.Int31n(999999)) + + testCases := []struct { + name string + inputId string + wantStatusCode int + chainBeforeUpdate func(t *testing.T, app *cltest.TestApplication) *db.Chain + }{ + { + inputId: validId, + name: "success", + chainBeforeUpdate: func(t *testing.T, app *cltest.TestApplication) *db.Chain { + newChainConfig := db.ChainCfg{ + SkipPreflight: null.BoolFrom(false), + TxTimeout: &hour, + } + + chain := db.Chain{ + ID: validId, + Enabled: true, + Cfg: newChainConfig, + } + 
solanatest.MustInsertChain(t, app.GetSqlxDB(), &chain) + + return &chain + }, + wantStatusCode: http.StatusOK, + }, + { + inputId: "341212", + name: "not found", + chainBeforeUpdate: func(t *testing.T, app *cltest.TestApplication) *db.Chain { + return nil + }, + wantStatusCode: http.StatusNotFound, + }, + } + + for _, testCase := range testCases { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + controller := setupSolanaChainsControllerTest(t) + + beforeUpdate := tc.chainBeforeUpdate(t, controller.app) + + body, err := json.Marshal(chainUpdate) + require.NoError(t, err) + + resp, cleanup := controller.client.Patch( + fmt.Sprintf("/v2/chains/solana/%s", tc.inputId), + bytes.NewReader(body), + ) + t.Cleanup(cleanup) + require.Equal(t, tc.wantStatusCode, resp.StatusCode) + + if beforeUpdate != nil { + resource1 := presenters.SolanaChainResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, resp), &resource1) + require.NoError(t, err) + + assert.Equal(t, resource1.ID, beforeUpdate.ID) + assert.Equal(t, resource1.Enabled, chainUpdate.Enabled) + assert.Equal(t, resource1.Config.SkipPreflight, chainUpdate.Config.SkipPreflight) + assert.Equal(t, resource1.Config.TxTimeout, chainUpdate.Config.TxTimeout) + } + }) + } +} + +func Test_SolanaChainsController_Delete(t *testing.T) { + t.Parallel() + + controller := setupSolanaChainsControllerTest(t) + + hour := models.MustMakeDuration(time.Hour) + newChainConfig := db.ChainCfg{ + SkipPreflight: null.BoolFrom(false), + TxTimeout: &hour, + } + + chainId := fmt.Sprintf("Chainlinktest-%d", rand.Int31n(999999)) + chain := db.Chain{ + ID: chainId, + Enabled: true, + Cfg: newChainConfig, + } + solanatest.MustInsertChain(t, controller.app.GetSqlxDB(), &chain) + + _, countBefore, err := controller.app.Chains.Solana.ORM().Chains(0, 10) + require.NoError(t, err) + require.Equal(t, 1, countBefore) + + t.Run("non-existing chain", func(t *testing.T) { + resp, cleanup := 
controller.client.Delete("/v2/chains/solana/121231") + t.Cleanup(cleanup) + require.Equal(t, http.StatusInternalServerError, resp.StatusCode) + + _, countAfter, err := controller.app.Chains.Solana.ORM().Chains(0, 10) + require.NoError(t, err) + require.Equal(t, 1, countAfter) + }) + + t.Run("existing chain", func(t *testing.T) { + resp, cleanup := controller.client.Delete( + fmt.Sprintf("/v2/chains/solana/%s", chain.ID), + ) + t.Cleanup(cleanup) + require.Equal(t, http.StatusNoContent, resp.StatusCode) + + _, countAfter, err := controller.app.Chains.Solana.ORM().Chains(0, 10) + require.NoError(t, err) + require.Equal(t, 0, countAfter) + + _, err = controller.app.Chains.Solana.ORM().Chain(chain.ID) + + assert.Error(t, err) + assert.True(t, errors.Is(err, sql.ErrNoRows)) + }) +} + +type TestSolanaChainsController struct { + app *cltest.TestApplication + client cltest.HTTPClientCleaner +} + +func setupSolanaChainsControllerTest(t *testing.T) *TestSolanaChainsController { + cfg := cltest.NewTestGeneralConfig(t) + cfg.Overrides.SolanaEnabled = null.BoolFrom(true) + cfg.Overrides.EVMEnabled = null.BoolFrom(false) + cfg.Overrides.EVMRPCEnabled = null.BoolFrom(false) + app := cltest.NewApplicationWithConfig(t, cfg) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient() + + return &TestSolanaChainsController{ + app: app, + client: client, + } +} diff --git a/core/web/solana_nodes_controller.go b/core/web/solana_nodes_controller.go new file mode 100644 index 00000000000..4913b585a01 --- /dev/null +++ b/core/web/solana_nodes_controller.go @@ -0,0 +1,119 @@ +package web + +import ( + "database/sql" + "net/http" + "strconv" + + "github.com/gin-gonic/gin" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/db" + + "github.com/smartcontractkit/chainlink/core/services/chainlink" + "github.com/smartcontractkit/chainlink/core/web/presenters" +) + +// ErrSolanaNotEnabled is returned when SOLANA_ENABLED is not true. 
+var ErrSolanaNotEnabled = errors.New("Solana is disabled. Set SOLANA_ENABLED=true to enable.") + +// SolanaNodesController manages Solana nodes. +type SolanaNodesController struct { + App chainlink.Application +} + +// Index lists Solana nodes, and optionally filters by chain id. +func (nc *SolanaNodesController) Index(c *gin.Context, size, page, offset int) { + solanaChains := nc.App.GetChains().Solana + if solanaChains == nil { + jsonAPIError(c, http.StatusBadRequest, ErrSolanaNotEnabled) + return + } + orm := solanaChains.ORM() + + id := c.Param("ID") + + var nodes []db.Node + var count int + var err error + + if id == "" { + // fetch all nodes + nodes, count, err = orm.Nodes(offset, size) + + } else { + nodes, count, err = orm.NodesForChain(id, offset, size) + } + + var resources []presenters.SolanaNodeResource + for _, node := range nodes { + resources = append(resources, presenters.NewSolanaNodeResource(node)) + } + + paginatedResponse(c, "node", size, page, resources, count, err) +} + +// Create adds a new Solana node. +func (nc *SolanaNodesController) Create(c *gin.Context) { + solanaChains := nc.App.GetChains().Solana + if solanaChains == nil { + jsonAPIError(c, http.StatusBadRequest, ErrSolanaNotEnabled) + return + } + orm := solanaChains.ORM() + + var request db.NewNode + + if err := c.ShouldBindJSON(&request); err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + // Ensure chain exists. 
+ if _, err := orm.Chain(request.SolanaChainID); err != nil { + if errors.Is(err, sql.ErrNoRows) { + jsonAPIError(c, http.StatusBadRequest, errors.Errorf("Solana chain %s must be added first", request.SolanaChainID)) + return + } + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + node, err := orm.CreateNode(db.Node{ + Name: request.Name, + SolanaChainID: request.SolanaChainID, + SolanaURL: request.SolanaURL, + }) + + if err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + + jsonAPIResponse(c, presenters.NewSolanaNodeResource(node), "node") +} + +// Delete removes a Solana node. +func (nc *SolanaNodesController) Delete(c *gin.Context) { + solanaChains := nc.App.GetChains().Solana + if solanaChains == nil { + jsonAPIError(c, http.StatusBadRequest, ErrSolanaNotEnabled) + return + } + orm := solanaChains.ORM() + + id, err := strconv.ParseInt(c.Param("ID"), 10, 32) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + err = orm.DeleteNode(int32(id)) + + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + jsonAPIResponseWithStatus(c, nil, "node", http.StatusNoContent) +} diff --git a/core/web/solana_transfer_controller.go b/core/web/solana_transfer_controller.go new file mode 100644 index 00000000000..db1a37c4f83 --- /dev/null +++ b/core/web/solana_transfer_controller.go @@ -0,0 +1,154 @@ +package web + +import ( + "net/http" + + "github.com/gagliardetto/solana-go/programs/system" + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink-solana/pkg/solana/client" + + solanaGo "github.com/gagliardetto/solana-go" + + "github.com/smartcontractkit/chainlink/core/chains/solana" + "github.com/smartcontractkit/chainlink/core/services/chainlink" + solanamodels "github.com/smartcontractkit/chainlink/core/store/models/solana" + "github.com/smartcontractkit/chainlink/core/web/presenters" +) + +// 
SolanaTransfersController can send LINK tokens to another address +type SolanaTransfersController struct { + App chainlink.Application +} + +// Create sends Luna and other native coins from the Chainlink's account to a specified address. +func (tc *SolanaTransfersController) Create(c *gin.Context) { + solanaChains := tc.App.GetChains().Solana + if solanaChains == nil { + jsonAPIError(c, http.StatusBadRequest, ErrSolanaNotEnabled) + return + } + + var tr solanamodels.SendRequest + if err := c.ShouldBindJSON(&tr); err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + if tr.SolanaChainID == "" { + jsonAPIError(c, http.StatusBadRequest, errors.New("missing solanaChainID")) + return + } + chain, err := solanaChains.Chain(c.Request.Context(), tr.SolanaChainID) + switch err { + case solana.ErrChainIDInvalid, solana.ErrChainIDEmpty: + jsonAPIError(c, http.StatusBadRequest, err) + return + case nil: + break + default: + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + if tr.From.IsZero() { + jsonAPIError(c, http.StatusUnprocessableEntity, errors.Errorf("source address is missing: %v", tr.From)) + return + } + + if tr.Amount == 0 { + jsonAPIError(c, http.StatusBadRequest, errors.New("amount must be greater than zero")) + return + } + + fromKey, err := tc.App.GetKeyStore().Solana().Get(tr.From.String()) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, errors.Errorf("fail to get key: %v", err)) + return + } + + txm := chain.TxManager() + var reader client.Reader + reader, err = chain.Reader() + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, errors.Errorf("chain unreachable: %v", err)) + return + } + + blockhash, err := reader.LatestBlockhash() + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, errors.Errorf("failed to get latest block hash: %v", err)) + return + } + + tx, err := solanaGo.NewTransaction( + []solanaGo.Instruction{ + system.NewTransferInstruction( + tr.Amount, + 
tr.From, + tr.To, + ).Build(), + }, + blockhash.Value.Blockhash, + solanaGo.TransactionPayer(tr.From), + ) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, errors.Errorf("failed to create tx: %v", err)) + return + } + + if !tr.AllowHigherAmounts { + if err := solanaValidateBalance(reader, tr.From, tr.Amount, tx.Message.ToBase64()); err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, errors.Errorf("failed to validate balance: %v", err)) + return + } + } + + // marshal transaction + msg, err := tx.Message.MarshalBinary() + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, errors.Errorf("failed to marshal tx: %v", err)) + return + } + + // sign tx + sigBytes, err := fromKey.Sign(msg) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, errors.Errorf("failed to sign tx: %v", err)) + return + } + var finalSig [64]byte + copy(finalSig[:], sigBytes) + tx.Signatures = append(tx.Signatures, finalSig) + + err = txm.Enqueue("", tx) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, errors.Errorf("transaction failed: %v", err)) + return + } + + resource := presenters.NewSolanaMsgResource("sol_transfer_"+uuid.New().String(), tr.SolanaChainID) + resource.Amount = tr.Amount + resource.From = tr.From.String() + resource.To = tr.To.String() + + jsonAPIResponse(c, resource, "solana_tx") +} + +func solanaValidateBalance(reader client.Reader, from solanaGo.PublicKey, amount uint64, msg string) error { + balance, err := reader.Balance(from) + if err != nil { + return err + } + + fee, err := reader.GetFeeForMessage(msg) + if err != nil { + return err + } + + if balance < (amount + fee) { + return errors.Errorf("balance %d is too low for this transaction to be executed: amount %d + fee %d", balance, amount, fee) + } + return nil +} diff --git a/core/web/terra_nodes_controller.go b/core/web/terra_nodes_controller.go index 75322c277e7..326bfc3177c 100644 --- a/core/web/terra_nodes_controller.go +++ 
b/core/web/terra_nodes_controller.go @@ -81,7 +81,11 @@ func (nc *TerraNodesController) Create(c *gin.Context) { return } - node, err := orm.CreateNode(request) + node, err := orm.CreateNode(db.Node{ + Name: request.Name, + TerraChainID: request.TerraChainID, + TendermintURL: request.TendermintURL, + }) if err != nil { jsonAPIError(c, http.StatusBadRequest, err) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 20a09acb928..c27daad35aa 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -8,6 +8,24 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] ... + +## [1.4.0] - 2022-05-02 + +### Added + +- JSON parse tasks (v2) now support a custom `separator` parameter to substitute for the default `,`. +- Log slow SQL queries +- Fantom and avalanche block explorer urls +- Display `requestTimeout` in job UI +- Keeper upkeep order is shuffled + +### Fixed + +- `LOG_FILE_MAX_SIZE` handling +- Improved websocket subscription management (fixes issues with multiple-primary-node failover from 1.3.x) +- VRFv2 fixes and enhancements +- UI support for `minContractPaymentLinkJuels` + ## [1.3.0] - 2022-04-18 ### Added @@ -64,20 +82,22 @@ export EVM_NODES=' ### Changed -- Changed default locking mode to "dual". Bugs in lease locking have been ironed out and this paves the way to making "lease" the default in future. It is recommended to set `DATABASE_LOCKING_MODE=lease`, default is set to "dual" only for backwards compatibility. +- Changed default locking mode to "dual". Bugs in lease locking have been ironed out and this paves the way to making "lease" the default in the future. It is recommended to set `DATABASE_LOCKING_MODE=lease`, default is set to "dual" only for backwards compatibility. - EIP-1559 is now enabled by default on mainnet. To disable (go back to legacy mode) set `EVM_EIP1559_DYNAMIC_FEES=false`. 
The default settings should work well, but if you wish to tune your gas controls, see the [documentation](https://docs.chain.link/docs/configuration-variables/#evm-gas-controls). -Note that EIP-1559 can be manually enabled on other chains by setting `EVM_EIP1559_DYNAMIC_FEES=true` but we only support it for official Ethereum mainnet and testnets. It is _not_ recommended to enable this setting on Polygon since during our testing process we found that the EIP-1559 fee market appears to be broken on all Polygon chains and EIP-1559 transactions are actually less likely to get included than legacy transactions. +Note that EIP-1559 can be manually enabled on other chains by setting `EVM_EIP1559_DYNAMIC_FEES=true` but we only support it for official Ethereum mainnet and testnets. It is _not_ recommended enabling this setting on Polygon since during our testing process we found that the EIP-1559 fee market appears to be broken on all Polygon chains and EIP-1559 transactions are actually less likely to get included than legacy transactions. See issue: https://github.com/maticnetwork/bor/issues/347 +- The pipeline task runs have changed persistence protocol (database), which will result in the inability to decode some existing task runs. All new runs should be working with no issues. + ### Removed - `LOG_TO_DISK` ENV var. ## [1.2.1] - 2022-03-17 -This release hotfixes issues from moving a new CI/CD system. Featurewise the functionality is the same as `v1.2.0`. +This release hotfixes issues from moving to a new CI/CD system. Feature-wise the functionality is the same as `v1.2.0`. 
### Fixed diff --git a/go.mod b/go.mod index d171b2a8f70..25717a6bc07 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/smartcontractkit/chainlink -go 1.17 +go 1.18 require ( github.com/Depado/ginprom v1.7.3 @@ -15,7 +15,7 @@ require ( github.com/ethereum/go-ethereum v1.10.16 github.com/fatih/color v1.13.0 github.com/fxamacker/cbor/v2 v2.4.0 - github.com/gagliardetto/solana-go v1.0.4 + github.com/gagliardetto/solana-go v1.4.1-0.20220413001530-3e39c80b7211 github.com/getsentry/sentry-go v0.12.0 github.com/gin-contrib/cors v1.3.1 github.com/gin-contrib/expvar v0.0.0-20181230111036-f23b556cc79f @@ -34,7 +34,7 @@ require ( github.com/jackc/pgx/v4 v4.15.0 github.com/jpillora/backoff v1.0.0 github.com/kylelemons/godebug v1.1.0 - github.com/lib/pq v1.10.4 + github.com/lib/pq v1.10.5 github.com/libp2p/go-libp2p-core v0.8.5 github.com/libp2p/go-libp2p-peerstore v0.2.7 github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f @@ -44,7 +44,7 @@ require ( github.com/multiformats/go-multiaddr v0.3.3 github.com/okex/exchain-ethereum-compatible v1.1.0 github.com/olekukonko/tablewriter v0.0.5 - github.com/onsi/gomega v1.18.1 + github.com/onsi/gomega v1.19.0 github.com/pelletier/go-toml v1.9.4 github.com/pkg/errors v0.9.1 github.com/pressly/goose/v3 v3.5.3 @@ -54,15 +54,15 @@ require ( github.com/scylladb/go-reflectx v1.0.1 github.com/shirou/gopsutil/v3 v3.22.2 github.com/shopspring/decimal v1.3.1 - github.com/smartcontractkit/chainlink-solana v0.2.18-0.20220315140817-b4df0b6bc414 - github.com/smartcontractkit/chainlink-terra v0.1.4-0.20220315114020-a15962b0ed9b - github.com/smartcontractkit/libocr v0.0.0-20220217180537-449836e6cfec + github.com/smartcontractkit/chainlink-solana v0.2.20-0.20220420200429-3da7f865d367 + github.com/smartcontractkit/chainlink-terra v0.1.4-0.20220420200433-90c1ac0f3c2a + github.com/smartcontractkit/libocr v0.0.0-20220414173908-cdfa6bef133a github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb 
github.com/smartcontractkit/terra.go v1.0.3-0.20220108002221-62b39252ee16 github.com/smartcontractkit/wsrpc v0.3.10-0.20220317191700-8c8ecdcaed4a github.com/spf13/cobra v1.3.0 github.com/spf13/viper v1.10.1 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.7.1 github.com/tendermint/tendermint v0.34.15 github.com/terra-money/core v0.5.14 github.com/test-go/testify v1.1.4 @@ -77,12 +77,13 @@ require ( go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 golang.org/x/crypto v0.0.0-20220210151621-f4118a5b28e2 + golang.org/x/exp v0.0.0-20220328175248-053ad81199eb golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 golang.org/x/text v0.3.7 golang.org/x/tools v0.1.9 - gonum.org/v1/gonum v0.9.3 - google.golang.org/protobuf v1.27.1 + gonum.org/v1/gonum v0.11.0 + google.golang.org/protobuf v1.28.0 gopkg.in/guregu/null.v4 v4.0.0 ) @@ -106,8 +107,9 @@ require ( github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cloudflare/cfssl v0.0.0-20190726000631-633726f6bcb7 // indirect + github.com/codegangsta/negroni v1.0.0 // indirect github.com/confio/ics23/go v0.6.6 // indirect - github.com/containerd/containerd v1.5.9 // indirect + github.com/containerd/containerd v1.5.10 // indirect github.com/cosmos/btcutil v1.0.4 // indirect github.com/cosmos/go-bip39 v1.0.0 // indirect github.com/cosmos/iavl v0.17.3 // indirect @@ -124,7 +126,7 @@ require ( github.com/dgraph-io/ristretto v0.0.3 // indirect github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/go-units v0.4.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b // indirect @@ -134,7 +136,7 @@ require ( 
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 // indirect github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect - github.com/gagliardetto/binary v0.5.2 // indirect + github.com/gagliardetto/binary v0.6.1 // indirect github.com/gagliardetto/treeout v0.1.4 // indirect github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813 // indirect github.com/gin-contrib/sse v0.1.0 // indirect @@ -264,7 +266,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/common v0.33.0 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/prometheus/tsdb v0.10.0 // indirect github.com/rakyll/statik v0.1.7 // indirect @@ -306,11 +308,12 @@ require ( go.etcd.io/bbolt v1.3.6 // indirect go.opencensus.io v0.23.0 // indirect go.uber.org/ratelimit v0.2.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect google.golang.org/grpc v1.43.0 // indirect + gopkg.in/guregu/null.v2 v2.1.2 // indirect gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect diff --git a/go.sum b/go.sum index 7711fd57245..451308f431d 100644 --- a/go.sum +++ b/go.sum @@ -1,11 +1,7 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.33.1/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= -cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -60,15 +56,9 @@ collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= contrib.go.opencensus.io/exporter/stackdriver v0.12.6/go.mod h1:8x999/OcIPy5ivx/wDiV7Gx4D+VUPODf0mWRGRc5kSk= contrib.go.opencensus.io/exporter/stackdriver v0.13.4 h1:ksUxwH3OD5sxkjzEqGxNTl+Xjsmu3BnC/300MhSVTSc= contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= -dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= -dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= -dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= filippo.io/edwards25519 v1.0.0-beta.3 h1:WQxB0FH5NzrhciInJ30bgL3soLng3AbdI651yQuVlCs= filippo.io/edwards25519 v1.0.0-beta.3/go.mod h1:X+pm78QAUPtFLi1z9PYIlS/bdDnvbCOGKtZ+ACWEf7o= -gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod 
h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= github.com/99designs/keyring v1.1.6 h1:kVDC2uCgVwecxCk+9zoCt2uEL6dt+dfVzMvGgnVcIuM= @@ -120,13 +110,10 @@ github.com/CosmWasm/wasmvm v0.16.0/go.mod h1:Id107qllDJyJjVQQsKMOy2YYF98sqPJ2t+j github.com/CosmWasm/wasmvm v0.16.3 h1:hUf33EHRmyyvKMhwVl7nMaAOY0vYJVB4bhU+HPfHfBM= github.com/CosmWasm/wasmvm v0.16.3/go.mod h1:Id107qllDJyJjVQQsKMOy2YYF98sqPJ2t+jX1QES40A= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/DATA-DOG/go-txdb v0.1.3/go.mod h1:DhAhxMXZpUJVGnT+p9IbzJoRKvlArO2pkHjnGX7o0n0= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/Depado/ginprom v1.2.1-0.20200115153638-53bbba851bd8/go.mod h1:VHRucFf/9saDXsYg6uzQ8Oo8gUwngtWec9ZJ00H+ZCc= github.com/Depado/ginprom v1.7.3 h1:3e/MujyuVbm73D0ReFsMKSW6P7PLXjMcZVeukY2C150= github.com/Depado/ginprom v1.7.3/go.mod h1:VnTr/tCjWPNw5h/DF4lc3z9fqZ/aLNgJ0PUp3Vvxo/Y= github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= @@ -164,13 +151,11 @@ github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= 
github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI= github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= @@ -196,27 +181,18 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/allegro/bigcache v1.2.1 
h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= -github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 h1:MzBOUgng9orim59UnfUTLRjMpd09C5uEVQ6RPGeCaVI= github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4= github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= -github.com/araddon/dateparse v0.0.0-20190622164848-0fb0a474d195/go.mod h1:SLqhdZcd+dF3TEVL2RMoob5bBP5R1P1qkox+HtCBgGI= -github.com/aristanetworks/fsnotify v1.4.2/go.mod h1:D/rtu7LpjYM8tRJphJ0hUBYpjai8SfX+aSNsWDTq/Ks= -github.com/aristanetworks/glog v0.0.0-20180419172825-c15b03b3054f/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= -github.com/aristanetworks/goarista v0.0.0-20190204200901-2166578f3448/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= -github.com/aristanetworks/goarista v0.0.0-20191023202215-f096da5361bb/go.mod h1:Z4RTxGAuYhPzcq8+EdRM+R8M48Ssle2TsWtwRKa+vns= 
-github.com/aristanetworks/splunk-hec-go v0.3.3/go.mod h1:1VHO9r17b0K7WmOlLb9nTk/2YanvOEnLMUgsFrxBROc= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -270,12 +246,9 @@ github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHf github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= -github.com/boj/redistore v0.0.0-20160128113310-fc113767cd6b/go.mod h1:5r9chGCb4uUhBCGMDDCYfyHU/awSRoBeG53Zaj1crhU= github.com/boj/redistore v0.0.0-20180917114910-cd5dcc76aeff h1:RmdPFa+slIr4SCBg4st/l/vZWVe9QJKMXGO60Bxbe04= github.com/boj/redistore v0.0.0-20180917114910-cd5dcc76aeff/go.mod h1:+RTT1BOk5P97fT2CiHkbFQwkK3mjsFAP6zCYV2aXtjw= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod h1:d3C0AkH6BRcvO8T0UEPu53cnw4IbV63x1bEjildYhO0= @@ -302,7 +275,6 @@ github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg github.com/btcsuite/websocket 
v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= @@ -316,9 +288,8 @@ github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInq github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= -github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU= -github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM= @@ -327,7 +298,6 @@ github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cb github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= -github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -403,8 +373,8 @@ github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7 github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= -github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4= -github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/containerd v1.5.10 h1:3cQ2uRVCkJVcx5VombsE7105Gl9Wrl7ORAO3+4+ogf4= +github.com/containerd/containerd v1.5.10/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -470,7 +440,6 @@ github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmf github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd 
v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= @@ -526,19 +495,14 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= -github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= -github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= 
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= -github.com/denisenkom/go-mssqldb v0.0.0-20181014144952-4e0d7dc8888f/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc= -github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= -github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= @@ -570,8 +534,9 @@ github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5O github.com/docker/cli v20.10.11+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U= @@ -587,7 +552,6 @@ 
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dop251/goja v0.0.0-20200219165308-d1232e640a87/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= @@ -624,16 +588,12 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= -github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/ethereum-optimism/go-optimistic-ethereum-utils v0.1.0 h1:+Pj8lKxF/2v5Frwrlted7XxcdlK7UtBIyfmrB+CrhD8= github.com/ethereum-optimism/go-optimistic-ethereum-utils v0.1.0/go.mod h1:V6tsDFR2US/KgDIk0ml2gvnlQU/ss9NA1pV36wC7Njo= github.com/ethereum/go-ethereum v1.9.10/go.mod h1:lXHkVo/MTvsEXfYsmNzelZ8R1e0DTvdk/wMZJIRpaRw= -github.com/ethereum/go-ethereum v1.9.18/go.mod h1:JSSTypSMTkGZtAdAChH2wP5dZEvPGh3nUTuDpH+hNrg= -github.com/ethereum/go-ethereum v1.9.24/go.mod 
h1:JIfVb6esrqALTExdz9hRYvrP0xBDf6wCncIu1hNwHpM= github.com/ethereum/go-ethereum v1.9.25/go.mod h1:vMkFiYLHI4tgPw4k2j4MHKoovchFE8plZ0M9VMk4/oM= github.com/ethereum/go-ethereum v1.10.8/go.mod h1:pJNuIUYfX5+JKzSD/BTdNsvJSZ1TJqmz0dVyXMAbf6M= -github.com/ethereum/go-ethereum v1.10.11/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw= github.com/ethereum/go-ethereum v1.10.16 h1:3oPrumn0bCW/idjcxMn5YYVCdK7VzJYIvwGZUGLEaoc= github.com/ethereum/go-ethereum v1.10.16/go.mod h1:Anj6cxczl+AHy63o4X9O8yWNHuN5wMpfb8MAnHkWn7Y= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -657,11 +617,9 @@ github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.1+incompatible h1:xdtqez379uWVJ9P3qQMX8W+F/nqsTdUvyMZB36tnacA= github.com/form3tech-oss/jwt-go v3.2.1+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= @@ -669,7 +627,6 @@ github.com/form3tech-oss/jwt-go v3.2.3+incompatible 
h1:7ZaBxOI7TMoYBfyA3cQHErNNy github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= @@ -682,19 +639,18 @@ github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXt github.com/fxamacker/cbor/v2 v2.2.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88= github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= -github.com/gagliardetto/binary v0.5.2 h1:puURDkknQkF/e5bx2JtnYv9pEdBf5YCx5Qh99Mk9A00= -github.com/gagliardetto/binary v0.5.2/go.mod h1:peJR9PvwamL4YOh1nHWCPLry2VEfeeD1ADvewka7HnQ= +github.com/gagliardetto/binary v0.6.1 h1:vGrbUym10xaaswadfnuSDr0xlP3NZS5XWbLqENJidrI= +github.com/gagliardetto/binary v0.6.1/go.mod h1:aOfYkc20U0deHaHn/LVZXiqlkDbFAX0FpTlDhsXa0S0= +github.com/gagliardetto/gofuzz v1.2.2 h1:XL/8qDMzcgvR4+CyRQW9UGdwPRPMHVJfqQ/uMvSUuQw= github.com/gagliardetto/gofuzz v1.2.2/go.mod h1:bkH/3hYLZrMLbfYWA0pWzXmi5TTRZnu4pMGZBkqMKvY= -github.com/gagliardetto/solana-go v1.0.4 h1:+KnQHKjW+Kl+/74smnNsS6PTcm5RxTqLKJievOI14Xk= -github.com/gagliardetto/solana-go v1.0.4/go.mod h1:S1ds1RHgJPmZJLVZ/AB09o9TlDBFsPGmxUcOrgvfAY8= +github.com/gagliardetto/solana-go v1.4.1-0.20220413001530-3e39c80b7211 h1:o+QozGVXw4UUBaoX0+mGOPzIzxkEyJFfW3fM9OGyM+s= 
+github.com/gagliardetto/solana-go v1.4.1-0.20220413001530-3e39c80b7211/go.mod h1:NFuoDwHPvw858ZMHUJr6bkhN8qHt4x6e+U3EYHxAwNY= github.com/gagliardetto/treeout v0.1.4 h1:ozeYerrLCmCubo1TcIjFiOWTTGteOOHND1twdFpgwaw= github.com/gagliardetto/treeout v0.1.4/go.mod h1:loUefvXTrlRG5rYmJmExNryyBRh8f89VZhmMOyCyqok= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= -github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813 h1:Uc+IZ7gYqAf/rSGFplbWBSHaGolEQlNLgMgSE3ccnIQ= github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813/go.mod h1:P+oSoE9yhSRvsmYyZsshflcR6ePWYLql6UU1amW13IM= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= @@ -718,22 +674,16 @@ github.com/gin-gonic/contrib v0.0.0-20190526021735-7fb7810ed2a0/go.mod h1:iqneQ2 github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y= github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= -github.com/gin-gonic/gin v1.6.0/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.7.0/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= github.com/gin-gonic/gin v1.7.7 
h1:3DoBmSbJbZAWqXJC3SLjAPfutPJJRN1U5pALB7EeTTs= github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.4.1 h1:IvVlgbzSsaUNudsw5dcXSzF3EWyXTi5XrAdngnuhRyg= -github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= -github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= -github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -746,7 +696,6 @@ github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEai github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod 
h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -788,13 +737,7 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= -github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= -github.com/gobuffalo/packr v1.30.1/go.mod h1:ljMyFO2EcrnzsHsN99cvbq055Y9OhRrIaviy289eRuk= -github.com/gobuffalo/packr/v2 v2.5.1/go.mod h1:8f9c96ITobJlPzI44jj+4tHnEKNt0xXWSVlXRN9X1Iw= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= @@ -805,7 +748,6 @@ github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+ github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= @@ -828,7 +770,6 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -862,7 +803,6 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 
h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= @@ -892,7 +832,6 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -920,7 +859,6 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= @@ -928,15 +866,12 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLe github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod 
h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= -github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -958,7 +893,6 @@ github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36j github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.1.1/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= -github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -983,7 +917,6 @@ 
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaD github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= @@ -997,7 +930,6 @@ github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= -github.com/guregu/null v3.5.0+incompatible/go.mod h1:ePGpQaN9cw0tj45IR5E5ehMvsFlLlQZAkkOXZurJ3NM= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= @@ -1091,7 +1023,6 @@ github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7m github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= 
-github.com/influxdata/influxdb1-client v0.0.0-20190809212627-fc22c7df067e/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= @@ -1213,21 +1144,11 @@ github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsj github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= -github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jhump/protoreflect v1.8.2/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= github.com/jhump/protoreflect v1.9.0/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= -github.com/jinzhu/gorm v1.9.2/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo= -github.com/jinzhu/gorm v1.9.11-0.20190912141731-0c98e7d712e2/go.mod h1:bu/pK8szGZ2puuErfU0RwyeNdsf3e6nCX/noXaVxkfw= -github.com/jinzhu/gorm v1.9.16/go.mod h1:G3LB3wezTOWM2ITLzPxEXgSkOXAntiLHS7UdBefADcs= -github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod 
h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jinzhu/now v0.0.0-20181116074157-8ec929ed50c3/go.mod h1:oHTiXerJ20+SfYcrdlBO7rzZRJWGwSTQ0iUY2jI6Gfc= -github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -1238,9 +1159,7 @@ github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+ github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/jmoiron/sqlx v1.3.4 h1:wv+0IJZfL5z0uZoUjlpKgHkgaFSYD+r9CfrXjEXsO7w= github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/backoff v0.0.0-20170918002102-8eab2debe79d/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= @@ -1260,16 +1179,12 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod 
h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= -github.com/karalabe/usb v0.0.0-20191104083709-911d15fe12a9/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= -github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= -github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= @@ -1296,7 +1211,6 @@ github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM52 github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/klauspost/reedsolomon v1.9.2/go.mod 
h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1310,7 +1224,6 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -1321,7 +1234,6 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+ github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/leanovate/gopter v0.2.8/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/leanovate/gopter v0.2.10-0.20210127095200-9abe2343507a h1:dHCfT5W7gghzPtfsW488uPmEOm85wewI+ypUwibyTdU= github.com/leanovate/gopter v0.2.10-0.20210127095200-9abe2343507a/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= @@ -1332,13 +1244,12 @@ github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ic github.com/lib/pq 
v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.5 h1:J+gdV2cUmX7ZqL2B0lFcW0m+egaHC2V3lpO8nWxyYiQ= +github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= github.com/libp2p/go-addr-util v0.0.2 h1:7cWK5cdA5x72jX0g8iLrQWm5TRJZ6CzGdPEhWj7plWU= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= @@ -1361,9 +1272,6 @@ github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZk github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= -github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= -github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= -github.com/libp2p/go-libp2p v0.10.2/go.mod h1:BYckt6lmS/oA1SlRETSPWSUulCQKiZuTVsymVMc//HQ= github.com/libp2p/go-libp2p v0.12.0/go.mod h1:FpHZrfC1q7nA8jitvdjKBDF31hguaC676g/nT9PgQM0= 
github.com/libp2p/go-libp2p v0.13.0 h1:tDdrXARSghmusdm0nf1U/4M8aj8Rr0V2IzQOXmbzQ3s= github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= @@ -1374,20 +1282,14 @@ github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/ github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= -github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= -github.com/libp2p/go-libp2p-autonat v0.3.1/go.mod h1:0OzOi1/cVc7UcxfOddemYD5vzEqi4fwRbnZcJGLi68U= github.com/libp2p/go-libp2p-autonat v0.4.0 h1:3y8XQbpr+ssX8QfZUHekjHCYK64sj6/4hnf/awD4+Ug= github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= -github.com/libp2p/go-libp2p-blankhost v0.1.6/go.mod h1:jONCAJqEP+Z8T6EQviGL4JsQcLx1LgTGtVqFNY8EMfQ= github.com/libp2p/go-libp2p-blankhost v0.2.0 h1:3EsGAi0CBGcZ33GwRuXEYJLLPoVWyXJ1bcJzAJjINkk= github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= -github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= -github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= -github.com/libp2p/go-libp2p-circuit v0.3.1/go.mod h1:8RMIlivu1+RxhebipJwFDA45DasLx+kkrp4IlJj53F4= github.com/libp2p/go-libp2p-circuit v0.4.0 
h1:eqQ3sEYkGTtybWgr6JLqJY6QLtPWRErvFjFDfAOO1wc= github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= @@ -1401,7 +1303,6 @@ github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUh github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-core v0.5.2/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= github.com/libp2p/go-libp2p-core v0.5.3/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM= @@ -1416,13 +1317,10 @@ github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJB github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= -github.com/libp2p/go-libp2p-discovery v0.4.0/go.mod h1:bZ0aJSrFc/eX2llP0ryhb1kpgkPyTo23SJ5b7UQCMh4= github.com/libp2p/go-libp2p-discovery v0.5.0 h1:Qfl+e5+lfDgwdrXdu4YNCWyEo3fWuP+WgN9mN0iWviQ= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-kad-dht v0.8.3/go.mod h1:HnYYy8taJWESkqiESd1ngb9XX/XGGsMA5G0Vj2HoSh4= github.com/libp2p/go-libp2p-kad-dht v0.11.1 h1:FsriVQhOUZpCotWIjyFSjEDNJmUzuMma/RyyTDZanwc= github.com/libp2p/go-libp2p-kad-dht v0.11.1/go.mod h1:5ojtR2acDPqh/jXf5orWy8YGb8bHQDS+qeDcoscL/PI= 
-github.com/libp2p/go-libp2p-kbucket v0.4.2/go.mod h1:7sCeZx2GkNK1S6lQnGUW5JYZCFPnXzAZCCBBS70lytY= github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= github.com/libp2p/go-libp2p-kbucket v0.4.7/go.mod h1:XyVo99AfQH0foSf176k4jY1xUJ2+jUJIZCSDm7r2YKk= github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= @@ -1431,7 +1329,6 @@ github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3 github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= -github.com/libp2p/go-libp2p-mplex v0.2.4/go.mod h1:mI7iOezdWFOisvUwaYd3IDrJ4oVmgoXK8H331ui39CE= github.com/libp2p/go-libp2p-mplex v0.3.0/go.mod h1:l9QWxRbbb5/hQMECEb908GbS9Sm2UAR2KFZKUJEynEs= github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aDN3Fx1sc= @@ -1451,14 +1348,11 @@ github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVd github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= -github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.7 h1:83JoLxyR9OYTnNfB5vvFqvMUv/xDNa6NoPHnENhBsGw= github.com/libp2p/go-libp2p-peerstore 
v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= -github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk= github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0= github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4= @@ -1470,7 +1364,6 @@ github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncH github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= -github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= github.com/libp2p/go-libp2p-swarm v0.3.1/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= @@ -1565,10 +1458,8 @@ github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-b github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8= github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod 
h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -1576,15 +1467,12 @@ github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f h1:tVvGiZQFjOXP+9YyGqSA6jE55x1XVxmoPYudncxrZ8U= github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f/go.mod h1:Z60vy0EZVSu0bOugCHdcN5ZxFMKSpjRgsnh0XKPFqqk= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= -github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= -github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= github.com/matryer/is 
v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= @@ -1619,15 +1507,12 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.10 h1:MLn+5bFRlWMGoSRmJour3CL1w/qL96mvipqpwQW/Sfk= github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= @@ -1638,13 +1523,11 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= 
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= @@ -1676,7 +1559,6 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= @@ -1758,7 +1640,6 @@ github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhf github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= 
github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.1.2/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.0 h1:6AuNmQVKUkRnddw2YiDjt5Elit40SFxMJkVnhmETXtU= github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= @@ -1788,8 +1669,6 @@ github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1t github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= -github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= -github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/neilotoole/errgroup v0.1.5/go.mod h1:Q2nLGf+594h0CLBs/Mbg6qOr7GtqDK7C2S41udRnToE= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= @@ -1805,7 +1684,6 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= 
-github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1819,11 +1697,9 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -1832,18 +1708,13 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0/go.mod 
h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= -github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1878,7 +1749,6 @@ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.1/go.mod 
h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= @@ -1897,7 +1767,6 @@ github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0Mw github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= -github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= @@ -1913,9 +1782,6 @@ github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= -github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod 
h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= @@ -1939,7 +1805,6 @@ github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prY github.com/pressly/goose/v3 v3.5.3 h1:lIQIIXVbdO2RuQtJBS1e7MZjKEk0demVWt6i0YPiOrg= github.com/pressly/goose/v3 v3.5.3/go.mod h1:IL4NNMdXx9O6hHpGbNB5l1hkVe/Avoz4gBDE5g7rQNg= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -1961,9 +1826,7 @@ github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181120120127-aeab699e26f4/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ 
-1976,10 +1839,10 @@ github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.33.0 h1:rHgav/0a6+uYgGdNt3jwz8FNSesO/Hsang3O0T9A5SE= +github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -2020,10 +1883,8 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod 
h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so= github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= @@ -2037,7 +1898,6 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= @@ -2068,29 +1928,7 @@ github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9Nz github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= -github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= -github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= -github.com/shurcooL/go 
v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= -github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= -github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= -github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= -github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= -github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= -github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= -github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= -github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= -github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= -github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= -github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= -github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod 
h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= -github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -2100,15 +1938,12 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartcontractkit/chainlink v0.8.10-0.20200825114219-81dd2fc95bac/go.mod h1:j7qIYHGCN4QqMXdO8g8A9dmUT5vKFmkxPSbjAIfrfNU= -github.com/smartcontractkit/chainlink v0.9.5-0.20201207211610-6c7fee37d5b7/go.mod h1:kmdLJbVZRCnBLiL6gG+U+1+0ofT3bB48DOF8tjQvcoI= -github.com/smartcontractkit/chainlink-solana v0.2.18-0.20220315140817-b4df0b6bc414 h1:2gjlzPo/FrxcSM28R2IxI4oCoNaG3cy3OZdByUHsy/E= -github.com/smartcontractkit/chainlink-solana v0.2.18-0.20220315140817-b4df0b6bc414/go.mod h1:6aPUF3+paoUKqxiCKCDAnPkWjv5PtZyPY4+HCEjGbiI= -github.com/smartcontractkit/chainlink-terra v0.1.4-0.20220315114020-a15962b0ed9b h1:t6aU0/re3Oy1lUwc0Jxnqjs8hx24wiyFh82yLSm+Bj0= -github.com/smartcontractkit/chainlink-terra v0.1.4-0.20220315114020-a15962b0ed9b/go.mod h1:jUCC6lW9Y8iWOupdw72u1ds22fCpM/qGqmeN5PQERoE= -github.com/smartcontractkit/libocr v0.0.0-20201203233047-5d9b24f0cbb5/go.mod h1:bfdSuLnBWCkafDvPGsQ1V6nrXhg046gh227MKi4zkpc= 
-github.com/smartcontractkit/libocr v0.0.0-20220217180537-449836e6cfec h1:vkTYAihTA8qQ30LKd0272NWLzOCWWCw78qk0LtIej0A= -github.com/smartcontractkit/libocr v0.0.0-20220217180537-449836e6cfec/go.mod h1:nq3crM3wVqnyMlM/4ZydTuJ/WyCapAsOt7P94oRgSPg= +github.com/smartcontractkit/chainlink-solana v0.2.20-0.20220420200429-3da7f865d367 h1:HfFnFidkNFhtEGWwPmdmv6j6apGo3Z5yuuUyU6VVQAE= +github.com/smartcontractkit/chainlink-solana v0.2.20-0.20220420200429-3da7f865d367/go.mod h1:iwugJOiM4V58Lt181Pp0MoPwnqTH2lE55bhddWTOdsg= +github.com/smartcontractkit/chainlink-terra v0.1.4-0.20220420200433-90c1ac0f3c2a h1:shnSDB+PfiEwjR6XDc/s5aWUcZJ7CYHkJsNoMDv4MPM= +github.com/smartcontractkit/chainlink-terra v0.1.4-0.20220420200433-90c1ac0f3c2a/go.mod h1:N1iTQh1GKVD89SNRXuxD+88mC6+u8dzkwchM9dtbHtM= +github.com/smartcontractkit/libocr v0.0.0-20220414173908-cdfa6bef133a h1:j5v0DlqdteLKgCB7My1On+KOO4levVV51byFGt3DoNA= +github.com/smartcontractkit/libocr v0.0.0-20220414173908-cdfa6bef133a/go.mod h1:tHNcoCeQdbRfjvVZjvd3n8yNTKUdnM+NmlWgSHhfceY= github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb h1:OMaBUb4X9IFPLbGbCHsMU+kw/BPCrewaVwWGIBc0I4A= github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb/go.mod h1:HNUu4cJekUdsJbwRBCiOybtkPJEfGRELQPe2tkoDEyk= github.com/smartcontractkit/terra.go v1.0.3-0.20220108002221-62b39252ee16 h1:k+E0RKzVSG1QpxXakNUtcGUhq4ZMe0MAJ5Awg/l9oSc= @@ -2122,8 +1957,6 @@ github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qt github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod 
h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= @@ -2132,7 +1965,6 @@ github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9/go.mod h1:JwIa github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= @@ -2172,7 +2004,6 @@ github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8q github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw= -github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod 
h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= @@ -2194,10 +2025,10 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -2209,12 +2040,9 @@ github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKk github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c 
h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= -github.com/templexxx/cpufeat v0.0.0-20180724012125-cef66df7f161/go.mod h1:wM7WEvslTq+iOEAMDLSzhVuOt5BRZ05WirO+b09GHQU= -github.com/templexxx/xor v0.0.0-20181023030647-4e92f724b73b/go.mod h1:5XA7W9S6mni3h5uvOC75dA3m9CCCaS83lltmc0ukdi4= github.com/tendermint/btcd v0.1.1 h1:0VcxPfflS2zZ3RiOAHkBiFUcPvbtRj5O7zHmcJWHV7s= github.com/tendermint/btcd v0.1.1/go.mod h1:DC6/m53jtQzr/NFmMNEu0rxf18/ktVoVtMrnDD5pN+U= github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15 h1:hqAk8riJvK4RMWx1aInLzndwxKalgi5rTqgfXxOxbEI= @@ -2245,30 +2073,20 @@ github.com/terra-money/core v0.5.14 h1:rXm7APnaqICec56nG/K8BiqoCY+qBtD8zT83d/Fys github.com/terra-money/core v0.5.14/go.mod h1:wO54CXtp2c32FqhLDEJtuK8gihPMyu+T0vpSVB+Ud98= github.com/test-go/testify v1.1.4 h1:Tf9lntrKUMHiXQ07qBScBTSA0dhYQlu83hswqelv1iE= github.com/test-go/testify v1.1.4/go.mod h1:rH7cfJo/47vWGdi4GPj16x3/t1xGOj2YxzmNQzk2ghU= -github.com/tevino/abool v0.0.0-20170917061928-9b9efcf221b5/go.mod h1:f1SCnEOt6sc3fOJfPQDRDzHOtSXuTtnz0ImG9kPRDV0= github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a h1:YuO+afVc3eqrjiCUizNCxI53bl/BnPiVwXqLzqYTqgU= github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a/go.mod h1:/sfW47zCZp9FrtGcWyo1VjbgDaodxX9ovZvgLb/MxaA= -github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= -github.com/tidwall/gjson v1.6.1/go.mod h1:BaHyNc5bjzYkPqgLq7mdVzeiRtULKULXLgZFKsxEHI0= -github.com/tidwall/gjson v1.6.3/go.mod h1:BaHyNc5bjzYkPqgLq7mdVzeiRtULKULXLgZFKsxEHI0= github.com/tidwall/gjson v1.6.7/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI= github.com/tidwall/gjson v1.9.3 h1:hqzS9wAHMO+KVBBkLxYdkEeeFHuqr95GfClRLKlgK0E= github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/match v1.0.1/go.mod 
h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.0.1/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tidwall/sjson v1.1.1/go.mod h1:yvVuSnpEQv5cYIrO+AT6kw4QVfd5SDZoGIS7/5+fZFs= -github.com/tidwall/sjson v1.1.2/go.mod h1:SEzaDwxiPzKzNfUEO4HbYF/m4UCSJDsGgNqsS1LvdoY= github.com/tidwall/sjson v1.1.4/go.mod h1:wXpKXu8CtDjKAZ+3DrKY5ROCorDFahq8l0tey/Lx1fg= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= -github.com/tjfoc/gmsm v1.0.1/go.mod h1:XxO4hdhhrzAd+G4CjDqaOkd0hUzmtPR/d3EiBBMn/wc= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= @@ -2299,7 +2117,6 @@ github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli 
v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU= github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= @@ -2309,8 +2126,6 @@ github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBn github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= -github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= @@ -2319,7 +2134,6 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17 github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmihailenco/msgpack/v5 v5.1.4/go.mod h1:C5gboKD0TJPqWDTVTtrQNfRbiBwHZGo8UTqP/9/XvLI= github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging 
v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= @@ -2335,8 +2149,6 @@ github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+m github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= @@ -2344,8 +2156,6 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/xtaci/kcp-go v5.4.5+incompatible/go.mod h1:bN6vIwHQbfHaHtFpEssmWsN45a+AZwO7eyRCmEIbtvE= -github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE= github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= github.com/ybbus/jsonrpc v2.1.2+incompatible/go.mod h1:XJrh1eMSzdIYFbM08flv0wp5G35eRniyeGut1z+LSiE= github.com/yudai/gojsondiff v1.0.0/go.mod 
h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= @@ -2370,7 +2180,6 @@ go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= go.dedis.ch/kyber/v3 v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= go.dedis.ch/kyber/v3 v3.0.9/go.mod h1:rhNjUUg6ahf8HEg5HUvVBYoWY4boAafX8tYxX+PS+qg= -go.dedis.ch/kyber/v3 v3.0.12/go.mod h1:kXy7p3STAurkADD+/aZcsznZGKVHEqbtmdIzvPfrs1U= go.dedis.ch/kyber/v3 v3.0.13 h1:s5Lm8p2/CsTMueQHCN24gPpZ4couBBeKU7r2Yl6r32o= go.dedis.ch/kyber/v3 v3.0.13/go.mod h1:kXy7p3STAurkADD+/aZcsznZGKVHEqbtmdIzvPfrs1U= go.dedis.ch/protobuf v1.0.5/go.mod h1:eIV4wicvi6JK0q/QnfIEGeSFNG0ZeB24kzut5+HaRLo= @@ -2392,7 +2201,6 @@ go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsX go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= -go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -2413,7 +2221,6 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.11 
h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= @@ -2438,23 +2245,16 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
-golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -2462,13 +2262,11 @@ golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -2505,7 +2303,6 @@ golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= @@ -2513,18 +2310,11 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20201008143054-e3b2a7f2fdc7 h1:2/QncOxxpPAdiH+E00abYw/SaQG353gltz79Nl1zrYE= -golang.org/x/exp v0.0.0-20201008143054-e3b2a7f2fdc7/go.mod h1:1phAWC201xIgDyaFpmDeZkgf70Q4Pd/CNqfRtVPtxNw= +golang.org/x/exp v0.0.0-20220328175248-053ad81199eb h1:pC9Okm6BVmxEw76PUu0XUbOTQ92JX11hfvqTjAV3qxM= +golang.org/x/exp v0.0.0-20220328175248-053ad81199eb/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod 
h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -2547,23 +2337,19 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 h1:LQmS1nU0twXLA96Kt7U9qtHJEbBk3z6Q0V4UXjZkpr4= golang.org/x/net v0.0.0-20170324220409-6c2325251549/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2571,9 +2357,7 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -2587,7 +2371,6 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2635,11 +2418,10 @@ golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net 
v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2657,7 +2439,7 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2677,7 +2459,6 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2689,7 +2470,6 @@ golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2697,7 +2477,6 @@ 
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2714,7 +2493,6 @@ golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2780,7 +2558,6 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2845,7 +2622,6 @@ golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2863,15 +2639,12 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624180213-70d37148ca0c/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190912185636-87d9f09c5d89/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2900,8 +2673,6 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200410194907-79a7a3126eef/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools 
v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -2917,9 +2688,7 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201124202034-299f270db459/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201202200335-bef1c476418a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -2943,17 +2712,12 @@ gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJ gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.9.3 h1:DnoIG+QAMaF5NvxnGe/oKsgKcAc6PcUyl8q0VetfQ8s= -gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= gonum.org/v1/netlib 
v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -2991,7 +2755,6 @@ google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3h google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -3002,11 +2765,7 @@ google.golang.org/appengine v1.6.7/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -3083,8 +2842,6 @@ google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 h1:Et6SkiuvnBn+SgrSYXs/BrUpGB4mbdwt4R3vaPIlicA= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod 
h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -3137,11 +2894,11 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -3159,10 +2916,8 @@ gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8 gopkg.in/go-playground/validator.v8 v8.18.1/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/go-playground/validator.v9 v9.29.1/go.mod 
h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= -gopkg.in/gormigrate.v1 v1.6.0/go.mod h1:Lf00lQrHqfSYWiTtPcyQabsDdM6ejZaMgV0OU6JMSlw= gopkg.in/guregu/null.v2 v2.1.2 h1:YOuepWdYqGnrenzPyMi+ybCjeDzjdazynbwsXXOk4i8= gopkg.in/guregu/null.v2 v2.1.2/go.mod h1:XORrx8tyS5ZDcyUboCIxQtta/Aujk/6pfWrn9Xe33mU= -gopkg.in/guregu/null.v3 v3.5.0/go.mod h1:E4tX2Qe3h7QdL+uZ3a0vqvYwKQsRSQKM5V4YltdgH9Y= gopkg.in/guregu/null.v4 v4.0.0 h1:1Wm3S1WEA2I26Kq+6vcW+w0gcDo44YKYD7YIEJNHDjg= gopkg.in/guregu/null.v4 v4.0.0/go.mod h1:YoQhUrADuG3i9WqesrCmpNRwm1ypAgSHYqoOcTu/JrI= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= @@ -3173,11 +2928,6 @@ gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= -gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= -gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= @@ -3185,7 +2935,6 @@ gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7 gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= 
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= -gopkg.in/redis.v4 v4.2.4/go.mod h1:8KREHdypkCEojGKQcjMqAODMICIVwZAONWq8RowTITA= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -3218,7 +2967,6 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -3227,7 +2975,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.6/go.mod h1:pyyisuGw24ruLjrr1ddx39WE0y9OooInRzEYLhQB2YY= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod 
h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= @@ -3395,5 +3142,3 @@ sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= -sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/integration-tests/README.md b/integration-tests/README.md index 2d89bd8c312..ea39b5fd140 100644 --- a/integration-tests/README.md +++ b/integration-tests/README.md @@ -12,9 +12,13 @@ increase minikube's resources significantly, or get a more substantial cluster. This is necessary to deploy ephemeral testing environments, which include external adapters, chainlink nodes and their DBs, as well as some simulated blockchains, all depending on the types of tests and networks being used. -### Running +## Install Ginkgo + +[Ginkgo](https://onsi.github.io/ginkgo/) is the testing framework we use to compile and run our tests. It comes with a lot of handy testing setups and goodies on top of the standard Go testing packages. -Our suggested way to run these tests is to use [the ginkgo cli](https://onsi.github.io/ginkgo/#the-ginkgo-cli). +`go install github.com/onsi/ginkgo/v2/ginkgo` + +### Running The default for this repo is the utilize the Makefile. @@ -40,14 +44,4 @@ or set environment variables that are all caps versions of the values found in t chainlink_image: # Image of chainlink node chainlink_version: # Version of the image on the chainlink node chainlink_env_values: # Environment values to pass onto the chainlink nodes - -# Specify the image and version of the simulated geth image you want to run tests against. 
Leave blank for default. -# Has no effect when running tests on networks other than the simulated geth instances. -geth_image: # Image of the simulated geth to use -geth_version: # Version of the geth image -geth_args: # List of CLI arguments to pass to simulated geth image. WARNING ``` - -### WARNING - -Values passed into `geth_args` will fully REPLACE all existing defaults we use in our launch. This enables freedom from defaults, but you should most definitely look at all the [current defaults](https://github.com/smartcontractkit/helmenv/blob/master/charts/geth/values.yaml#L16) we usually use and replace them as necessary. diff --git a/integration-tests/framework.yaml b/integration-tests/framework.yaml index ca2dbfdb64b..77c45551a91 100644 --- a/integration-tests/framework.yaml +++ b/integration-tests/framework.yaml @@ -12,12 +12,6 @@ chainlink_image: chainlink_version: chainlink_env_values: -# Specify the image and version of the simulated geth image you want to run tests against. Leave blank for default. -# Has no effect when running tests on networks other than the simulated geth instances. 
-geth_image: -geth_version: -geth_args: - # Setting an environment file allows for persistent, not ephemeral environments on test execution # # For example, if an environment is created with helmenv CLI, then the YAML file outputted on creation can be diff --git a/integration-tests/go.mod b/integration-tests/go.mod index b276c0cc9ec..5881cca7754 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -1,15 +1,15 @@ module github.com/smartcontractkit/chainlink/integration-tests -go 1.17 +go 1.18 require ( github.com/ethereum/go-ethereum v1.10.16 github.com/onsi/ginkgo/v2 v2.1.3 - github.com/onsi/gomega v1.18.1 + github.com/onsi/gomega v1.19.0 github.com/rs/zerolog v1.26.1 github.com/satori/go.uuid v1.2.0 - github.com/smartcontractkit/helmenv v1.0.38 - github.com/smartcontractkit/integrations-framework v1.0.50 + github.com/smartcontractkit/helmenv v1.0.60 + github.com/smartcontractkit/integrations-framework v1.1.10 ) require ( @@ -31,17 +31,18 @@ require ( github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect + github.com/avast/retry-go v3.0.0+incompatible // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/btcsuite/btcd v0.22.0-beta // indirect github.com/cavaliercoder/grab v2.0.0+incompatible // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect - github.com/containerd/containerd v1.5.9 // indirect + github.com/containerd/containerd v1.5.10 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set v1.8.0 // indirect github.com/docker/cli v20.10.11+incompatible // indirect - github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/distribution v2.8.1+incompatible // indirect 
github.com/docker/docker v20.10.12+incompatible // indirect github.com/docker/docker-credential-helpers v0.6.4 // indirect github.com/docker/go-connections v0.4.0 // indirect @@ -63,7 +64,7 @@ require ( github.com/go-stack/stack v1.8.1 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.0.0 // indirect + github.com/golang-jwt/jwt/v4 v4.3.0 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/btree v1.0.1 // indirect github.com/google/go-cmp v0.5.6 // indirect @@ -79,14 +80,14 @@ require ( github.com/huandu/xstrings v1.3.2 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect - github.com/jmoiron/sqlx v1.3.4 // indirect + github.com/jmoiron/sqlx v1.3.5 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect github.com/klauspost/compress v1.13.6 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect - github.com/lib/pq v1.10.4 // indirect + github.com/lib/pq v1.10.5 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/magiconair/properties v1.8.5 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -114,7 +115,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.12.1 // indirect github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/common v0.33.0 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/rjeczalik/notify v0.9.2 // indirect @@ -123,6 +124,7 @@ require ( github.com/shirou/gopsutil v3.21.10+incompatible // indirect github.com/shopspring/decimal v1.3.1 // indirect 
github.com/sirupsen/logrus v1.8.1 // indirect + github.com/slack-go/slack v0.10.2 // indirect github.com/smartcontractkit/libocr v0.0.0-20220121130134-5d2b1d5f424b // indirect github.com/spf13/afero v1.6.0 // indirect github.com/spf13/cast v1.4.1 // indirect @@ -130,7 +132,7 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.10.1 // indirect - github.com/stretchr/testify v1.7.0 // indirect + github.com/stretchr/testify v1.7.1 // indirect github.com/subosito/gotenv v1.2.0 // indirect github.com/tklauser/go-sysconf v0.3.9 // indirect github.com/tklauser/numcpus v0.3.0 // indirect @@ -142,8 +144,8 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e // indirect - golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d // indirect - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect @@ -160,17 +162,17 @@ require ( gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - helm.sh/helm/v3 v3.8.0 // indirect - k8s.io/api v0.23.4 // indirect - k8s.io/apiextensions-apiserver v0.23.1 // indirect - k8s.io/apimachinery v0.23.4 // indirect - k8s.io/apiserver v0.23.1 // indirect - k8s.io/cli-runtime v0.23.4 // indirect - k8s.io/client-go v0.23.4 // indirect - k8s.io/component-base v0.23.4 // indirect + helm.sh/helm/v3 v3.8.1 // indirect + k8s.io/api v0.23.5 // indirect + k8s.io/apiextensions-apiserver v0.23.4 // indirect + k8s.io/apimachinery v0.23.5 // 
indirect + k8s.io/apiserver v0.23.4 // indirect + k8s.io/cli-runtime v0.23.5 // indirect + k8s.io/client-go v0.23.5 // indirect + k8s.io/component-base v0.23.5 // indirect k8s.io/klog/v2 v2.30.0 // indirect k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect - k8s.io/kubectl v0.23.4 // indirect + k8s.io/kubectl v0.23.5 // indirect k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect oras.land/oras-go v1.1.0 // indirect sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect diff --git a/integration-tests/go.sum b/integration-tests/go.sum index c0fc6708d5f..a96666dd780 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -217,6 +217,8 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= +github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= @@ -373,8 +375,9 @@ github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09Zvgq github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= 
-github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4= github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/containerd v1.5.10 h1:3cQ2uRVCkJVcx5VombsE7105Gl9Wrl7ORAO3+4+ogf4= +github.com/containerd/containerd v1.5.10/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -504,8 +507,9 @@ github.com/docker/cli v20.10.11+incompatible h1:tXU1ezXcruZQRrMP8RN2z9N91h+6egZT github.com/docker/cli v20.10.11+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= @@ -642,9 +646,11 @@ github.com/go-kit/kit 
v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= @@ -683,12 +689,15 @@ github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5Nq github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod 
h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M= github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= @@ -725,8 +734,9 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= +github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= @@ -1046,8 +1056,9 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify 
v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.3.4 h1:wv+0IJZfL5z0uZoUjlpKgHkgaFSYD+r9CfrXjEXsO7w= github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -1136,8 +1147,9 @@ github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.5 h1:J+gdV2cUmX7ZqL2B0lFcW0m+egaHC2V3lpO8nWxyYiQ= +github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= @@ -1562,7 +1574,6 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo/v2 
v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -1578,8 +1589,8 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw= @@ -1692,8 +1703,9 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.33.0 
h1:rHgav/0a6+uYgGdNt3jwz8FNSesO/Hsang3O0T9A5SE= +github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1795,12 +1807,14 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/slack-go/slack v0.10.2 h1:KMN/h2sgUninHXvQI8PrR/PHBUuWp2NPvz2Kr66tki4= +github.com/slack-go/slack v0.10.2/go.mod h1:5FLdBRv7VW/d9EBxx/eEktOptWygbA9K2QK/KW7ds1s= github.com/smartcontractkit/chainlink v0.8.10-0.20200825114219-81dd2fc95bac/go.mod h1:j7qIYHGCN4QqMXdO8g8A9dmUT5vKFmkxPSbjAIfrfNU= github.com/smartcontractkit/chainlink v0.9.5-0.20201207211610-6c7fee37d5b7/go.mod h1:kmdLJbVZRCnBLiL6gG+U+1+0ofT3bB48DOF8tjQvcoI= -github.com/smartcontractkit/helmenv v1.0.38 h1:9BcFn2fEVm5pjm3LqI8ANLiL6i5yF82JH6vCaNqvS88= -github.com/smartcontractkit/helmenv v1.0.38/go.mod h1:Co8Gvpy0In6lrMxHQJOS0oZ8XzFs25Sfg5+cpd2kcKI= -github.com/smartcontractkit/integrations-framework v1.0.50 h1:r9f0pRWGOT4CcPUx6XAf+LRON8SDbda3zbf2kR1O8WQ= -github.com/smartcontractkit/integrations-framework v1.0.50/go.mod h1:IZyYezzgpwa1Ir3iZMjAnJfwg+pJB5kyExBiwZqQe9c= +github.com/smartcontractkit/helmenv v1.0.60 h1:c1FDBjFn+LHv8x0NbGsci3FziZF9208C9ulbi0O7kcY= +github.com/smartcontractkit/helmenv v1.0.60/go.mod h1:VbfetB06uItvuxJKdZ6AftpbATYCnT40IaKRr75ebcs= +github.com/smartcontractkit/integrations-framework v1.1.10 
h1:vtqU59FOcBSRZoggrsp5bN1ySy3ga9sCUitZmJpdLj4= +github.com/smartcontractkit/integrations-framework v1.1.10/go.mod h1:GdTraG7UB8hq65cNkEKT5DO6mdzUeiEdWBAL9p54le4= github.com/smartcontractkit/libocr v0.0.0-20201203233047-5d9b24f0cbb5/go.mod h1:bfdSuLnBWCkafDvPGsQ1V6nrXhg046gh227MKi4zkpc= github.com/smartcontractkit/libocr v0.0.0-20220121130134-5d2b1d5f424b h1:9xEvwk6fHqL9Fp/u8bLd7kbEQKrDSzwuDsH1ptHjE9g= github.com/smartcontractkit/libocr v0.0.0-20220121130134-5d2b1d5f424b/go.mod h1:nq3crM3wVqnyMlM/4ZydTuJ/WyCapAsOt7P94oRgSPg= @@ -1878,8 +1892,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -2231,8 +2246,10 @@ golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net 
v0.0.0-20220107192237-5cfca573fb4d h1:62NvYBuaanGXR2ZOfwDFkhhl6X1DUgf8qg3GuQvxZsE= golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2252,8 +2269,9 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2786,8 +2804,8 @@ gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= -helm.sh/helm/v3 v3.8.0 h1:vlQQDDQkrH4NECOFbGcwjjKyHL5Sa3xNLjMxXm7fMVo= -helm.sh/helm/v3 v3.8.0/go.mod h1:0nYPSuvuj8TTJDLRSAfbzGGbazPZsayaDpP8s9FfZT8= +helm.sh/helm/v3 v3.8.1 h1:J1EzhvtvKJRdx9skjUVe5xPN7KK2VA1mVxiQ9Ic5+oU= +helm.sh/helm/v3 v3.8.1/go.mod h1:Nm0Z2ciZFFvR9cRKpiRE2SMhJTgqY0b+ezT2cDcyqNw= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2801,42 +2819,42 @@ honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.23.1/go.mod h1:WfXnOnwSqNtG62Y1CdjoMxh7r7u9QXGCkA1u0na2jgo= -k8s.io/api v0.23.4 h1:85gnfXQOWbJa1SiWGpE9EEtHs0UVvDyIsSMpEtl2D4E= k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI= -k8s.io/apiextensions-apiserver v0.23.1 h1:xxE0q1vLOVZiWORu1KwNRQFsGWtImueOrqSl13sS5EU= -k8s.io/apiextensions-apiserver v0.23.1/go.mod h1:0qz4fPaHHsVhRApbtk3MGXNn2Q9M/cVWWhfHdY2SxiM= +k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA= +k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8= +k8s.io/apiextensions-apiserver v0.23.4 h1:AFDUEu/yEf0YnuZhqhIFhPLPhhcQQVuR1u3WCh0rveU= 
+k8s.io/apiextensions-apiserver v0.23.4/go.mod h1:TWYAKymJx7nLMxWCgWm2RYGXHrGlVZnxIlGnvtfYu+g= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno= -k8s.io/apimachinery v0.23.4 h1:fhnuMd/xUL3Cjfl64j5ULKZ1/J9n8NuQEgNL+WXWfdM= k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= +k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0= +k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/apiserver v0.23.1 h1:vWGf8LcV9Pk/z5rdLmCiBDqE21ccbe930dzrtVMhw9g= -k8s.io/apiserver v0.23.1/go.mod h1:Bqt0gWbeM2NefS8CjWswwd2VNAKN6lUKR85Ft4gippY= -k8s.io/cli-runtime v0.23.1/go.mod h1:r9r8H/qfXo9w+69vwUL7LokKlLRKW5D6A8vUKCx+YL0= -k8s.io/cli-runtime v0.23.4 h1:C3AFQmo4TK4dlVPLOI62gtHEHu0OfA2Cp4UVRZ1JXns= +k8s.io/apiserver v0.23.4 h1:zNvQlG+C/ERjuUz4p7eY/0IWHaMixRSBoxgmyIdwo9Y= +k8s.io/apiserver v0.23.4/go.mod h1:A6l/ZcNtxGfPSqbFDoxxOjEjSKBaQmE+UTveOmMkpNc= k8s.io/cli-runtime v0.23.4/go.mod h1:7KywUNTUibmHPqmpDFuRO1kc9RhsufHv2lkjCm2YZyM= +k8s.io/cli-runtime v0.23.5 h1:Z7XUpGoJZYZB2uNjQfJjMbyDKyVkoBGye62Ap0sWQHY= +k8s.io/cli-runtime v0.23.5/go.mod h1:oY6QDF2qo9xndSq32tqcmRp2UyXssdGrLfjAVymgbx4= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.23.1/go.mod 
h1:6QSI8fEuqD4zgFK0xbdwfB/PthBsIxCJMa3s17WlcO0= -k8s.io/client-go v0.23.4 h1:YVWvPeerA2gpUudLelvsolzH7c2sFoXXR5wM/sWqNFU= k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0= +k8s.io/client-go v0.23.5 h1:zUXHmEuqx0RY4+CsnkOn5l0GU+skkRXKGJrhmE2SLd8= +k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= -k8s.io/code-generator v0.23.1/go.mod h1:V7yn6VNTCWW8GqodYCESVo95fuiEg713S8B7WacWZDA= k8s.io/code-generator v0.23.4/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= +k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= -k8s.io/component-base v0.23.1/go.mod h1:6llmap8QtJIXGDd4uIWJhAq0Op8AtQo6bDW2RrNMTeo= -k8s.io/component-base v0.23.4 h1:SziYh48+QKxK+ykJ3Ejqd98XdZIseVBG7sBaNLPqy6M= k8s.io/component-base v0.23.4/go.mod h1:8o3Gg8i2vnUXGPOwciiYlkSaZT+p+7gA9Scoz8y4W4E= -k8s.io/component-helpers v0.23.1/go.mod h1:ZK24U+2oXnBPcas2KolLigVVN9g5zOzaHLkHiQMFGr0= +k8s.io/component-base v0.23.5 h1:8qgP5R6jG1BBSXmRYW+dsmitIrpk8F/fPEvgDenMCCE= +k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0= k8s.io/component-helpers v0.23.4/go.mod h1:1Pl7L4zukZ054ElzRbvmZ1FJIU8roBXFOeRFu8zipa4= +k8s.io/component-helpers v0.23.5/go.mod h1:5riXJgjTIs+ZB8xnf5M2anZ8iQuq37a0B/0BgoPQuSM= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= @@ -2855,15 +2873,14 @@ k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAG k8s.io/kube-openapi 
v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/kubectl v0.23.1/go.mod h1:Ui7dJKdUludF8yWAOSN7JZEkOuYixX5yF6E6NjoukKE= -k8s.io/kubectl v0.23.4 h1:mAa+zEOlyZieecEy+xSrhjkpMcukYyHWzcNdX28dzMY= k8s.io/kubectl v0.23.4/go.mod h1:Dgb0Rvx/8JKS/C2EuvsNiQc6RZnX0SbHJVG3XUzH6ok= +k8s.io/kubectl v0.23.5 h1:DmDULqCaF4qstj0Im143XmncvqWtJxHzK8IrW2BzlU0= +k8s.io/kubectl v0.23.5/go.mod h1:lLgw7cVY8xbd7o637vOXPca/w6HC205KsPCRDYRCxwE= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/metrics v0.23.1/go.mod h1:qXvsM1KANrc+ZZeFwj6Phvf0NLiC+d3RwcsLcdGc+xs= k8s.io/metrics v0.23.4/go.mod h1:cl6sY9BdVT3DubbpqnkPIKi6mn/F2ltkU4yH1tEJ3Bo= +k8s.io/metrics v0.23.5/go.mod h1:WNAtV2a5BYbmDS8+7jSqYYV6E3efuGTpIwJ8PTD1wgs= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= oras.land/oras-go v1.1.0 h1:tfWM1RT7PzUwWphqHU6ptPU3ZhwVnSw/9nEGf519rYg= @@ -2874,7 +2891,7 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= 
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27/go.mod h1:tq2nT0Kx7W+/f2JVE+zxYtUhdjuELJkVpNz+x/QN5R4= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/kustomize/api v0.10.1 h1:KgU7hfYoscuqag84kxtzKdEC3mKMb99DPI3a0eaV1d0= @@ -2886,7 +2903,6 @@ sigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLC sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/integration-tests/smoke/cron_test.go b/integration-tests/smoke/cron_test.go index 41288369aee..6a322ec0cf4 100644 --- a/integration-tests/smoke/cron_test.go +++ b/integration-tests/smoke/cron_test.go @@ -1,4 +1,4 @@ -package smoke_test +package smoke //revive:disable:dot-imports import ( @@ -11,66 +11,72 @@ import ( "github.com/smartcontractkit/helmenv/tools" "github.com/smartcontractkit/integrations-framework/actions" "github.com/smartcontractkit/integrations-framework/client" + "github.com/smartcontractkit/integrations-framework/config" "github.com/smartcontractkit/integrations-framework/utils" ) var _ = Describe("Cronjob suite @cron", func() { var ( - err error - job 
*client.Job - cls []client.Chainlink - mockserver *client.MockserverClient - e *environment.Environment + err error + job *client.Job + chainlinkNode client.Chainlink + mockserver *client.MockserverClient + e *environment.Environment ) BeforeEach(func() { By("Deploying the environment", func() { e, err = environment.DeployOrLoadEnvironment( - environment.NewChainlinkConfig(nil, ""), + environment.NewChainlinkConfig( + config.ChainlinkVals(), + "chainlink-cron-core-ci", + config.GethNetworks()..., + ), tools.ChartsRoot, ) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Environment deployment shouldn't fail") err = e.ConnectAll() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to all nodes shouldn't fail") }) By("Connecting to launched resources", func() { - cls, err = client.ConnectChainlinkNodes(e) - Expect(err).ShouldNot(HaveOccurred()) + cls, err := client.ConnectChainlinkNodes(e) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to chainlink nodes shouldn't fail") mockserver, err = client.ConnectMockServer(e) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Creating mockserver client shouldn't fail") + chainlinkNode = cls[0] }) By("Adding cron job to a node", func() { err = mockserver.SetValuePath("/variable", 5) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Setting value path in mockserver shouldn't fail") bta := client.BridgeTypeAttributes{ Name: fmt.Sprintf("variable-%s", uuid.NewV4().String()), URL: fmt.Sprintf("%s/variable", mockserver.Config.ClusterURL), RequestData: "{}", } - err = cls[0].CreateBridge(&bta) - Expect(err).ShouldNot(HaveOccurred()) + err = chainlinkNode.CreateBridge(&bta) + Expect(err).ShouldNot(HaveOccurred(), "Creating bridge in chainlink node shouldn't fail") - job, err = cls[0].CreateJob(&client.CronJobSpec{ + job, err = chainlinkNode.CreateJob(&client.CronJobSpec{ Schedule: "CRON_TZ=UTC * * * 
* * *", ObservationSource: client.ObservationSourceSpecBridge(bta), }) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Creating Cron Job in chainlink node shouldn't fail") }) }) Describe("with Cron job", func() { It("runs 5 or more times with no errors", func() { Eventually(func(g Gomega) { - jobRuns, err := cls[0].ReadRunsByJob(job.Data.ID) - g.Expect(err).ShouldNot(HaveOccurred()) + jobRuns, err := chainlinkNode.ReadRunsByJob(job.Data.ID) + g.Expect(err).ShouldNot(HaveOccurred(), "Reading Job run data shouldn't fail") - g.Expect(len(jobRuns.Data)).Should(BeNumerically(">=", 5)) + g.Expect(len(jobRuns.Data)).Should(BeNumerically(">=", 5), "Expected number of job runs to be greater than 5, but got %d", len(jobRuns.Data)) for _, jr := range jobRuns.Data { - g.Expect(jr.Attributes.Errors).Should(Equal([]interface{}{nil})) + g.Expect(jr.Attributes.Errors).Should(Equal([]interface{}{nil}), "Job run %s shouldn't have errors", jr.ID) } }, "2m", "1s").Should(Succeed()) }) @@ -78,8 +84,11 @@ var _ = Describe("Cronjob suite @cron", func() { AfterEach(func() { By("Tearing down the environment", func() { - err = actions.TeardownSuite(e, nil, utils.ProjectRoot, nil) - Expect(err).ShouldNot(HaveOccurred()) + networkRegistry := client.NewDefaultNetworkRegistry() + networks, err := networkRegistry.GetNetworks(e) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to blockchain nodes shouldn't fail") + err = actions.TeardownSuite(e, networks, utils.ProjectRoot, []client.Chainlink{chainlinkNode}, nil) + Expect(err).ShouldNot(HaveOccurred(), "Environment teardown shouldn't fail") }) }) }) diff --git a/integration-tests/smoke/flux_test.go b/integration-tests/smoke/flux_test.go index 910388cf324..8ca713f4980 100644 --- a/integration-tests/smoke/flux_test.go +++ b/integration-tests/smoke/flux_test.go @@ -1,4 +1,4 @@ -package smoke_test +package smoke //revive:disable:dot-imports import ( @@ -13,6 +13,7 @@ import ( 
"github.com/smartcontractkit/helmenv/environment" "github.com/smartcontractkit/helmenv/tools" "github.com/smartcontractkit/integrations-framework/actions" + "github.com/smartcontractkit/integrations-framework/config" "github.com/smartcontractkit/integrations-framework/utils" . "github.com/onsi/ginkgo/v2" @@ -26,66 +27,78 @@ var _ = Describe("Flux monitor suite @flux", func() { var ( err error nets *client.Networks + defaultNetwork client.BlockchainClient cd contracts.ContractDeployer lt contracts.LinkToken fluxInstance contracts.FluxAggregator - cls []client.Chainlink + chainlinkNodes []client.Chainlink mockserver *client.MockserverClient nodeAddresses []common.Address adapterPath string adapterUUID string fluxRoundTimeout = 2 * time.Minute - e *environment.Environment + env *environment.Environment ) BeforeEach(func() { By("Deploying the environment", func() { - e, err = environment.DeployOrLoadEnvironment( - environment.NewChainlinkConfig(environment.ChainlinkReplicas(3, nil), ""), + env, err = environment.DeployOrLoadEnvironment( + environment.NewChainlinkConfig( + environment.ChainlinkReplicas(3, config.ChainlinkVals()), + "chainlink-flux-core-ci", + config.GethNetworks()..., + ), tools.ChartsRoot, ) - Expect(err).ShouldNot(HaveOccurred()) - err = e.ConnectAll() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Environment deployment shouldn't fail") + err = env.ConnectAll() + Expect(err).ShouldNot(HaveOccurred(), "Connecting to all nodes shouldn't fail") }) By("Connecting to launched resources", func() { - networkRegistry := client.NewNetworkRegistry() - nets, err = networkRegistry.GetNetworks(e) - Expect(err).ShouldNot(HaveOccurred()) - cd, err = contracts.NewContractDeployer(nets.Default) - Expect(err).ShouldNot(HaveOccurred()) - cls, err = client.ConnectChainlinkNodes(e) - Expect(err).ShouldNot(HaveOccurred()) - nodeAddresses, err = actions.ChainlinkNodeAddresses(cls) - Expect(err).ShouldNot(HaveOccurred()) - mockserver, err = 
client.ConnectMockServer(e) - Expect(err).ShouldNot(HaveOccurred()) - nets.Default.ParallelTransactions(true) + networkRegistry := client.NewDefaultNetworkRegistry() + nets, err = networkRegistry.GetNetworks(env) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to blockchain nodes shouldn't fail") + defaultNetwork = nets.Default + + cd, err = contracts.NewContractDeployer(defaultNetwork) + Expect(err).ShouldNot(HaveOccurred(), "Deploying contracts shouldn't fail") + chainlinkNodes, err = client.ConnectChainlinkNodes(env) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to chainlink nodes shouldn't fail") + nodeAddresses, err = actions.ChainlinkNodeAddresses(chainlinkNodes) + Expect(err).ShouldNot(HaveOccurred(), "Retreiving on-chain wallet addresses for chainlink nodes shouldn't fail") + mockserver, err = client.ConnectMockServer(env) + Expect(err).ShouldNot(HaveOccurred(), "Creating mock server client shouldn't fail") + + defaultNetwork.ParallelTransactions(true) }) By("Setting initial adapter value", func() { adapterUUID = uuid.NewV4().String() adapterPath = fmt.Sprintf("/variable-%s", adapterUUID) err = mockserver.SetValuePath(adapterPath, 1e5) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Setting mockserver value path shouldn't fail") }) By("Deploying and funding contract", func() { lt, err = cd.DeployLinkTokenContract() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Deploying Link Token Contract shouldn't fail") fluxInstance, err = cd.DeployFluxAggregatorContract(lt.Address(), contracts.DefaultFluxAggregatorOptions()) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Deploying Flux Aggregator Contract shouldn't fail") + err = defaultNetwork.WaitForEvents() + Expect(err).ShouldNot(HaveOccurred(), "Failed waiting for deployment of flux aggregator contract") + err = lt.Transfer(fluxInstance.Address(), big.NewInt(1e18)) - Expect(err).ShouldNot(HaveOccurred()) 
+ Expect(err).ShouldNot(HaveOccurred(), "Funding Flux Aggregator Contract shouldn't fail") + err = defaultNetwork.WaitForEvents() + Expect(err).ShouldNot(HaveOccurred(), "Failed waiting for funding of flux aggregator contract") + err = fluxInstance.UpdateAvailableFunds() - Expect(err).ShouldNot(HaveOccurred()) - err = nets.Default.WaitForEvents() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Updating the available funds on the Flux Aggregator Contract shouldn't fail") }) By("Funding Chainlink nodes", func() { - err = actions.FundChainlinkNodes(cls, nets.Default, big.NewFloat(1)) - Expect(err).ShouldNot(HaveOccurred()) + err = actions.FundChainlinkNodes(chainlinkNodes, defaultNetwork, big.NewFloat(1)) + Expect(err).ShouldNot(HaveOccurred(), "Funding chainlink nodes with ETH shouldn't fail") }) By("Setting oracle options", func() { @@ -98,11 +111,11 @@ var _ = Describe("Flux monitor suite @flux", func() { MaxSubmissions: 3, RestartDelayRounds: 0, }) - Expect(err).ShouldNot(HaveOccurred()) - err = nets.Default.WaitForEvents() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Setting oracle options in the Flux Aggregator contract shouldn't fail") + err = defaultNetwork.WaitForEvents() + Expect(err).ShouldNot(HaveOccurred(), "Waiting for event subscriptions in nodes shouldn't fail") oracles, err := fluxInstance.GetOracles(context.Background()) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Getting oracle details from the Flux aggregator contract shouldn't fail") log.Info().Str("Oracles", strings.Join(oracles, ",")).Msg("Oracles set") }) @@ -112,9 +125,9 @@ var _ = Describe("Flux monitor suite @flux", func() { Name: fmt.Sprintf("variable-%s", adapterUUID), URL: adapterFullURL, } - for _, n := range cls { + for i, n := range chainlinkNodes { err = n.CreateBridge(&bta) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Creating bridge shouldn't 
fail for node %d", i+1) fluxSpec := &client.FluxMonitorJobSpec{ Name: fmt.Sprintf("flux-monitor-%s", adapterUUID), @@ -126,7 +139,7 @@ var _ = Describe("Flux monitor suite @flux", func() { ObservationSource: client.ObservationSourceSpecBridge(bta), } _, err = n.CreateJob(fluxSpec) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Creating flux job shouldn't fail for node %d", i+1) } }) }) @@ -135,47 +148,47 @@ var _ = Describe("Flux monitor suite @flux", func() { It("performs two rounds and has withdrawable payments for oracles", func() { // initial value set is performed before jobs creation fluxRound := contracts.NewFluxAggregatorRoundConfirmer(fluxInstance, big.NewInt(1), fluxRoundTimeout) - nets.Default.AddHeaderEventSubscription(fluxInstance.Address(), fluxRound) - err = nets.Default.WaitForEvents() - Expect(err).ShouldNot(HaveOccurred()) + defaultNetwork.AddHeaderEventSubscription(fluxInstance.Address(), fluxRound) + err = defaultNetwork.WaitForEvents() + Expect(err).ShouldNot(HaveOccurred(), "Waiting for event subscriptions in nodes shouldn't fail") data, err := fluxInstance.GetContractData(context.Background()) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Getting contract data from flux aggregator contract shouldn't fail") log.Info().Interface("Data", data).Msg("Round data") - Expect(data.LatestRoundData.Answer.Int64()).Should(Equal(int64(1e5))) - Expect(data.LatestRoundData.RoundId.Int64()).Should(Equal(int64(1))) - Expect(data.LatestRoundData.AnsweredInRound.Int64()).Should(Equal(int64(1))) - Expect(data.AvailableFunds.Int64()).Should(Equal(int64(999999999999999997))) - Expect(data.AllocatedFunds.Int64()).Should(Equal(int64(3))) + Expect(data.LatestRoundData.Answer.Int64()).Should(Equal(int64(1e5)), "Expected latest round answer to be %d, but found %d", int64(1e5), data.LatestRoundData.Answer.Int64()) + Expect(data.LatestRoundData.RoundId.Int64()).Should(Equal(int64(1)), "Expected latest 
round id to be %d, but found %d", int64(1), data.LatestRoundData.RoundId.Int64()) + Expect(data.LatestRoundData.AnsweredInRound.Int64()).Should(Equal(int64(1)), "Expected latest round's answered in round to be %d, but found %d", int64(1), data.LatestRoundData.AnsweredInRound.Int64()) + Expect(data.AvailableFunds.Int64()).Should(Equal(int64(999999999999999997)), "Expected available funds to be %d, but found %d", int64(999999999999999997), data.AvailableFunds.Int64()) + Expect(data.AllocatedFunds.Int64()).Should(Equal(int64(3)), "Expected allocated funds to be %d, but found %d", int64(3), data.AllocatedFunds.Int64()) fluxRound = contracts.NewFluxAggregatorRoundConfirmer(fluxInstance, big.NewInt(2), fluxRoundTimeout) - nets.Default.AddHeaderEventSubscription(fluxInstance.Address(), fluxRound) + defaultNetwork.AddHeaderEventSubscription(fluxInstance.Address(), fluxRound) err = mockserver.SetValuePath(adapterPath, 1e10) - Expect(err).ShouldNot(HaveOccurred()) - err = nets.Default.WaitForEvents() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Setting value path in mock server shouldn't fail") + err = defaultNetwork.WaitForEvents() + Expect(err).ShouldNot(HaveOccurred(), "Waiting for event subscriptions in nodes shouldn't fail") data, err = fluxInstance.GetContractData(context.Background()) - Expect(err).ShouldNot(HaveOccurred()) - Expect(data.LatestRoundData.Answer.Int64()).Should(Equal(int64(1e10))) - Expect(data.LatestRoundData.RoundId.Int64()).Should(Equal(int64(2))) - Expect(data.LatestRoundData.AnsweredInRound.Int64()).Should(Equal(int64(2))) - Expect(data.AvailableFunds.Int64()).Should(Equal(int64(999999999999999994))) - Expect(data.AllocatedFunds.Int64()).Should(Equal(int64(6))) + Expect(err).ShouldNot(HaveOccurred(), "Getting contract data from flux aggregator contract shouldn't fail") + Expect(data.LatestRoundData.Answer.Int64()).Should(Equal(int64(1e10)), "Expected latest round answer to be %d, but found %d", int64(1e10), 
data.LatestRoundData.Answer.Int64()) + Expect(data.LatestRoundData.RoundId.Int64()).Should(Equal(int64(2)), "Expected latest round id to be %d, but found %d", int64(2), data.LatestRoundData.RoundId.Int64()) + Expect(data.LatestRoundData.AnsweredInRound.Int64()).Should(Equal(int64(2)), "Expected latest round's answered in round to be %d, but found %d", int64(2), data.LatestRoundData.AnsweredInRound.Int64()) + Expect(data.AvailableFunds.Int64()).Should(Equal(int64(999999999999999994)), "Expected available funds to be %d, but found %d", int64(999999999999999994), data.AvailableFunds.Int64()) + Expect(data.AllocatedFunds.Int64()).Should(Equal(int64(6)), "Expected allocated funds to be %d, but found %d", int64(6), data.AllocatedFunds.Int64()) log.Info().Interface("data", data).Msg("Round data") for _, oracleAddr := range nodeAddresses { payment, _ := fluxInstance.WithdrawablePayment(context.Background(), oracleAddr) - Expect(payment.Int64()).Should(Equal(int64(2))) + Expect(payment.Int64()).Should(Equal(int64(2)), "Expected flux aggregator contract's withdrawable payment to be %d, but found %d", int64(2), payment.Int64()) } }) }) AfterEach(func() { By("Printing gas stats", func() { - nets.Default.GasStats().PrintStats() + defaultNetwork.GasStats().PrintStats() }) By("Tearing down the environment", func() { - err = actions.TeardownSuite(e, nets, utils.ProjectRoot, nil) - Expect(err).ShouldNot(HaveOccurred()) + err = actions.TeardownSuite(env, nets, utils.ProjectRoot, chainlinkNodes, nil) + Expect(err).ShouldNot(HaveOccurred(), "Environment teardown shouldn't fail") }) }) }) diff --git a/integration-tests/smoke/keeper_test.go b/integration-tests/smoke/keeper_test.go index a73c3fbabd5..bf361582258 100644 --- a/integration-tests/smoke/keeper_test.go +++ b/integration-tests/smoke/keeper_test.go @@ -1,11 +1,9 @@ -package smoke_test +package smoke //revive:disable:dot-imports import ( "context" - "math/big" - "github.com/ethereum/go-ethereum/common" . 
"github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/rs/zerolog/log" @@ -13,6 +11,7 @@ import ( "github.com/smartcontractkit/helmenv/tools" "github.com/smartcontractkit/integrations-framework/actions" "github.com/smartcontractkit/integrations-framework/client" + "github.com/smartcontractkit/integrations-framework/config" "github.com/smartcontractkit/integrations-framework/contracts" "github.com/smartcontractkit/integrations-framework/utils" ) @@ -24,141 +23,63 @@ var _ = Describe("Keeper suite @keeper", func() { contractDeployer contracts.ContractDeployer registry contracts.KeeperRegistry consumer contracts.KeeperConsumer - checkGasLimit = uint32(2500000) linkToken contracts.LinkToken chainlinkNodes []client.Chainlink - nodeAddresses []common.Address env *environment.Environment ) BeforeEach(func() { By("Deploying the environment", func() { env, err = environment.DeployOrLoadEnvironment( - environment.NewChainlinkConfig(environment.ChainlinkReplicas(6, nil), ""), + environment.NewChainlinkConfig( + environment.ChainlinkReplicas(6, config.ChainlinkVals()), + "chainlink-keeper-core-ci", + config.GethNetworks()..., + ), tools.ChartsRoot, ) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Environment deployment shouldn't fail") err = env.ConnectAll() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to all nodes shouldn't fail") }) By("Connecting to launched resources", func() { - networkRegistry := client.NewNetworkRegistry() + networkRegistry := client.NewDefaultNetworkRegistry() networks, err = networkRegistry.GetNetworks(env) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to blockchain nodes shouldn't fail") contractDeployer, err = contracts.NewContractDeployer(networks.Default) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Deploying contracts shouldn't fail") chainlinkNodes, err = 
client.ConnectChainlinkNodes(env) - Expect(err).ShouldNot(HaveOccurred()) - nodeAddresses, err = actions.ChainlinkNodeAddresses(chainlinkNodes) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to chainlink nodes shouldn't fail") networks.Default.ParallelTransactions(true) }) By("Funding Chainlink nodes", func() { txCost, err := networks.Default.EstimateCostForChainlinkOperations(10) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Estimating cost for Chainlink Operations shouldn't fail") err = actions.FundChainlinkNodes(chainlinkNodes, networks.Default, txCost) - Expect(err).ShouldNot(HaveOccurred()) - // Edge case where simulated networks need some funds at the 0x0 address in order for keeper reads to work - if networks.Default.GetNetworkType() == "eth_simulated" { - actions.FundAddresses(networks.Default, big.NewFloat(1), "0x0") - } + Expect(err).ShouldNot(HaveOccurred(), "Funding Chainlink nodes shouldn't fail") }) - By("Deploying Keeper contracts", func() { + By("Deploy Keeper Contracts", func() { linkToken, err = contractDeployer.DeployLinkTokenContract() - Expect(err).ShouldNot(HaveOccurred()) - ef, err := contractDeployer.DeployMockETHLINKFeed(big.NewInt(2e18)) - Expect(err).ShouldNot(HaveOccurred()) - gf, err := contractDeployer.DeployMockGasFeed(big.NewInt(2e11)) - Expect(err).ShouldNot(HaveOccurred()) - registry, err = contractDeployer.DeployKeeperRegistry( - &contracts.KeeperRegistryOpts{ - LinkAddr: linkToken.Address(), - ETHFeedAddr: ef.Address(), - GasFeedAddr: gf.Address(), - PaymentPremiumPPB: uint32(200000000), - BlockCountPerTurn: big.NewInt(3), - CheckGasLimit: checkGasLimit, - StalenessSeconds: big.NewInt(90000), - GasCeilingMultiplier: uint16(1), - FallbackGasPrice: big.NewInt(2e11), - FallbackLinkPrice: big.NewInt(2e18), - }, - ) - Expect(err).ShouldNot(HaveOccurred()) - err = linkToken.Transfer(registry.Address(), big.NewInt(1e18)) - 
Expect(err).ShouldNot(HaveOccurred()) - consumer, err = contractDeployer.DeployKeeperConsumer(big.NewInt(5)) - Expect(err).ShouldNot(HaveOccurred()) - err = linkToken.Transfer(consumer.Address(), big.NewInt(1e18)) - Expect(err).ShouldNot(HaveOccurred()) - err = networks.Default.WaitForEvents() - Expect(err).ShouldNot(HaveOccurred()) - }) + Expect(err).ShouldNot(HaveOccurred(), "Deploying Link Token Contract shouldn't fail") - By("Registering upkeep target", func() { - registrar, err := contractDeployer.DeployUpkeepRegistrationRequests( - linkToken.Address(), - big.NewInt(0), + r, consumers := actions.DeployKeeperContracts( + 1, + linkToken, + contractDeployer, + chainlinkNodes, + networks, ) - Expect(err).ShouldNot(HaveOccurred()) - err = registry.SetRegistrar(registrar.Address()) - Expect(err).ShouldNot(HaveOccurred()) - err = registrar.SetRegistrarConfig( - true, - uint32(999), - uint16(999), - registry.Address(), - big.NewInt(0), - ) - Expect(err).ShouldNot(HaveOccurred()) - req, err := registrar.EncodeRegisterRequest( - "upkeep_1", - []byte("0x1234"), - consumer.Address(), - checkGasLimit, - consumer.Address(), - []byte("0x"), - big.NewInt(9e18), - 0, - ) - Expect(err).ShouldNot(HaveOccurred()) - err = linkToken.TransferAndCall(registrar.Address(), big.NewInt(9e18), req) - Expect(err).ShouldNot(HaveOccurred()) - err = networks.Default.WaitForEvents() - Expect(err).ShouldNot(HaveOccurred()) + consumer = consumers[0] + registry = r }) - By("Adding Keepers and a job", func() { - primaryNode := chainlinkNodes[0] - primaryNodeAddress, err := primaryNode.PrimaryEthAddress() - Expect(err).ShouldNot(HaveOccurred()) - nodeAddressesStr := make([]string, 0) - for _, cla := range nodeAddresses { - nodeAddressesStr = append(nodeAddressesStr, cla.Hex()) - } - payees := []string{ - consumer.Address(), - consumer.Address(), - consumer.Address(), - consumer.Address(), - consumer.Address(), - consumer.Address(), - } - err = registry.SetKeepers(nodeAddressesStr, payees) - 
Expect(err).ShouldNot(HaveOccurred()) - _, err = primaryNode.CreateJob(&client.KeeperJobSpec{ - Name: "keeper-test-job", - ContractAddress: registry.Address(), - FromAddress: primaryNodeAddress, - MinIncomingConfirmations: 1, - ObservationSource: client.ObservationSourceKeeperDefault(), - }) - Expect(err).ShouldNot(HaveOccurred()) + By("Register Keeper Jobs", func() { + actions.CreateKeeperJobs(chainlinkNodes, registry) err = networks.Default.WaitForEvents() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Error creating keeper jobs") }) }) @@ -166,8 +87,8 @@ var _ = Describe("Keeper suite @keeper", func() { It("performs upkeep of a target contract", func() { Eventually(func(g Gomega) { cnt, err := consumer.Counter(context.Background()) - g.Expect(err).ShouldNot(HaveOccurred()) - g.Expect(cnt.Int64()).Should(BeNumerically(">", int64(0))) + g.Expect(err).ShouldNot(HaveOccurred(), "Calling consumer's Counter shouldn't fail") + g.Expect(cnt.Int64()).Should(BeNumerically(">", int64(0)), "Expected consumer counter to be greater than 0, but got %d", cnt.Int64()) log.Info().Int64("Upkeep counter", cnt.Int64()).Msg("Upkeeps performed") }, "2m", "1s").Should(Succeed()) }) @@ -178,8 +99,8 @@ var _ = Describe("Keeper suite @keeper", func() { networks.Default.GasStats().PrintStats() }) By("Tearing down the environment", func() { - err = actions.TeardownSuite(env, networks, utils.ProjectRoot, nil) - Expect(err).ShouldNot(HaveOccurred()) + err = actions.TeardownSuite(env, networks, utils.ProjectRoot, chainlinkNodes, nil) + Expect(err).ShouldNot(HaveOccurred(), "Environment teardown shouldn't fail") }) }) }) diff --git a/integration-tests/smoke/ocr_test.go b/integration-tests/smoke/ocr_test.go index fde8e855b3e..34a00ec1f28 100644 --- a/integration-tests/smoke/ocr_test.go +++ b/integration-tests/smoke/ocr_test.go @@ -1,4 +1,4 @@ -package smoke_test +package smoke //revive:disable:dot-imports import ( @@ -11,6 +11,7 @@ import ( 
"github.com/smartcontractkit/helmenv/tools" "github.com/smartcontractkit/integrations-framework/actions" "github.com/smartcontractkit/integrations-framework/client" + "github.com/smartcontractkit/integrations-framework/config" "github.com/smartcontractkit/integrations-framework/contracts" "github.com/smartcontractkit/integrations-framework/utils" ) @@ -30,33 +31,37 @@ var _ = Describe("OCR Feed @ocr", func() { BeforeEach(func() { By("Deploying the environment", func() { env, err = environment.DeployOrLoadEnvironment( - environment.NewChainlinkConfig(environment.ChainlinkReplicas(6, nil), ""), + environment.NewChainlinkConfig( + environment.ChainlinkReplicas(6, config.ChainlinkVals()), + "chainlink-ocr-core-ci", + config.GethNetworks()..., + ), tools.ChartsRoot, ) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Environment deployment shouldn't fail") err = env.ConnectAll() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to all nodes shouldn't fail") }) By("Connecting to launched resources", func() { // Load Networks - networkRegistry := client.NewNetworkRegistry() + networkRegistry := client.NewDefaultNetworkRegistry() var err error networks, err = networkRegistry.GetNetworks(env) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to blockchain nodes shouldn't fail") contractDeployer, err = contracts.NewContractDeployer(networks.Default) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Deploying contracts shouldn't fail") chainlinkNodes, err = client.ConnectChainlinkNodes(env) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to chainlink nodes shouldn't fail") mockserver, err = client.ConnectMockServer(env) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Creating mockserver clients shouldn't fail") networks.Default.ParallelTransactions(true) 
Expect(err).ShouldNot(HaveOccurred()) linkTokenContract, err = contractDeployer.DeployLinkTokenContract() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Deploying Link Token Contract shouldn't fail") }) By("Funding Chainlink nodes", func() { @@ -73,20 +78,21 @@ var _ = Describe("OCR Feed @ocr", func() { Describe("With a single OCR contract", func() { It("performs two rounds", func() { - By("setting adapter responses", actions.SetAllAdapterResponsesToTheSameValue(5, ocrInstances, chainlinkNodes, mockserver)) + By("Setting adapter responses", actions.SetAllAdapterResponsesToTheSameValue(5, ocrInstances, chainlinkNodes, mockserver)) By("Creating OCR jobs", actions.CreateOCRJobs(ocrInstances, chainlinkNodes, mockserver)) - By("starting new round", actions.StartNewRound(1, ocrInstances, networks)) + + By("Starting new round", actions.StartNewRound(1, ocrInstances, networks)) answer, err := ocrInstances[0].GetLatestAnswer(context.Background()) - Expect(err).ShouldNot(HaveOccurred()) - Expect(answer.Int64()).Should(Equal(int64(5)), "latest answer from OCR is not as expected") + Expect(err).ShouldNot(HaveOccurred(), "Getting latest answer from OCR contract shouldn't fail") + Expect(answer.Int64()).Should(Equal(int64(5)), "Expected latest answer from OCR contract to be 5 but got %d", answer.Int64()) By("setting adapter responses", actions.SetAllAdapterResponsesToTheSameValue(10, ocrInstances, chainlinkNodes, mockserver)) By("starting new round", actions.StartNewRound(2, ocrInstances, networks)) answer, err = ocrInstances[0].GetLatestAnswer(context.Background()) Expect(err).ShouldNot(HaveOccurred()) - Expect(answer.Int64()).Should(Equal(int64(10)), "latest answer from OCR is not as expected") + Expect(answer.Int64()).Should(Equal(int64(10)), "Expected latest answer from OCR contract to be 10 but got %d", answer.Int64()) }) }) @@ -95,8 +101,8 @@ var _ = Describe("OCR Feed @ocr", func() { networks.Default.GasStats().PrintStats() }) By("Tearing 
down the environment", func() { - err = actions.TeardownSuite(env, networks, utils.ProjectRoot, nil) - Expect(err).ShouldNot(HaveOccurred()) + err = actions.TeardownSuite(env, networks, utils.ProjectRoot, chainlinkNodes, nil) + Expect(err).ShouldNot(HaveOccurred(), "Environment teardown shouldn't fail") }) }) }) diff --git a/integration-tests/smoke/runlog_test.go b/integration-tests/smoke/runlog_test.go index 7b3031ca954..21154d8dc27 100644 --- a/integration-tests/smoke/runlog_test.go +++ b/integration-tests/smoke/runlog_test.go @@ -1,4 +1,4 @@ -package smoke_test +package smoke //revive:disable:dot-imports import ( @@ -15,68 +15,73 @@ import ( "github.com/smartcontractkit/helmenv/tools" "github.com/smartcontractkit/integrations-framework/actions" "github.com/smartcontractkit/integrations-framework/client" + "github.com/smartcontractkit/integrations-framework/config" "github.com/smartcontractkit/integrations-framework/contracts" "github.com/smartcontractkit/integrations-framework/utils" ) var _ = Describe("Direct request suite @runlog", func() { var ( - err error - nets *client.Networks - cd contracts.ContractDeployer - cls []client.Chainlink - oracle contracts.Oracle - consumer contracts.APIConsumer - jobUUID uuid.UUID - mockserver *client.MockserverClient - e *environment.Environment + err error + nets *client.Networks + cd contracts.ContractDeployer + chainlinkNodes []client.Chainlink + oracle contracts.Oracle + consumer contracts.APIConsumer + jobUUID uuid.UUID + mockserver *client.MockserverClient + e *environment.Environment ) BeforeEach(func() { By("Deploying the environment", func() { e, err = environment.DeployOrLoadEnvironment( - environment.NewChainlinkConfig(environment.ChainlinkReplicas(3, nil), ""), + environment.NewChainlinkConfig( + environment.ChainlinkReplicas(3, config.ChainlinkVals()), + "chainlink-runlog-core-ci", + config.GethNetworks()..., + ), tools.ChartsRoot, ) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), 
"Environment deployment shouldn't fail") err = e.ConnectAll() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to all nodes shouldn't fail") }) By("Connecting to launched resources", func() { - networkRegistry := client.NewNetworkRegistry() + networkRegistry := client.NewDefaultNetworkRegistry() nets, err = networkRegistry.GetNetworks(e) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to blockchain nodes shouldn't fail") cd, err = contracts.NewContractDeployer(nets.Default) - Expect(err).ShouldNot(HaveOccurred()) - cls, err = client.ConnectChainlinkNodes(e) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Deploying contracts shouldn't fail") + chainlinkNodes, err = client.ConnectChainlinkNodes(e) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to chainlink nodes shouldn't fail") mockserver, err = client.ConnectMockServer(e) Expect(err).ShouldNot(HaveOccurred()) }) By("Funding Chainlink nodes", func() { ethAmount, err := nets.Default.EstimateCostForChainlinkOperations(1) - Expect(err).ShouldNot(HaveOccurred()) - err = actions.FundChainlinkNodes(cls, nets.Default, ethAmount) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Estimating cost for Chainlink Operations shouldn't fail") + err = actions.FundChainlinkNodes(chainlinkNodes, nets.Default, ethAmount) + Expect(err).ShouldNot(HaveOccurred(), "Funding chainlink nodes with ETH shouldn't fail") }) By("Deploying contracts", func() { lt, err := cd.DeployLinkTokenContract() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Deploying Link Token Contract shouldn't fail") oracle, err = cd.DeployOracle(lt.Address()) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Deploying Oracle Contract shouldn't fail") consumer, err = cd.DeployAPIConsumer(lt.Address()) - Expect(err).ShouldNot(HaveOccurred()) + 
Expect(err).ShouldNot(HaveOccurred(), "Deploying Consumer Contract shouldn't fail") err = nets.Default.SetWallet(0) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Setting default wallet shouldn't fail") err = lt.Transfer(consumer.Address(), big.NewInt(2e18)) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Transferring %d to consumer contract shouldn't fail", big.NewInt(2e18)) }) By("Creating directrequest job", func() { err = mockserver.SetValuePath("/variable", 5) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Setting mockserver value path shouldn't fail") jobUUID = uuid.NewV4() @@ -84,23 +89,23 @@ var _ = Describe("Direct request suite @runlog", func() { Name: fmt.Sprintf("five-%s", jobUUID.String()), URL: fmt.Sprintf("%s/variable", mockserver.Config.ClusterURL), } - err = cls[0].CreateBridge(&bta) - Expect(err).ShouldNot(HaveOccurred()) + err = chainlinkNodes[0].CreateBridge(&bta) + Expect(err).ShouldNot(HaveOccurred(), "Creating bridge shouldn't fail") os := &client.DirectRequestTxPipelineSpec{ BridgeTypeAttributes: bta, DataPath: "data,result", } ost, err := os.String() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Building observation source spec shouldn't fail") - _, err = cls[0].CreateJob(&client.DirectRequestJobSpec{ + _, err = chainlinkNodes[0].CreateJob(&client.DirectRequestJobSpec{ Name: "direct_request", ContractAddress: oracle.Address(), ExternalJobID: jobUUID.String(), ObservationSource: ost, }) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Creating direct_request job shouldn't fail") }) By("Calling oracle contract", func() { @@ -115,7 +120,7 @@ var _ = Describe("Direct request suite @runlog", func() { "data,result", big.NewInt(100), ) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Calling oracle contract shouldn't fail") }) }) @@ -123,10 +128,10 @@ 
var _ = Describe("Direct request suite @runlog", func() { It("receives API call data on-chain", func() { Eventually(func(g Gomega) { d, err := consumer.Data(context.Background()) - g.Expect(err).ShouldNot(HaveOccurred()) - g.Expect(d).ShouldNot(BeNil()) + g.Expect(err).ShouldNot(HaveOccurred(), "Getting data from consumer contract shouldn't fail") + g.Expect(d).ShouldNot(BeNil(), "Expected the initial on chain data to be nil") log.Debug().Int64("Data", d.Int64()).Msg("Found on chain") - g.Expect(d.Int64()).Should(BeNumerically("==", 5)) + g.Expect(d.Int64()).Should(BeNumerically("==", 5), "Expected the on-chain data to be 5, but found %d", d.Int64()) }, "2m", "1s").Should(Succeed()) }) }) @@ -134,8 +139,8 @@ var _ = Describe("Direct request suite @runlog", func() { AfterEach(func() { By("Tearing down the environment", func() { nets.Default.GasStats().PrintStats() - err = actions.TeardownSuite(e, nets, utils.ProjectRoot, nil) - Expect(err).ShouldNot(HaveOccurred()) + err = actions.TeardownSuite(e, nets, utils.ProjectRoot, chainlinkNodes, nil) + Expect(err).ShouldNot(HaveOccurred(), "Environment teardown shouldn't fail") }) }) }) diff --git a/integration-tests/smoke/suite_test.go b/integration-tests/smoke/suite_test.go index 45fdd408301..08d5e3987e3 100644 --- a/integration-tests/smoke/suite_test.go +++ b/integration-tests/smoke/suite_test.go @@ -1,14 +1,15 @@ package smoke_test +//revive:disable:dot-imports import ( "testing" - "github.com/smartcontractkit/integrations-framework/utils" + "github.com/smartcontractkit/integrations-framework/actions" . 
"github.com/onsi/ginkgo/v2" ) func Test_Suite(t *testing.T) { - utils.GinkgoSuite("../") + actions.GinkgoSuite("../") RunSpecs(t, "Integration") } diff --git a/integration-tests/smoke/vrf_test.go b/integration-tests/smoke/vrf_test.go index 6ab2ed587ef..29925f241ae 100644 --- a/integration-tests/smoke/vrf_test.go +++ b/integration-tests/smoke/vrf_test.go @@ -1,4 +1,4 @@ -package smoke_test +package smoke //revive:disable:dot-imports import ( @@ -15,6 +15,7 @@ import ( "github.com/smartcontractkit/helmenv/tools" "github.com/smartcontractkit/integrations-framework/actions" "github.com/smartcontractkit/integrations-framework/client" + "github.com/smartcontractkit/integrations-framework/config" "github.com/smartcontractkit/integrations-framework/contracts" "github.com/smartcontractkit/integrations-framework/utils" ) @@ -28,7 +29,7 @@ var _ = Describe("VRF suite @vrf", func() { coordinator contracts.VRFCoordinator encodedProvingKeys = make([][2]*big.Int, 0) lt contracts.LinkToken - cls []client.Chainlink + chainlinkNodes []client.Chainlink e *environment.Environment job *client.Job ) @@ -36,53 +37,60 @@ var _ = Describe("VRF suite @vrf", func() { BeforeEach(func() { By("Deploying the environment", func() { e, err = environment.DeployOrLoadEnvironment( - environment.NewChainlinkConfig(nil, ""), + environment.NewChainlinkConfig( + config.ChainlinkVals(), + "chainlink-vrf-core-ci", + config.GethNetworks()..., + ), tools.ChartsRoot, ) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Environment deployment shouldn't fail") err = e.ConnectAll() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to all nodes shouldn't fail") }) By("Connecting to launched resources", func() { - networkRegistry := client.NewNetworkRegistry() + networkRegistry := client.NewDefaultNetworkRegistry() nets, err = networkRegistry.GetNetworks(e) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), 
"Connecting to blockchain nodes shouldn't fail") cd, err = contracts.NewContractDeployer(nets.Default) - Expect(err).ShouldNot(HaveOccurred()) - cls, err = client.ConnectChainlinkNodes(e) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Deploying contracts shouldn't fail") + chainlinkNodes, err = client.ConnectChainlinkNodes(e) + Expect(err).ShouldNot(HaveOccurred(), "Connecting to chainlink nodes shouldn't fail") nets.Default.ParallelTransactions(true) }) By("Funding Chainlink nodes", func() { txCost, err := nets.Default.EstimateCostForChainlinkOperations(1) - Expect(err).ShouldNot(HaveOccurred()) - err = actions.FundChainlinkNodes(cls, nets.Default, txCost) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Estimating cost for Chainlink Operations shouldn't fail") + err = actions.FundChainlinkNodes(chainlinkNodes, nets.Default, txCost) + Expect(err).ShouldNot(HaveOccurred(), "Funding chainlink nodes with ETH shouldn't fail") }) By("Deploying VRF contracts", func() { lt, err = cd.DeployLinkTokenContract() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Deploying Link Token Contract shouldn't fail") bhs, err := cd.DeployBlockhashStore() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Deploying Blockhash store shouldn't fail") coordinator, err = cd.DeployVRFCoordinator(lt.Address(), bhs.Address()) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Deploying VRF coordinator shouldn't fail") consumer, err = cd.DeployVRFConsumer(lt.Address(), coordinator.Address()) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Deploying VRF consumer contract shouldn't fail") + err = nets.Default.WaitForEvents() + Expect(err).ShouldNot(HaveOccurred(), "Failed to wait for VRF setup contracts to deploy") + err = lt.Transfer(consumer.Address(), big.NewInt(2e18)) - 
Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Funding consumer contract shouldn't fail") _, err = cd.DeployVRFContract() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Deploying VRF contract shouldn't fail") err = nets.Default.WaitForEvents() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Waiting for event subscriptions in nodes shouldn't fail") }) By("Creating jobs and registering proving keys", func() { - for _, n := range cls { + for _, n := range chainlinkNodes { nodeKey, err := n.CreateVRFKey() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Creating VRF key shouldn't fail") log.Debug().Interface("Key JSON", nodeKey).Msg("Created proving key") pubKeyCompressed := nodeKey.Data.ID jobUUID := uuid.NewV4() @@ -90,7 +98,7 @@ var _ = Describe("VRF suite @vrf", func() { Address: coordinator.Address(), } ost, err := os.String() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Building observation source spec shouldn't fail") job, err = n.CreateJob(&client.VRFJobSpec{ Name: fmt.Sprintf("vrf-%s", jobUUID), CoordinatorAddress: coordinator.Address(), @@ -99,19 +107,19 @@ var _ = Describe("VRF suite @vrf", func() { ExternalJobID: jobUUID.String(), ObservationSource: ost, }) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Creating VRF Job shouldn't fail") oracleAddr, err := n.PrimaryEthAddress() - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Getting primary ETH address of chainlink node shouldn't fail") provingKey, err := actions.EncodeOnChainVRFProvingKey(*nodeKey) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Encoding on-chain VRF Proving key shouldn't fail") err = coordinator.RegisterProvingKey( big.NewInt(1), oracleAddr, provingKey, actions.EncodeOnChainExternalJobID(jobUUID), ) - 
Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Registering the on-chain VRF Proving key shouldn't fail") encodedProvingKeys = append(encodedProvingKeys, provingKey) } }) @@ -120,18 +128,18 @@ var _ = Describe("VRF suite @vrf", func() { Describe("with VRF job", func() { It("randomness is fulfilled", func() { requestHash, err := coordinator.HashOfKey(context.Background(), encodedProvingKeys[0]) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Getting Hash of encoded proving keys shouldn't fail") err = consumer.RequestRandomness(requestHash, big.NewInt(1)) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred(), "Requesting randomness shouldn't fail") timeout := time.Minute * 2 Eventually(func(g Gomega) { - jobRuns, err := cls[0].ReadRunsByJob(job.Data.ID) - g.Expect(err).ShouldNot(HaveOccurred()) + jobRuns, err := chainlinkNodes[0].ReadRunsByJob(job.Data.ID) + g.Expect(err).ShouldNot(HaveOccurred(), "Job execution shouldn't fail") out, err := consumer.RandomnessOutput(context.Background()) - g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(err).ShouldNot(HaveOccurred(), "Getting the randomness output of the consumer shouldn't fail") // Checks that the job has actually run g.Expect(len(jobRuns.Data)).Should(BeNumerically(">=", 1), fmt.Sprintf("Expected the VRF job to run once or more after %s", timeout)) @@ -150,10 +158,9 @@ var _ = Describe("VRF suite @vrf", func() { By("Printing gas stats", func() { nets.Default.GasStats().PrintStats() }) - By("Tearing down the environment", func() { - err = actions.TeardownSuite(e, nets, utils.ProjectRoot, nil) - Expect(err).ShouldNot(HaveOccurred()) + err = actions.TeardownSuite(e, nets, utils.ProjectRoot, chainlinkNodes, nil) + Expect(err).ShouldNot(HaveOccurred(), "Environment teardown shouldn't fail") }) }) }) diff --git a/integration-tests/smoke/vrfv2_test.go b/integration-tests/smoke/vrfv2_test.go new file mode 100644 index 
00000000000..d94b21b58b0 --- /dev/null +++ b/integration-tests/smoke/vrfv2_test.go @@ -0,0 +1,186 @@ +package smoke_test + +//revive:disable:dot-imports +import ( + "context" + "fmt" + "math/big" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/rs/zerolog/log" + uuid "github.com/satori/go.uuid" + "github.com/smartcontractkit/helmenv/environment" + "github.com/smartcontractkit/helmenv/tools" + "github.com/smartcontractkit/integrations-framework/actions" + "github.com/smartcontractkit/integrations-framework/client" + "github.com/smartcontractkit/integrations-framework/contracts" + "github.com/smartcontractkit/integrations-framework/contracts/ethereum" + "github.com/smartcontractkit/integrations-framework/utils" +) + +var _ = Describe("VRFv2 suite @v2vrf", func() { + var ( + err error + nets *client.Networks + cd contracts.ContractDeployer + consumer contracts.VRFConsumerV2 + coordinator contracts.VRFCoordinatorV2 + encodedProvingKeys = make([][2]*big.Int, 0) + lt contracts.LinkToken + cls []client.Chainlink + e *environment.Environment + vrfKey *client.VRFKey + job *client.Job + // used both as a feed and a fallback value + linkEthFeedResponse = big.NewInt(1e18) + ) + + BeforeEach(func() { + By("Deploying the environment", func() { + e, err = environment.DeployOrLoadEnvironment( + environment.NewChainlinkConfig( + nil, + "", + // works only on perf Geth + environment.PerformanceGeth, + ), + tools.ChartsRoot, + ) + Expect(err).ShouldNot(HaveOccurred()) + err = e.ConnectAll() + Expect(err).ShouldNot(HaveOccurred()) + }) + + By("Connecting to launched resources", func() { + networkRegistry := client.NewDefaultNetworkRegistry() + nets, err = networkRegistry.GetNetworks(e) + Expect(err).ShouldNot(HaveOccurred()) + cd, err = contracts.NewContractDeployer(nets.Default) + Expect(err).ShouldNot(HaveOccurred()) + cls, err = client.ConnectChainlinkNodes(e) + Expect(err).ShouldNot(HaveOccurred()) + nets.Default.ParallelTransactions(true) + }) 
+ By("Funding Chainlink nodes", func() { + err = actions.FundChainlinkNodes(cls, nets.Default, big.NewFloat(3)) + Expect(err).ShouldNot(HaveOccurred()) + }) + + By("Deploying VRF contracts", func() { + lt, err = cd.DeployLinkTokenContract() + Expect(err).ShouldNot(HaveOccurred()) + bhs, err := cd.DeployBlockhashStore() + Expect(err).ShouldNot(HaveOccurred()) + mf, err := cd.DeployMockETHLINKFeed(linkEthFeedResponse) + Expect(err).ShouldNot(HaveOccurred()) + coordinator, err = cd.DeployVRFCoordinatorV2(lt.Address(), bhs.Address(), mf.Address()) + Expect(err).ShouldNot(HaveOccurred()) + consumer, err = cd.DeployVRFConsumerV2(lt.Address(), coordinator.Address()) + Expect(err).ShouldNot(HaveOccurred()) + err = nets.Default.WaitForEvents() + Expect(err).ShouldNot(HaveOccurred()) + + err = lt.Transfer(consumer.Address(), big.NewInt(0).Mul(big.NewInt(1e4), big.NewInt(1e18))) + Expect(err).ShouldNot(HaveOccurred()) + err = coordinator.SetConfig( + 1, + 2.5e6, + 86400, + 33825, + linkEthFeedResponse, + ethereum.VRFCoordinatorV2FeeConfig{ + FulfillmentFlatFeeLinkPPMTier1: 1, + FulfillmentFlatFeeLinkPPMTier2: 1, + FulfillmentFlatFeeLinkPPMTier3: 1, + FulfillmentFlatFeeLinkPPMTier4: 1, + FulfillmentFlatFeeLinkPPMTier5: 1, + ReqsForTier2: big.NewInt(10), + ReqsForTier3: big.NewInt(20), + ReqsForTier4: big.NewInt(30), + ReqsForTier5: big.NewInt(40)}, + ) + Expect(err).ShouldNot(HaveOccurred()) + err = nets.Default.WaitForEvents() + Expect(err).ShouldNot(HaveOccurred()) + + err = consumer.CreateFundedSubscription(big.NewInt(0).Mul(big.NewInt(30), big.NewInt(1e18))) + Expect(err).ShouldNot(HaveOccurred()) + err = nets.Default.WaitForEvents() + Expect(err).ShouldNot(HaveOccurred()) + }) + + By("Creating jobs and registering proving keys", func() { + for _, n := range cls { + vrfKey, err = n.CreateVRFKey() + Expect(err).ShouldNot(HaveOccurred()) + log.Debug().Interface("Key JSON", vrfKey).Msg("Created proving key") + pubKeyCompressed := vrfKey.Data.ID + jobUUID := uuid.NewV4() + os 
:= &client.VRFV2TxPipelineSpec{ + Address: coordinator.Address(), + } + ost, err := os.String() + Expect(err).ShouldNot(HaveOccurred()) + oracleAddr, err := n.PrimaryEthAddress() + Expect(err).ShouldNot(HaveOccurred()) + job, err = n.CreateJob(&client.VRFV2JobSpec{ + Name: fmt.Sprintf("vrf-%s", jobUUID), + CoordinatorAddress: coordinator.Address(), + FromAddress: oracleAddr, + EVMChainID: "1337", + MinIncomingConfirmations: 1, + PublicKey: pubKeyCompressed, + ExternalJobID: jobUUID.String(), + ObservationSource: ost, + BatchFulfillmentEnabled: false, + }) + Expect(err).ShouldNot(HaveOccurred()) + provingKey, err := actions.EncodeOnChainVRFProvingKey(*vrfKey) + Expect(err).ShouldNot(HaveOccurred()) + err = coordinator.RegisterProvingKey( + oracleAddr, + provingKey, + ) + Expect(err).ShouldNot(HaveOccurred()) + encodedProvingKeys = append(encodedProvingKeys, provingKey) + } + }) + }) + + Describe("with VRF job", func() { + It("randomness is fulfilled", func() { + words := uint32(10) + keyHash, err := coordinator.HashOfKey(context.Background(), encodedProvingKeys[0]) + Expect(err).ShouldNot(HaveOccurred()) + err = consumer.RequestRandomness(keyHash, 1, 1, 300000, words) + Expect(err).ShouldNot(HaveOccurred()) + + timeout := time.Minute * 2 + + Eventually(func(g Gomega) { + jobRuns, err := cls[0].ReadRunsByJob(job.Data.ID) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(len(jobRuns.Data)).Should(BeNumerically("==", 1)) + randomness, err := consumer.GetAllRandomWords(context.Background(), int(words)) + g.Expect(err).ShouldNot(HaveOccurred()) + for _, w := range randomness { + log.Debug().Uint64("Output", w.Uint64()).Msg("Randomness fulfilled") + g.Expect(w.Uint64()).Should(Not(BeNumerically("==", 0)), "Expected the VRF job give an answer other than 0") + } + }, timeout, "1s").Should(Succeed()) + }) + }) + + AfterEach(func() { + By("Printing gas stats", func() { + nets.Default.GasStats().PrintStats() + }) + + By("Tearing down the environment", func() { + err = 
actions.TeardownSuite(e, nets, utils.ProjectRoot, nil, nil) + Expect(err).ShouldNot(HaveOccurred()) + }) + }) +}) diff --git a/operator_ui/src/screens/Job/JobView.tsx b/operator_ui/src/screens/Job/JobView.tsx index 9dcb69c5de1..8659086bc6d 100644 --- a/operator_ui/src/screens/Job/JobView.tsx +++ b/operator_ui/src/screens/Job/JobView.tsx @@ -1,20 +1,17 @@ -import React from 'react' - import { gql } from '@apollo/client' -import { Route, Switch, useRouteMatch } from 'react-router-dom' - import Grid from '@material-ui/core/Grid' - +import React from 'react' +import { Route, Switch, useRouteMatch } from 'react-router-dom' +import Button from 'src/components/Button' import Content from 'src/components/Content' import { Heading1 } from 'src/components/Heading/Heading1' import { JobCard } from './JobCard' import { JobTabs } from './JobTabs' +import { RunJobDialog } from './RunJobDialog' import { TabDefinition } from './TabDefinition' import { TabErrors } from './TabErrors' import { TabOverview } from './TabOverview' import { TabRuns } from './TabRuns' -import Button from 'src/components/Button' -import { RunJobDialog } from './RunJobDialog' const JOB_PAYLOAD__SPEC = gql` fragment JobPayload_Spec on JobSpec { @@ -26,7 +23,7 @@ const JOB_PAYLOAD__SPEC = gql` evmChainID minIncomingConfirmations minIncomingConfirmationsEnv - # minContractPayment - This is not being used but should we display it? + minContractPaymentLinkJuels requesters } ... on FluxMonitorSpec { @@ -89,6 +86,13 @@ const JOB_PAYLOAD__SPEC = gql` pollPeriod publicKey requestedConfsDelay + batchCoordinatorAddress + batchFulfillmentEnabled + batchFulfillmentGasMultiplier + chunkSize + requestTimeout + backoffInitialDelay + backoffMaxDelay } ... 
on BlockhashStoreSpec { coordinatorV1Address diff --git a/operator_ui/src/screens/Job/__snapshots__/TabDefinition.test.tsx.snap b/operator_ui/src/screens/Job/__snapshots__/TabDefinition.test.tsx.snap index f9cb175e83a..38222c12387 100644 --- a/operator_ui/src/screens/Job/__snapshots__/TabDefinition.test.tsx.snap +++ b/operator_ui/src/screens/Job/__snapshots__/TabDefinition.test.tsx.snap @@ -9,6 +9,7 @@ maxTaskDuration = \\"10s\\" contractAddress = \\"0x0000000000000000000000000000000000000000\\" evmChainID = \\"42\\" minIncomingConfirmations = 3 +minContractPaymentLinkJuels = \\"100000000000000\\" requesters = [ \\"0x59bbE8CFC79c76857fE0eC27e67E4957370d72B5\\" ] observationSource = \\"\\"\\" fetch [type=http method=POST url=\\"http://localhost:8001\\" requestData=\\"{\\\\\\\\\\"hi\\\\\\\\\\": \\\\\\\\\\"hello\\\\\\\\\\"}\\"]; diff --git a/operator_ui/src/screens/Job/generateJobDefinition.test.ts b/operator_ui/src/screens/Job/generateJobDefinition.test.ts index 850608bec5c..c8b88eb48aa 100644 --- a/operator_ui/src/screens/Job/generateJobDefinition.test.ts +++ b/operator_ui/src/screens/Job/generateJobDefinition.test.ts @@ -62,6 +62,7 @@ observationSource = """ evmChainID: '42', minIncomingConfirmations: 3, minIncomingConfirmationsEnv: false, + minContractPaymentLinkJuels: '100000000000000', requesters: ['0x59bbE8CFC79c76857fE0eC27e67E4957370d72B5'], }, observationSource: @@ -77,6 +78,7 @@ maxTaskDuration = "10s" contractAddress = "0x0000000000000000000000000000000000000000" evmChainID = "42" minIncomingConfirmations = 3 +minContractPaymentLinkJuels = "100000000000000" requesters = [ "0x59bbE8CFC79c76857fE0eC27e67E4957370d72B5" ] observationSource = """ fetch [type=http method=POST url="http://localhost:8001" requestData="{\\\\"hi\\\\": \\\\"hello\\\\"}"]; @@ -425,6 +427,13 @@ juelsPerFeeCoinSource = "1000000000" publicKey: '0x92594ee04c179eb7d439ff1baacd98b81a7d7a6ed55c86ca428fa025bd9c914301', requestedConfsDelay: 0, + requestTimeout: '1h', + batchCoordinatorAddress: 
'0x0000000000000000000000000000000000000000', + batchFulfillmentEnabled: true, + batchFulfillmentGasMultiplier: 1.0, + chunkSize: 25, + backoffInitialDelay: '1m', + backoffMaxDelay: '1h', }, observationSource: ' fetch [type=http method=POST url="http://localhost:8001" requestData="{\\"hi\\": \\"hello\\"}"];\n parse [type=jsonparse path="data,result"];\n multiply [type=multiply times=100];\n fetch -> parse -> multiply;\n', @@ -442,6 +451,13 @@ minIncomingConfirmations = 6 pollPeriod = "10s" publicKey = "0x92594ee04c179eb7d439ff1baacd98b81a7d7a6ed55c86ca428fa025bd9c914301" requestedConfsDelay = 0 +requestTimeout = "1h" +batchCoordinatorAddress = "0x0000000000000000000000000000000000000000" +batchFulfillmentEnabled = true +batchFulfillmentGasMultiplier = 1 +chunkSize = 25 +backoffInitialDelay = "1m" +backoffMaxDelay = "1h" observationSource = """ fetch [type=http method=POST url="http://localhost:8001" requestData="{\\\\"hi\\\\": \\\\"hello\\\\"}"]; parse [type=jsonparse path="data,result"]; diff --git a/operator_ui/src/screens/Job/generateJobDefinition.ts b/operator_ui/src/screens/Job/generateJobDefinition.ts index d3ae7e04944..dedcf33cda8 100644 --- a/operator_ui/src/screens/Job/generateJobDefinition.ts +++ b/operator_ui/src/screens/Job/generateJobDefinition.ts @@ -106,6 +106,7 @@ export const generateJobDefinition = ( 'contractAddress', 'evmChainID', 'minIncomingConfirmations', + 'minContractPaymentLinkJuels', 'requesters', ), ...extractObservationSourceField(job), @@ -204,6 +205,13 @@ export const generateJobDefinition = ( 'pollPeriod', 'publicKey', 'requestedConfsDelay', + 'requestTimeout', + 'batchCoordinatorAddress', + 'batchFulfillmentEnabled', + 'batchFulfillmentGasMultiplier', + 'chunkSize', + 'backoffInitialDelay', + 'backoffMaxDelay', ), ...extractObservationSourceField(job), } diff --git a/operator_ui/support/factories/gql/fetchJob.ts b/operator_ui/support/factories/gql/fetchJob.ts index 40d957c8f4a..82e24a657ed 100644 --- 
a/operator_ui/support/factories/gql/fetchJob.ts +++ b/operator_ui/support/factories/gql/fetchJob.ts @@ -20,6 +20,7 @@ export function buildJob( evmChainID: '42', minIncomingConfirmations: 3, minIncomingConfirmationsEnv: false, + minContractPaymentLinkJuels: '100000000000000', requesters: ['0x59bbE8CFC79c76857fE0eC27e67E4957370d72B5'], }, runs: { diff --git a/tools/ci/check_solc_hashes b/tools/ci/check_solc_hashes index 1b8fe05e705..177ea948359 100755 --- a/tools/ci/check_solc_hashes +++ b/tools/ci/check_solc_hashes @@ -9,6 +9,7 @@ set -e SOLC_6_6_LOCAL_PATH="$HOME/.solc-select/artifacts/solc-0.6.6" SOLC_7_6_LOCAL_PATH="$HOME/.solc-select/artifacts/solc-0.7.6" SOLC_8_6_LOCAL_PATH="$HOME/.solc-select/artifacts/solc-0.8.6" +SOLC_8_13_LOCAL_PATH="$HOME/.solc-select/artifacts/solc-0.8.13" SOLC_6_6_LOCAL_SHA=`sha256sum -b $SOLC_6_6_LOCAL_PATH | cut -d " " -f1` SOLC_6_6_EXPECTED_SHA="5d8cd4e0cc02e9946497db68c06d56326a78ff95a21c9265cfedb819a10a539d" @@ -19,6 +20,9 @@ SOLC_7_6_EXPECTED_SHA="bd69ea85427bf2f4da74cb426ad951dd78db9dfdd01d791208eccc2d4 SOLC_8_6_LOCAL_SHA=`sha256sum -b $SOLC_8_6_LOCAL_PATH | cut -d " " -f1` SOLC_8_6_EXPECTED_SHA="abd5c4f3f262bc3ed7951b968c63f98e83f66d9a5c3568ab306eac49250aec3e" +SOLC_8_13_LOCAL_SHA=`sha256sum -b $SOLC_8_13_LOCAL_PATH | cut -d " " -f1` +SOLC_8_13_EXPECTED_SHA="a805dffa86ccd8ed5c9cd18ffcfcca6ff46f635216aa7fc0246546f7be413d62" + if [ "$SOLC_6_6_LOCAL_SHA" != "$SOLC_6_6_EXPECTED_SHA" ]; then printf "solc 0.6.6 did not match checksum.\nGot '$SOLC_6_6_LOCAL_SHA'\nExpected '$SOLC_6_6_EXPECTED_SHA']\n" exit 1 @@ -34,3 +38,7 @@ if [ "$SOLC_8_6_LOCAL_SHA" != "$SOLC_8_6_EXPECTED_SHA" ]; then exit 1 fi +if [ "$SOLC_8_13_LOCAL_SHA" != "$SOLC_8_13_EXPECTED_SHA" ]; then + printf "solc 0.8.13 did not match checksum.\nGot '$SOLC_8_13_LOCAL_SHA'\nExpected '$SOLC_8_13_EXPECTED_SHA'\n" + exit 1 +fi diff --git a/tools/ci/install_solana b/tools/ci/install_solana new file mode 100755 index 00000000000..e01dbc5983e --- /dev/null +++ 
b/tools/ci/install_solana @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -euxo pipefail + +sh -c "$(curl -sSfL https://release.solana.com/v1.9.12/install)" +echo "PATH=$HOME/.local/share/solana/install/active_release/bin:$PATH" >> $GITHUB_ENV