diff --git a/.changeset/forty-foxes-watch.md b/.changeset/forty-foxes-watch.md new file mode 100644 index 00000000000..cb118d50021 --- /dev/null +++ b/.changeset/forty-foxes-watch.md @@ -0,0 +1,5 @@ +--- +"chainlink": minor +--- + +Updated Solana TXM's in-memory storage to track statuses across the Solana transaction lifecycle. Added a method to translate Solana transaction statuses into states expected by the ChainWriter interface. Made the duration transactions are retained in storage after finality or error configurable using `TxRetentionTimeout`. #added diff --git a/.changeset/healthy-shirts-remain.md b/.changeset/healthy-shirts-remain.md new file mode 100644 index 00000000000..0ce310e1ce3 --- /dev/null +++ b/.changeset/healthy-shirts-remain.md @@ -0,0 +1,5 @@ +--- +"chainlink": minor +--- + +#removed Remove unused deprecated key interfaces. diff --git a/.changeset/nine-stingrays-march.md b/.changeset/nine-stingrays-march.md new file mode 100644 index 00000000000..c2f88d95663 --- /dev/null +++ b/.changeset/nine-stingrays-march.md @@ -0,0 +1,5 @@ +--- +"chainlink": minor +--- + +Add don_id to Mercury Enhanced EA telemetry #added diff --git a/.github/actions/golangci-lint/action.yml b/.github/actions/golangci-lint/action.yml index ddfae02a895..20ad2689deb 100644 --- a/.github/actions/golangci-lint/action.yml +++ b/.github/actions/golangci-lint/action.yml @@ -1,10 +1,6 @@ name: CI lint for Golang description: Runs CI lint for Golang inputs: - # general inputs - name: - description: Name of the lint action - required: true go-directory: description: Go directory to run commands from default: "." @@ -25,10 +21,17 @@ inputs: runs: using: composite steps: - - uses: actions/checkout@v4.2.1 + - name: Checkout repo (full) + uses: actions/checkout@v4.2.1 + # Only do a full checkout on merge_groups + if: github.event_name == 'merge_group' with: - # We only need a full clone on merge_group events for golangci-lint. 
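The Solana TXM changeset above makes post-finality retention configurable via `TxRetentionTimeout`. As a rough Go sketch of that retention idea only, with hypothetical names, not the actual Solana TXM storage types:

package txmsketch

import (
	"sync"
	"time"
)

// storedTx is a hypothetical record; the real TXM tracks full lifecycle statuses.
type storedTx struct {
	signature   string
	finalizedAt time.Time
}

// Storage retains finalized or errored transactions until TxRetentionTimeout elapses.
type Storage struct {
	mu                 sync.Mutex
	txRetentionTimeout time.Duration
	txs                map[string]storedTx
}

func NewStorage(retention time.Duration) *Storage {
	return &Storage{txRetentionTimeout: retention, txs: map[string]storedTx{}}
}

// MarkFinalized records when a transaction reached a terminal state.
func (s *Storage) MarkFinalized(sig string, at time.Time) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.txs[sig] = storedTx{signature: sig, finalizedAt: at}
}

// Reap removes transactions whose retention window has expired.
func (s *Storage) Reap(now time.Time) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for sig, tx := range s.txs {
		if now.Sub(tx.finalizedAt) > s.txRetentionTimeout {
			delete(s.txs, sig)
		}
	}
}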
- fetch-depth: ${{ github.event_name == 'merge_group' && '0' || '1' }}" + fetch-depth: 0 + - name: Checkout repo + uses: actions/checkout@v4.2.1 + if: github.event_name != 'merge_group' + with: + fetch-depth: 1 - name: Setup Go uses: ./.github/actions/setup-go with: diff --git a/.github/e2e-tests.yml b/.github/e2e-tests.yml index 3b91bd251a1..aee250420e0 100644 --- a/.github/e2e-tests.yml +++ b/.github/e2e-tests.yml @@ -948,6 +948,20 @@ runner-test-matrix: test_env_vars: E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 E2E_JD_VERSION: 0.4.0 + + - id: smoke/ccip_messaging_test.go:* + path: integration-tests/smoke/ccip_messaging_test.go + test_env_type: docker + runs_on: ubuntu-latest + triggers: + - PR E2E Core Tests + - Merge Queue E2E Core Tests + - Nightly E2E Tests + test_cmd: cd integration-tests/ && go test smoke/ccip_messaging_test.go -timeout 12m -test.parallel=1 -count=1 -json + pyroscope_env: ci-smoke-ccipv1_6-evm-simulated + test_env_vars: + E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 + E2E_JD_VERSION: 0.4.0 # END: CCIPv1.6 tests @@ -1178,4 +1192,4 @@ runner-test-matrix: TEST_LOG_LEVEL: debug E2E_TEST_GRAFANA_DASHBOARD_URL: /d/6vjVx-1V8/ccip-long-running-tests - # END: CCIP tests \ No newline at end of file + # END: CCIP tests diff --git a/.github/workflows/build-publish-develop-pr.yml b/.github/workflows/build-publish-develop-pr.yml index caf46c1a3ed..68075422adf 100644 --- a/.github/workflows/build-publish-develop-pr.yml +++ b/.github/workflows/build-publish-develop-pr.yml @@ -24,7 +24,10 @@ on: default: "false" env: - GIT_REF: ${{ github.event.inputs.git_ref || github.ref }} + # Use github.sha here otherwise a race condition exists if + # a commit is pushed to develop before merge is run. + CHECKOUT_REF: ${{ github.event.inputs.git_ref || github.sha }} + jobs: merge: @@ -38,7 +41,7 @@ jobs: - name: Checkout repository uses: actions/checkout@v4.2.1 with: - ref: ${{ env.GIT_REF }} + ref: ${{ env.CHECKOUT_REF }} - name: Configure aws credentials uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 @@ -48,13 +51,13 @@ jobs: mask-aws-account-id: true role-session-name: "merge" - - uses: actions/cache/restore@v4 + - uses: actions/cache/restore@v4.1.1 with: path: dist/linux_amd64_v1 key: chainlink-amd64-${{ github.sha }} fail-on-cache-miss: true - - uses: actions/cache/restore@v4 + - uses: actions/cache/restore@v4.1.1 with: path: dist/linux_arm64_v8.0 key: chainlink-arm64-${{ github.sha }} @@ -91,7 +94,7 @@ jobs: - name: Checkout repository uses: actions/checkout@v4.2.1 with: - ref: ${{ env.GIT_REF }} + ref: ${{ env.CHECKOUT_REF }} fetch-depth: 0 - name: Configure aws credentials @@ -103,7 +106,7 @@ jobs: role-session-name: "split-${{ matrix.goarch }}" - id: cache - uses: actions/cache@v4 + uses: actions/cache@v4.1.1 with: path: dist/${{ matrix.dist_name }} key: chainlink-${{ matrix.goarch }}-${{ github.sha }} @@ -125,9 +128,9 @@ jobs: release-type: ${{ steps.get-image-tag.outputs.release-type }} steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v4.2.1 with: - ref: ${{ env.GIT_REF }} + ref: ${{ env.CHECKOUT_REF }} - name: Get image tag id: get-image-tag diff --git a/.github/workflows/ci-core.yml b/.github/workflows/ci-core.yml index 3a32d7e12c7..48977cee35e 100644 --- a/.github/workflows/ci-core.yml +++ b/.github/workflows/ci-core.yml @@ -34,8 +34,8 @@ jobs: pull-requests: read outputs: deployment-changes: ${{ steps.match-some.outputs.deployment == 'true' }} - should-run-ci-core: ${{ 
steps.match-every.outputs.non-ignored == 'true' || steps.match-some.outputs.core-ci == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' }} - should-run-golangci: ${{ steps.match-every.outputs.non-integration-tests == 'true' || github.event_name == 'workflow_dispatch' }} + should-run-ci-core: ${{ steps.match-some.outputs.core-ci == 'true' || steps.match-every.outputs.non-ignored == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' }} + should-run-golangci: ${{ steps.match-some.outputs.golang-ci == 'true' || steps.match-every.outputs.non-ignored == 'true' || github.event_name == 'workflow_dispatch' }} runs-on: ubuntu-latest steps: - name: Checkout the repo @@ -48,19 +48,25 @@ jobs: # "if any changed file matches one or more of the conditions" (https://github.com/dorny/paths-filter/issues/225) predicate-quantifier: some # deployment - any changes to files in `deployments/` + # core-ci - any changes that could affect this workflow definition + # golang-ci - any changes that could affect the linting result filters: | deployment: - 'deployment/**' core-ci: - '.github/workflows/ci-core.yml' - '.github/actions/**' + golang-ci: + - '.golangci.yml' + - '.github/workflows/ci-core.yml' + - '.github/actions/**' - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 id: match-every with: # "if any changed file match all of the conditions" (https://github.com/dorny/paths-filter/issues/225) predicate-quantifier: every # non-integration-tests - only changes made outside of the `integration-tests` directory - # everything-except-ignored - only changes except for the negated ones + # non-ignored - only changes except for the negated ones # - This is opt-in on purpose. To be safe, new files are assumed to have an affect on CI Core unless listed here specifically. filters: | non-integration-tests: @@ -103,9 +109,6 @@ jobs: - name: Golang Lint uses: ./.github/actions/golangci-lint if: ${{ needs.filter.outputs.should-run-golangci == 'true' }} - with: - id: core - name: lint - name: Notify Slack if: ${{ failure() && needs.run-frequency.outputs.one-per-day-frequency == 'true' }} uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 @@ -460,7 +463,7 @@ jobs: SONAR_SCANNER_OPTS: "-Xms6g -Xmx8g" trigger-flaky-test-detection-for-root-project: - name: Find New Flaky Tests In Root Project + name: Find New Flaky Tests In Chainlink Project uses: ./.github/workflows/find-new-flaky-tests.yml if: ${{ github.event_name == 'pull_request' }} with: @@ -468,12 +471,11 @@ jobs: projectPath: '.' 
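The two dorny/paths-filter steps above differ only in `predicate-quantifier`: `some` fires when any changed file matches at least one pattern, while `every` fires only when all changed files match. A hedged Go illustration of those two predicates, not paths-filter's actual implementation:

package main

import "fmt"

// matchesSome mirrors predicate-quantifier: some, i.e. any changed file matches.
func matchesSome(changed []string, match func(string) bool) bool {
	for _, f := range changed {
		if match(f) {
			return true
		}
	}
	return false
}

// matchesEvery mirrors predicate-quantifier: every, i.e. all changed files match,
// as used by the "non-ignored" filter (every changed file is outside the ignore list).
func matchesEvery(changed []string, match func(string) bool) bool {
	for _, f := range changed {
		if !match(f) {
			return false
		}
	}
	return len(changed) > 0
}

func main() {
	changed := []string{".golangci.yml", "core/services/ocr2/plugin.go"}
	isLintConfig := func(f string) bool { return f == ".golangci.yml" || f == ".github/workflows/ci-core.yml" }
	fmt.Println(matchesSome(changed, isLintConfig))  // true: the golang-ci filter matches
	fmt.Println(matchesEvery(changed, isLintConfig)) // false: not every changed file is lint config
}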
baseRef: ${{ github.base_ref }} headRef: ${{ github.head_ref }} - runThreshold: '1' - runWithRace: true + runThreshold: '0.99' findByTestFilesDiff: true findByAffectedPackages: false slackNotificationAfterTestsChannelId: 'C07TRF65CNS' #flaky-test-detector-notifications - extraArgs: '{ "skipped_tests": "TestChainComponents" }' + extraArgs: '{ "skipped_tests": "TestChainComponents", "run_with_race": "true", "print_failed_tests": "true", "test_repeat_count": "3", "min_pass_ratio": "0.01" }' secrets: SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} @@ -487,12 +489,11 @@ jobs: projectPath: 'deployment' baseRef: ${{ github.base_ref }} headRef: ${{ github.head_ref }} - runThreshold: '1' - runWithRace: true + runThreshold: '0.99' findByTestFilesDiff: true findByAffectedPackages: false slackNotificationAfterTestsChannelId: 'C07TRF65CNS' #flaky-test-detector-notifications - extraArgs: '{ "skipped_tests": "TestAddLane" }' + extraArgs: '{ "skipped_tests": "TestAddLane", "run_with_race": "true", "print_failed_tests": "true", "test_repeat_count": "3", "min_pass_ratio": "0.01" }' secrets: SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} diff --git a/.github/workflows/find-new-flaky-tests.yml b/.github/workflows/find-new-flaky-tests.yml index fb3676b30c8..ee27ac37562 100644 --- a/.github/workflows/find-new-flaky-tests.yml +++ b/.github/workflows/find-new-flaky-tests.yml @@ -1,4 +1,4 @@ -name: Find New Flaky Tests +name: Find Flaky Tests on: workflow_call: @@ -19,17 +19,17 @@ on: headRef: required: false type: string - description: 'The head reference or branch to compare changes for detecting flaky tests. Default is the current branch.' + description: 'The head reference or branch to compare changes for detecting flaky tests. Default is the current branch.' + runAllTests: + required: false + type: boolean + description: 'Run all tests in the project.' + default: false runThreshold: required: false type: string description: 'The threshold for the number of times a test can fail before being considered flaky.' - default: '0.8' - runWithRace: - required: false - type: boolean - description: 'Run tests with -race flag.' - default: true + default: '0.9' findByTestFilesDiff: required: false type: boolean @@ -56,18 +56,25 @@ on: env: GIT_HEAD_REF: ${{ inputs.headRef || github.ref }} SKIPPED_TESTS: ${{ fromJson(inputs.extraArgs)['skipped_tests'] || '' }} # Comma separated list of test names to skip running in the flaky detector. Related issue: TT-1823 - MAX_GROUP_SIZE: ${{ fromJson(inputs.extraArgs)['max_group_size'] || '8' }} # The maximum number of jobs to run in parallel when running tests. - RUN_COUNT: ${{ fromJson(inputs.extraArgs)['run_count'] || '5' }} # The number of times to run the tests to detect flaky tests. + DEFAULT_MAX_RUNNER_COUNT: ${{ fromJson(inputs.extraArgs)['default_max_runner_count'] || '8' }} # The default maximum number of GitHub runners to use for parallel test execution. + ALL_TESTS_RUNNER_COUNT: ${{ fromJson(inputs.extraArgs)['all_tests_runner_count'] || '2' }} # The number of GitHub runners to use when running all tests `runAllTests=true`. + TEST_REPEAT_COUNT: ${{ fromJson(inputs.extraArgs)['test_repeat_count'] || '5' }} # The number of times each runner should run a test to detect flaky tests. + RUN_WITH_RACE: ${{ fromJson(inputs.extraArgs)['run_with_race'] || 'true' }} # Whether to run tests with -race flag. + ALL_TESTS_RUNNER: ${{ fromJson(inputs.extraArgs)['all_tests_runner'] || 'ubuntu22.04-32cores-128GB' }} # The runner to use for running all tests. 
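The env block above pulls optional knobs out of the `extraArgs` JSON input and falls back to defaults, e.g. `fromJson(inputs.extraArgs)['test_repeat_count'] || '5'`. A minimal Go sketch of that override-with-default pattern, assuming an extraArgs payload shaped like the ones passed from ci-core.yml:

package main

import (
	"encoding/json"
	"fmt"
)

// valueOrDefault mirrors the workflow's `fromJson(inputs.extraArgs)['key'] || 'default'`.
func valueOrDefault(extraArgs map[string]string, key, def string) string {
	if v, ok := extraArgs[key]; ok && v != "" {
		return v
	}
	return def
}

func main() {
	raw := `{"skipped_tests": "TestChainComponents", "run_with_race": "true", "test_repeat_count": "3", "min_pass_ratio": "0.01"}`
	var extraArgs map[string]string
	if err := json.Unmarshal([]byte(raw), &extraArgs); err != nil {
		panic(err)
	}
	fmt.Println(valueOrDefault(extraArgs, "test_repeat_count", "5"))      // "3" (overridden)
	fmt.Println(valueOrDefault(extraArgs, "all_tests_runner_count", "2")) // "2" (default)
}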
+ DEFAULT_RUNNER: 'ubuntu-latest' # The default runner to use for running tests. + UPLOAD_ALL_TEST_RESULTS: ${{ fromJson(inputs.extraArgs)['upload_all_test_results'] || 'false' }} # Whether to upload all test results as artifacts. + PRINT_FAILED_TESTS: ${{ fromJson(inputs.extraArgs)['print_failed_tests'] || 'false' }} # Whether to print failed tests in the GitHub console. + MIN_PASS_RATIO: ${{ fromJson(inputs.extraArgs)['min_pass_ratio'] || '0.001' }} # The minimum pass ratio for a test to be considered as flaky. Used to distinguish between tests that are truly flaky (with inconsistent results) and those that are consistently failing. Set to 0 if you want to consider all failed tests as flaky. jobs: - find-tests: - name: Find Tests To Run + get-tests: + name: Get Tests To Run runs-on: ubuntu-latest outputs: matrix: ${{ steps.split-packages.outputs.matrix }} workflow_id: ${{ steps.gen_id.outputs.workflow_id }} changed_test_files: ${{ steps.find-changed-test-files.outputs.test_files }} - affected_test_packages: ${{ steps.find-tests.outputs.packages }} + affected_test_packages: ${{ steps.get-tests.outputs.packages }} git_head_sha: ${{ steps.get_commit_sha.outputs.git_head_sha }} git_head_short_sha: ${{ steps.get_commit_sha.outputs.git_head_short_sha }} steps: @@ -93,10 +100,11 @@ jobs: - name: Install flakeguard shell: bash - run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@cb4c307f6f0a79a20097129cda7c151d8c5b5d28 + run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@897bca304fc9f0e68b87579558750c4a3e83adec - name: Find new or updated test packages - id: find-tests + if: ${{ inputs.runAllTests == false }} + id: get-tests shell: bash env: # Needed to run go test -list @@ -110,6 +118,7 @@ jobs: echo "packages=$PACKAGES" >> $GITHUB_OUTPUT - name: Find changed test files + if: ${{ inputs.runAllTests == false }} id: find-changed-test-files shell: bash env: @@ -125,11 +134,25 @@ jobs: - name: Split test packages into groups id: split-packages - if: steps.find-tests.outputs.packages != '' shell: bash run: | - PACKAGES=(${{ steps.find-tests.outputs.packages }}) - DESIRED_GROUP_COUNT=$((${{ env.MAX_GROUP_SIZE }})) + if [[ "${{ inputs.runAllTests }}" == "true" ]]; then + # Use ALL_TESTS_RUNNER for a specified number of groups, each with "./..." to run all tests + ALL_TESTS_RUNNER_COUNT=${{ env.ALL_TESTS_RUNNER_COUNT }} + + # Create the JSON array dynamically based on ALL_TESTS_RUNNER_COUNT + json_groups=$(jq -nc --argjson count "$ALL_TESTS_RUNNER_COUNT" \ + '[range(0; $count) | { "testPackages": "./...", "runs_on": "'"${{ env.ALL_TESTS_RUNNER }}"'" }]') + + echo "$json_groups" + echo "matrix<> $GITHUB_OUTPUT + echo "$json_groups" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + exit 0 + fi + + PACKAGES=(${{ steps.get-tests.outputs.packages }}) + DESIRED_GROUP_COUNT=$((${{ env.DEFAULT_MAX_RUNNER_COUNT }})) TOTAL_PACKAGES=${#PACKAGES[@]} # Number of groups should be no more than the number of packages @@ -150,15 +173,17 @@ jobs: # Extract the packages for the current group if [[ $group_size -gt 0 ]]; then group=("${PACKAGES[@]:current_index:group_size}") - groups+=("$(IFS=,; echo "${group[*]}")") + groups+=("{\"testPackages\":\"$(IFS=,; echo "${group[*]}")\", \"runs_on\":\"${{ env.DEFAULT_RUNNER }}\"}") current_index=$(($current_index + $group_size)) fi done # Convert groups array into a JSON array - json_groups=$(printf '%s\n' "${groups[@]}" | jq -R . | jq -cs .) 
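The `split-packages` step above either fans all tests out to a fixed number of large runners (`runAllTests`) or distributes the affected packages across at most `DEFAULT_MAX_RUNNER_COUNT` groups, emitting a JSON matrix of `{testPackages, runs_on}` entries. A simplified Go sketch of that grouping arithmetic (the actual shell step also spreads the remainder packages across groups):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type matrixEntry struct {
	TestPackages string `json:"testPackages"`
	RunsOn       string `json:"runs_on"`
}

// splitIntoGroups assigns packages to at most maxRunners groups using ceil division.
func splitIntoGroups(packages []string, maxRunners int, runner string) []matrixEntry {
	if len(packages) == 0 || maxRunners <= 0 {
		return nil
	}
	groupCount := maxRunners
	if len(packages) < groupCount {
		groupCount = len(packages)
	}
	groupSize := (len(packages) + groupCount - 1) / groupCount
	var entries []matrixEntry
	for i := 0; i < len(packages); i += groupSize {
		end := i + groupSize
		if end > len(packages) {
			end = len(packages)
		}
		entries = append(entries, matrixEntry{
			TestPackages: strings.Join(packages[i:end], ","),
			RunsOn:       runner,
		})
	}
	return entries
}

func main() {
	entries := splitIntoGroups([]string{"./core/a", "./core/b", "./core/c"}, 2, "ubuntu-latest")
	out, _ := json.Marshal(entries)
	fmt.Println(string(out)) // [{"testPackages":"./core/a,./core/b","runs_on":"ubuntu-latest"},{"testPackages":"./core/c","runs_on":"ubuntu-latest"}]
}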
- echo $json_groups - echo "matrix=$json_groups" >> $GITHUB_OUTPUT + json_groups=$(printf '%s\n' "${groups[@]}" | jq -s .) + echo "$json_groups" + echo "matrix<> $GITHUB_OUTPUT + echo "$json_groups" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT - name: Generate random workflow id id: gen_id @@ -167,13 +192,14 @@ jobs: run-tests: name: Run Tests - needs: find-tests - runs-on: ubuntu-latest - if: ${{ needs.find-tests.outputs.matrix != '' }} + needs: get-tests + runs-on: ${{ matrix.runs_on }} + if: ${{ needs.get-tests.outputs.matrix != '' && needs.get-tests.outputs.matrix != '[]' }} + timeout-minutes: 90 strategy: fail-fast: false matrix: - testPackages: ${{ fromJson(needs.find-tests.outputs.matrix) }} + include: ${{ fromJson(needs.get-tests.outputs.matrix) }} env: DB_URL: postgresql://postgres:postgres@localhost:5432/chainlink_test?sslmode=disable steps: @@ -233,11 +259,11 @@ jobs: - name: Install flakeguard shell: bash - run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@cb4c307f6f0a79a20097129cda7c151d8c5b5d28 + run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@897bca304fc9f0e68b87579558750c4a3e83adec - name: Run tests with flakeguard shell: bash - run: flakeguard run --project-path=${{ inputs.projectPath }} --test-packages=${{ matrix.testPackages }} --run-count=${{ env.RUN_COUNT }} --threshold=${{ inputs.runThreshold }} --race=${{ inputs.runWithRace }} --skip-tests=${{ env.SKIPPED_TESTS }} --output-json=test-result.json + run: flakeguard run --project-path=${{ inputs.projectPath }} --test-packages=${{ matrix.testPackages }} --run-count=${{ env.TEST_REPEAT_COUNT }} --min-pass-ratio=${{ env.MIN_PASS_RATIO }} --threshold=${{ inputs.runThreshold }} --race=${{ env.RUN_WITH_RACE }} --skip-tests=${{ env.SKIPPED_TESTS }} --print-failed-tests=${{ env.PRINT_FAILED_TESTS }} --output-json=test-result.json env: CL_DATABASE_URL: ${{ env.DB_URL }} @@ -245,12 +271,12 @@ jobs: if: always() uses: actions/upload-artifact@v4.4.3 with: - name: test-result-${{ needs.find-tests.outputs.workflow_id }}-${{ steps.gen_id.outputs.id }} + name: test-result-${{ needs.get-tests.outputs.workflow_id }}-${{ steps.gen_id.outputs.id }} path: test-result.json - retention-days: 7 + retention-days: 1 report: - needs: [find-tests, run-tests] + needs: [get-tests, run-tests] if: always() name: Report runs-on: ubuntu-latest @@ -261,9 +287,9 @@ jobs: id: set_project_path_pretty run: | if [ "${{ inputs.projectPath }}" = "." ]; then - echo "path=./go.mod" >> $GITHUB_OUTPUT + echo "path=github.com/${{ github.repository }}" >> $GITHUB_OUTPUT else - echo "path=${{ inputs.projectPath }}/go.mod" >> $GITHUB_OUTPUT + echo "path=github.com/${{ github.repository }}/${{ inputs.projectPath }}" >> $GITHUB_OUTPUT fi - name: Download all test result artifacts @@ -271,8 +297,12 @@ jobs: with: path: test_results pattern: - test-result-${{ needs.find-tests.outputs.workflow_id }}-* - + test-result-${{ needs.get-tests.outputs.workflow_id }}-* + + - name: Install flakeguard + shell: bash + run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@897bca304fc9f0e68b87579558750c4a3e83adec + - name: Set combined test results id: set_test_results shell: bash @@ -281,12 +311,28 @@ jobs: if [ -d "test_results" ]; then cd test_results ls -R . - find . 
-name '*.json' -exec cat {} + | jq -s 'add | sort_by(.PassRatio)' > all_tests.json - ALL_TESTS_COUNT=$(jq 'length' all_tests.json) + + # Fix flakeguard binary path + PATH=$PATH:$(go env GOPATH)/bin + export PATH + + # Use flakeguard aggregate-all to aggregate test results + flakeguard aggregate-all --results-path . --output-results ../all_tests.json + + # Count all tests + ALL_TESTS_COUNT=$(jq 'length' ../all_tests.json) echo "All tests count: $ALL_TESTS_COUNT" echo "all_tests_count=$ALL_TESTS_COUNT" >> "$GITHUB_OUTPUT" - jq -c 'map(select(.PassRatio < ($runThreshold | tonumber) and .Skipped != true)) | map(.PassRatio |= (. * 100 | tostring + "%"))' all_tests.json --arg runThreshold '${{ inputs.runThreshold }}' > failed_tests.json - FAILED_TESTS_COUNT=$(jq 'length' failed_tests.json) + + # Use flakeguard aggregate-failed to filter and output failed tests based on PassRatio threshold + flakeguard aggregate-failed --threshold "${{ inputs.runThreshold }}" --min-pass-ratio=${{ env.MIN_PASS_RATIO }} --results-path . --output-results ../failed_tests.json --output-logs ../failed_test_logs.json + + # Count failed tests + if [ -f "../failed_tests.json" ]; then + FAILED_TESTS_COUNT=$(jq 'length' ../failed_tests.json) + else + FAILED_TESTS_COUNT=0 + fi echo "Failed tests count: $FAILED_TESTS_COUNT" echo "failed_tests_count=$FAILED_TESTS_COUNT" >> "$GITHUB_OUTPUT" else @@ -305,47 +351,73 @@ jobs: if: ${{ fromJson(steps.set_test_results.outputs.failed_tests_count) > 0 }} uses: actions/upload-artifact@v4.4.3 with: - name: failed_tests.json - path: test_results/failed_tests.json + path: failed_tests.json + name: failed-test-results.json retention-days: 7 + - name: Upload Failed Test Logs as Artifact + if: ${{ fromJson(steps.set_test_results.outputs.failed_tests_count) > 0 }} + uses: actions/upload-artifact@v4.4.3 + with: + path: failed_test_logs.json + name: failed-test-logs.json + retention-days: 7 + + - name: Upload All Test Results as Artifact + if: ${{ fromJson(steps.set_test_results.outputs.all_tests_count) > 0 && env.UPLOAD_ALL_TEST_RESULTS == 'true' }} + uses: actions/upload-artifact@v4.4.3 + with: + path: all_tests.json + name: all-test-results.json + retention-days: 7 + - name: Create ASCII table with failed test results if: ${{ fromJson(steps.set_test_results.outputs.failed_tests_count) > 0 }} shell: bash run: | - jq -r '["TestPackage", "TestName", "PassRatio", "RunCount", "Skipped"], ["---------", "---------", "---------", "---------", "---------"], (.[] | [.TestPackage, .TestName, .PassRatio, .Runs, .Skipped]) | @tsv' test_results/failed_tests.json | column -t -s$'\t' > test_results/failed_tests_ascii.txt - cat test_results/failed_tests_ascii.txt + jq -r '["TestPackage", "TestName", "PassRatio", "RunCount", "Skipped"], ["---------", "---------", "---------", "---------", "---------"], (.[] | [.TestPackage, .TestName, .PassRatioPercentage, .Runs, .Skipped]) | @tsv' failed_tests.json | column -t -s$'\t' > failed_tests_ascii.txt + cat failed_tests_ascii.txt - name: Create ASCII table with all test results if: ${{ fromJson(steps.set_test_results.outputs.all_tests_count) > 0 }} shell: bash run: | - jq -r '["TestPackage", "TestName", "PassRatio", "RunCount", "Skipped"], ["---------", "---------", "---------", "---------", "---------"], (.[] | [.TestPackage, .TestName, .PassRatio, .Runs, .Skipped]) | @tsv' test_results/all_tests.json | column -t -s$'\t' > test_results/all_tests_ascii.txt - cat test_results/all_tests_ascii.txt + jq -r '["TestPackage", "TestName", "PassRatio", "RunCount", "Skipped"], 
["---------", "---------", "---------", "---------", "---------"], (.[] | [.TestPackage, .TestName, .PassRatioPercentage, .Runs, .Skipped]) | @tsv' all_tests.json | column -t -s$'\t' > all_tests_ascii.txt + cat all_tests_ascii.txt - - name: Create GitHub Summary + - name: Create GitHub Summary (General) + run: | + echo "## Flaky Test Detection Report for ${{ steps.set_project_path_pretty.outputs.path }} Project" >> $GITHUB_STEP_SUMMARY + + - name: Create GitHub Summary (Comparative Test Analysis) + if: ${{ inputs.runAllTests == false }} run: | - echo "## Flaky Test Detection Summary" >> $GITHUB_STEP_SUMMARY echo "### Comparative Test Analysis" >> $GITHUB_STEP_SUMMARY - echo "Checked changes between \`${{ inputs.baseRef }}\` and \`${{ env.GIT_HEAD_REF }}\` for ${{ steps.set_project_path_pretty.outputs.path }} project. See all changes [here](${{ inputs.repoUrl }}/compare/${{ inputs.baseRef }}...${{ needs.find-tests.outputs.git_head_sha }}#files_bucket)." >> $GITHUB_STEP_SUMMARY + echo "Checked changes between \`${{ inputs.baseRef }}\` and \`${{ env.GIT_HEAD_REF }}\`. See all changes [here](${{ inputs.repoUrl }}/compare/${{ inputs.baseRef }}...${{ needs.get-tests.outputs.git_head_sha }}#files_bucket)." >> $GITHUB_STEP_SUMMARY + + - name: Create GitHub Summary (All Tests) + if: ${{ inputs.runAllTests == 'true' }} + run: | + echo "### Running All Tests" >> $GITHUB_STEP_SUMMARY + echo "All tests are being executed as \`runAllTests\` is set to true." >> $GITHUB_STEP_SUMMARY - name: Append Changed Test Files to GitHub Summary - if: ${{ needs.find-tests.outputs.changed_test_files != '' && inputs.findByTestFilesDiff && !inputs.findByAffectedPackages }} + if: ${{ needs.get-tests.outputs.changed_test_files != '' && inputs.findByTestFilesDiff && !inputs.findByAffectedPackages }} run: | echo "### Changed Test Files" >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY - IFS=' ' read -ra ADDR <<< "${{ needs.find-tests.outputs.changed_test_files }}" + IFS=' ' read -ra ADDR <<< "${{ needs.get-tests.outputs.changed_test_files }}" for file in "${ADDR[@]}"; do echo "$file" >> $GITHUB_STEP_SUMMARY done echo '```' >> $GITHUB_STEP_SUMMARY - name: Append Affected Test Packages to GitHub Summary - if: ${{ needs.find-tests.outputs.affected_test_packages != '' }} + if: ${{ needs.get-tests.outputs.affected_test_packages != '' }} run: | echo "### Affected Test Packages" >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY - IFS=' ' read -ra ADDR <<< "${{ needs.find-tests.outputs.affected_test_packages }}" + IFS=' ' read -ra ADDR <<< "${{ needs.get-tests.outputs.affected_test_packages }}" for package in "${ADDR[@]}"; do echo "$package" >> $GITHUB_STEP_SUMMARY done @@ -355,55 +427,76 @@ jobs: if: ${{ fromJson(steps.set_test_results.outputs.failed_tests_count) > 0 }} id: read_failed_tests run: | - file_content=$(cat test_results/failed_tests_ascii.txt) + file_content=$(cat failed_tests_ascii.txt) echo "failed_tests_content<> $GITHUB_OUTPUT echo "$file_content" >> $GITHUB_OUTPUT echo "EOF" >> $GITHUB_OUTPUT - - name: Append Failed Tests to GitHub Summary + - name: Calculate Test Repeat Count + id: calculate_test_repeat_count + shell: bash + run: | + # Convert environment variables to integers + ALL_TESTS_RUNNER_COUNT=${{ env.ALL_TESTS_RUNNER_COUNT }} + TEST_REPEAT_COUNT=${{ env.TEST_REPEAT_COUNT }} + + # If runAllTests input is true, multiply the number of runners by the test repeat count as each runner runs all tests + # Otherwise, use the test repeat count as each runner runs unique tests + if [[ "${{ 
inputs.runAllTests }}" == "true" ]]; then + test_repeat_count=$(( ALL_TESTS_RUNNER_COUNT * TEST_REPEAT_COUNT )) + else + test_repeat_count=$TEST_REPEAT_COUNT + fi + echo "test_repeat_count=$test_repeat_count" >> $GITHUB_OUTPUT + + - name: Append Flaky Tests to GitHub Summary if: ${{ fromJson(steps.set_test_results.outputs.failed_tests_count) > 0 }} run: | - threshold_percentage=$(echo "${{ inputs.runThreshold }}" | awk '{printf "%.0f", $1 * 100}') - echo "### Failed Tests :x:" >> $GITHUB_STEP_SUMMARY - echo "Ran \`${{ steps.set_test_results.outputs.all_tests_count }}\` tests in total for all affected test packages. Below are the tests identified as flaky, with a pass ratio lower than the \`${threshold_percentage}%\` threshold:" >> $GITHUB_STEP_SUMMARY + threshold_percentage=$(echo "${{ inputs.runThreshold }}" | awk '{printf "%.2f", $1 * 100}') + min_pass_ratio_percentage=$(echo "${{ env.MIN_PASS_RATIO }}" | awk '{printf "%.2f", $1 * 100}') + echo "### Flaky Tests :x:" >> $GITHUB_STEP_SUMMARY + echo "Ran ${{ steps.set_test_results.outputs.all_tests_count }} unique tests ${{ steps.calculate_test_repeat_count.outputs.test_repeat_count }} times. Below are the tests identified as flaky, with a pass ratio lower than the ${threshold_percentage}% threshold:" >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY - cat test_results/failed_tests_ascii.txt >> $GITHUB_STEP_SUMMARY + cat failed_tests_ascii.txt >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY - echo "For detailed logs of the failed tests, please refer to the 'failed_tests.json' file in the Artifacts section at the bottom of the page." >> $GITHUB_STEP_SUMMARY + echo "For detailed logs of the failed tests, please refer to the failed-test-results.json and failed-test-logs.json files in the Artifacts section at the bottom of the page. failed-test-logs.json contains all outputs from failed tests." >> $GITHUB_STEP_SUMMARY - - name: Append Success Note if All Tests Passed + - name: Append Success Note if No Flaky Tests Found if: ${{ fromJson(steps.set_test_results.outputs.all_tests_count) > 0 && fromJson(steps.set_test_results.outputs.failed_tests_count) == 0 }} run: | - echo "### All Tests Passed! :white_check_mark:" >> $GITHUB_STEP_SUMMARY - echo "Ran \`${{ steps.set_test_results.outputs.all_tests_count }}\` tests in total and found no flakes." >> $GITHUB_STEP_SUMMARY + echo "### No Flaky Tests Found! :white_check_mark:" >> $GITHUB_STEP_SUMMARY + echo "Ran \`${{ steps.set_test_results.outputs.all_tests_count }}\` unique tests ${{ steps.calculate_test_repeat_count.outputs.test_repeat_count }} times and found no flakes." 
>> $GITHUB_STEP_SUMMARY - name: Append Additional Info to GitHub Summary if: ${{ fromJson(steps.set_test_results.outputs.all_tests_count) > 0 }} run: | echo "### Settings" >> $GITHUB_STEP_SUMMARY - threshold_percentage=$(echo "${{ inputs.runThreshold }}" | awk '{printf "%.0f", $1 * 100}') + threshold_percentage=$(echo "${{ inputs.runThreshold }}" | awk '{printf "%.2f", $1 * 100}') + min_pass_ratio_percentage=$(echo "${{ env.MIN_PASS_RATIO }}" | awk '{printf "%.2f", $1 * 100}') echo "| **Setting** | **Value** |" >> $GITHUB_STEP_SUMMARY echo "|-------------------------|------------|" >> $GITHUB_STEP_SUMMARY + echo "| Go Project | ${{ steps.set_project_path_pretty.outputs.path }} |" >> $GITHUB_STEP_SUMMARY + echo "| Minimum Pass Ratio | ${min_pass_ratio_percentage}% |" >> $GITHUB_STEP_SUMMARY echo "| Flakiness Threshold | ${threshold_percentage}% |" >> $GITHUB_STEP_SUMMARY - echo "| Test Run Count | ${{ env.RUN_COUNT }} |" >> $GITHUB_STEP_SUMMARY - echo "| Race Detection | ${{ inputs.runWithRace }} |" >> $GITHUB_STEP_SUMMARY + echo "| Test Run Count | ${{ steps.calculate_test_repeat_count.outputs.test_repeat_count }} |" >> $GITHUB_STEP_SUMMARY + echo "| Race Detection | ${{ env.RUN_WITH_RACE }} |" >> $GITHUB_STEP_SUMMARY echo "| Excluded Tests | ${{ env.SKIPPED_TESTS }} |" >> $GITHUB_STEP_SUMMARY - name: Append No Tests Found Message to GitHub Summary if: ${{ fromJson(steps.set_test_results.outputs.all_tests_count) == 0 }} run: | echo "### No Tests To Execute" >> $GITHUB_STEP_SUMMARY - echo "No updated or new tests found for \`${{ steps.set_project_path_pretty.outputs.path }}\` project. The flaky detector will not run." >> $GITHUB_STEP_SUMMARY + echo "No updated or new Go tests found for ${{ steps.set_project_path_pretty.outputs.path }} project. The flaky detector will not run." >> $GITHUB_STEP_SUMMARY - name: Post comment on PR if flaky tests found if: ${{ fromJson(steps.set_test_results.outputs.failed_tests_count) > 0 && github.event_name == 'pull_request' }} uses: actions/github-script@v7 env: MESSAGE_BODY_1: '### Flaky Test Detector for `${{ steps.set_project_path_pretty.outputs.path }}` project has failed :x:' - MESSAGE_BODY_2: 'Ran new or updated tests between `${{ inputs.baseRef }}` and ${{ needs.find-tests.outputs.git_head_sha }} (`${{ env.GIT_HEAD_REF }}`).' - MESSAGE_BODY_3: ${{ format('[View Flaky Detector Details]({0}/{1}/actions/runs/{2}) | [Compare Changes]({3}/compare/{4}...{5}#files_bucket)', github.server_url, github.repository, github.run_id, inputs.repoUrl, github.base_ref, needs.find-tests.outputs.git_head_sha) }} - MESSAGE_BODY_4: '#### Failed Tests' - MESSAGE_BODY_5: 'Ran ${{ steps.set_test_results.outputs.all_tests_count }} tests in total for all affected test packages. Below are the tests identified as flaky, with a pass ratio lower than the ${{ steps.calculate_threshold.outputs.threshold_percentage }}% threshold:' + MESSAGE_BODY_2: 'Ran new or updated tests between `${{ inputs.baseRef }}` and ${{ needs.get-tests.outputs.git_head_sha }} (`${{ env.GIT_HEAD_REF }}`).' + MESSAGE_BODY_3: ${{ format('[View Flaky Detector Details]({0}/{1}/actions/runs/{2}) | [Compare Changes]({3}/compare/{4}...{5}#files_bucket)', github.server_url, github.repository, github.run_id, inputs.repoUrl, github.base_ref, needs.get-tests.outputs.git_head_sha) }} + MESSAGE_BODY_4: '#### Flaky Tests' + MESSAGE_BODY_5: 'Ran ${{ steps.set_test_results.outputs.all_tests_count }} unique tests. 
Below are the tests identified as flaky, with a pass ratio lower than the ${{ steps.calculate_threshold.outputs.threshold_percentage }}% threshold:' MESSAGE_BODY_6: '```' MESSAGE_BODY_7: '${{ steps.read_failed_tests.outputs.failed_tests_content }}' MESSAGE_BODY_8: '```' @@ -450,21 +543,21 @@ jobs: "type": "section", "text": { "type": "mrkdwn", - "text": "Flaky Test Detector for ${{ steps.set_project_path_pretty.outputs.path }} project - ${{ contains(join(needs.*.result, ','), 'failure') && 'Failed :x:' || contains(join(needs.*.result, ','), 'cancelled') && 'Was cancelled :warning:' || 'Passed :white_check_mark:' }}" + "text": "Flaky Test Detector for `${{ steps.set_project_path_pretty.outputs.path }}` project - ${{ contains(join(needs.*.result, ','), 'failure') && 'Failed :x:' || contains(join(needs.*.result, ','), 'cancelled') && 'Was cancelled :warning:' || 'Passed :white_check_mark:' }}" } }, { "type": "section", "text": { "type": "mrkdwn", - "text": "Ran changed tests between `${{ inputs.baseRef }}` and `${{ needs.find-tests.outputs.git_head_short_sha }}` (`${{ env.GIT_HEAD_REF }}`)." + "text": "Ran changed tests between `${{ inputs.baseRef }}` and `${{ needs.get-tests.outputs.git_head_short_sha }}` (`${{ env.GIT_HEAD_REF }}`)." } }, { "type": "section", "text": { "type": "mrkdwn", - "text": "${{ format('<{0}/{1}/actions/runs/{2}|View Flaky Detector Details> | <{3}/compare/{4}...{5}#files_bucket|Compare Changes>{6}', github.server_url, github.repository, github.run_id, inputs.repoUrl, inputs.baseRef, needs.find-tests.outputs.git_head_sha, github.event_name == 'pull_request' && format(' | <{0}|View PR>', github.event.pull_request.html_url) || '') }}" + "text": "${{ format('<{0}/{1}/actions/runs/{2}|View Flaky Detector Details> | <{3}/compare/{4}...{5}#files_bucket|Compare Changes>{6}', github.server_url, github.repository, github.run_id, inputs.repoUrl, inputs.baseRef, needs.get-tests.outputs.git_head_sha, github.event_name == 'pull_request' && format(' | <{0}|View PR>', github.event.pull_request.html_url) || '') }}" } } ] diff --git a/.github/workflows/run-find-new-flaky-tests.yml b/.github/workflows/run-find-new-flaky-tests.yml index 238da78df2b..d1318719349 100644 --- a/.github/workflows/run-find-new-flaky-tests.yml +++ b/.github/workflows/run-find-new-flaky-tests.yml @@ -1,4 +1,4 @@ -name: Find New Flaky Tests +name: Find Flaky Tests on: workflow_dispatch: @@ -22,16 +22,16 @@ on: required: false type: string description: 'The head reference or branch to compare changes for detecting flaky tests. Default is the current branch.' + runAllTests: + required: false + type: boolean + description: 'Run all tests in the project.' + default: false runThreshold: required: false type: string description: 'The threshold for the number of times a test can fail before being considered flaky.' default: '0.8' - runWithRace: - required: false - type: boolean - description: 'Run tests with -race flag.' 
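`runThreshold` and `min_pass_ratio` bound the pass-ratio band that counts as flaky: results at or above the threshold are treated as passing, and results below the minimum pass ratio as consistently failing. A hedged Go sketch of that classification; flakeguard's own boundary handling may differ in detail:

package main

import "fmt"

type verdict string

const (
	verdictPassing verdict = "passing"
	verdictFlaky   verdict = "flaky"
	verdictFailing verdict = "consistently failing"
)

// classify buckets a test by its pass ratio using the workflow's two knobs:
// runThreshold (e.g. 0.99) and minPassRatio (e.g. 0.01).
func classify(passRatio, runThreshold, minPassRatio float64) verdict {
	switch {
	case passRatio >= runThreshold:
		return verdictPassing
	case passRatio < minPassRatio:
		return verdictFailing
	default:
		return verdictFlaky
	}
}

func main() {
	fmt.Println(classify(1.0, 0.99, 0.01)) // passing
	fmt.Println(classify(0.6, 0.99, 0.01)) // flaky
	fmt.Println(classify(0.0, 0.99, 0.01)) // consistently failing
}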
- default: true findByTestFilesDiff: required: false type: boolean @@ -54,7 +54,7 @@ on: jobs: trigger-flaky-test-detection: - name: Find New Flaky Tests + name: Find Flaky Tests uses: ./.github/workflows/find-new-flaky-tests.yml with: repoUrl: ${{ inputs.repoUrl }} @@ -62,7 +62,7 @@ jobs: projectPath: ${{ inputs.projectPath }} headRef: ${{ inputs.headRef }} runThreshold: ${{ inputs.runThreshold }} - runWithRace: ${{ inputs.runWithRace }} + runAllTests: ${{ inputs.runAllTests }} findByTestFilesDiff: ${{ inputs.findByTestFilesDiff }} findByAffectedPackages: ${{ inputs.findByAffectedPackages }} slackNotificationAfterTestsChannelId: ${{ inputs.slack_notification_after_tests_channel_id }} diff --git a/.github/workflows/run-nightly-flaky-test-detector.yml b/.github/workflows/run-nightly-flaky-test-detector.yml new file mode 100644 index 00000000000..615233a6106 --- /dev/null +++ b/.github/workflows/run-nightly-flaky-test-detector.yml @@ -0,0 +1,21 @@ +name: Run Nightly Flaky Test Detector + +on: + schedule: + # Run every night at 3:00 AM UTC + - cron: '0 3 * * *' + +jobs: + trigger-flaky-test-detection: + name: Find Flaky Tests + uses: ./.github/workflows/find-new-flaky-tests.yml + with: + repoUrl: 'https://github.com/smartcontractkit/chainlink' + baseRef: 'origin/develop' + projectPath: '.' + runThreshold: '1' + runAllTests: 'true' + extraArgs: '{ "skipped_tests": "TestChainComponents", "test_repeat_count": "5", "all_tests_runner": "ubuntu22.04-32cores-128GB", "all_tests_runner_count": "3", "min_pass_ratio": "0" }' + secrets: + SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} + \ No newline at end of file diff --git a/core/capabilities/compute/compute.go b/core/capabilities/compute/compute.go index 7e6961d2e8a..78a4cc1e033 100644 --- a/core/capabilities/compute/compute.go +++ b/core/capabilities/compute/compute.go @@ -8,6 +8,7 @@ import ( "fmt" "net/http" "strings" + "sync" "time" "github.com/google/uuid" @@ -19,6 +20,7 @@ import ( capabilitiespb "github.com/smartcontractkit/chainlink-common/pkg/capabilities/pb" "github.com/smartcontractkit/chainlink-common/pkg/custmsg" "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" coretypes "github.com/smartcontractkit/chainlink-common/pkg/types/core" "github.com/smartcontractkit/chainlink-common/pkg/workflows/wasm/host" wasmpb "github.com/smartcontractkit/chainlink-common/pkg/workflows/wasm/pb" @@ -73,7 +75,8 @@ var ( var _ capabilities.ActionCapability = (*Compute)(nil) type Compute struct { - log logger.Logger + stopCh services.StopChan + log logger.Logger // emitter is used to emit messages from the WASM module to a configured collector. emitter custmsg.MessageEmitter @@ -82,9 +85,13 @@ type Compute struct { // transformer is used to transform a values.Map into a ParsedConfig struct on each execution // of a request. 
- transformer ConfigTransformer + transformer *transformer outgoingConnectorHandler *webapi.OutgoingConnectorHandler idGenerator func() string + + numWorkers int + queue chan request + wg sync.WaitGroup } func (c *Compute) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { @@ -100,35 +107,76 @@ func generateID(binary []byte) string { return fmt.Sprintf("%x", id) } -func copyRequest(req capabilities.CapabilityRequest) capabilities.CapabilityRequest { - return capabilities.CapabilityRequest{ - Metadata: req.Metadata, - Inputs: req.Inputs.CopyMap(), - Config: req.Config.CopyMap(), +func (c *Compute) Execute(ctx context.Context, request capabilities.CapabilityRequest) (capabilities.CapabilityResponse, error) { + ch, err := c.enqueueRequest(ctx, request) + if err != nil { + return capabilities.CapabilityResponse{}, err + } + + select { + case <-c.stopCh: + return capabilities.CapabilityResponse{}, errors.New("service shutting down, aborting request") + case <-ctx.Done(): + return capabilities.CapabilityResponse{}, fmt.Errorf("request cancelled by upstream: %w", ctx.Err()) + case resp := <-ch: + return resp.resp, resp.err } } -func (c *Compute) Execute(ctx context.Context, request capabilities.CapabilityRequest) (capabilities.CapabilityResponse, error) { - copied := copyRequest(request) +type request struct { + ch chan response + req capabilities.CapabilityRequest + ctx func() context.Context +} - cfg, err := c.transformer.Transform(copied.Config) +type response struct { + resp capabilities.CapabilityResponse + err error +} + +func (c *Compute) enqueueRequest(ctx context.Context, req capabilities.CapabilityRequest) (<-chan response, error) { + ch := make(chan response) + r := request{ + ch: ch, + req: req, + ctx: func() context.Context { return ctx }, + } + select { + case <-c.stopCh: + return nil, errors.New("service shutting down, aborting request") + case <-ctx.Done(): + return nil, fmt.Errorf("could not enqueue request: %w", ctx.Err()) + case c.queue <- r: + return ch, nil + } +} + +func (c *Compute) execute(ctx context.Context, respCh chan response, req capabilities.CapabilityRequest) { + copiedReq, cfg, err := c.transformer.Transform(req) if err != nil { - return capabilities.CapabilityResponse{}, fmt.Errorf("invalid request: could not transform config: %w", err) + respCh <- response{err: fmt.Errorf("invalid request: could not transform config: %w", err)} + return } id := generateID(cfg.Binary) m, ok := c.modules.get(id) if !ok { - mod, err := c.initModule(id, cfg.ModuleConfig, cfg.Binary, request.Metadata) - if err != nil { - return capabilities.CapabilityResponse{}, err + mod, innerErr := c.initModule(id, cfg.ModuleConfig, cfg.Binary, copiedReq.Metadata) + if innerErr != nil { + respCh <- response{err: innerErr} + return } m = mod } - return c.executeWithModule(ctx, m.module, cfg.Config, request) + resp, err := c.executeWithModule(ctx, m.module, cfg.Config, copiedReq) + select { + case <-c.stopCh: + case <-ctx.Done(): + case respCh <- response{resp: resp, err: err}: + } } func (c *Compute) initModule(id string, cfg *host.ModuleConfig, binary []byte, requestMetadata capabilities.RequestMetadata) (*module, error) { @@ -196,11 +244,35 @@ func (c *Compute) Info(ctx context.Context) (capabilities.CapabilityInfo, error) func (c *Compute) Start(ctx context.Context) error { c.modules.start() + + c.wg.Add(c.numWorkers) + for i := 0; i < c.numWorkers; i++ { + go func() { + innerCtx, cancel := c.stopCh.NewCtx() + defer cancel() + + defer c.wg.Done() + 
c.worker(innerCtx) + }() + } return c.registry.Add(ctx, c) } +func (c *Compute) worker(ctx context.Context) { + for { + select { + case <-c.stopCh: + return + case req := <-c.queue: + c.execute(req.ctx(), req.ch, req.req) + } + } +} + func (c *Compute) Close() error { c.modules.close() + close(c.stopCh) + c.wg.Wait() return nil } @@ -270,18 +342,31 @@ func (c *Compute) createFetcher() func(ctx context.Context, req *wasmpb.FetchReq } } +const ( + defaultNumWorkers = 3 +) + +type Config struct { + webapi.ServiceConfig + NumWorkers int +} + func NewAction( - config webapi.ServiceConfig, + config Config, log logger.Logger, registry coretypes.CapabilitiesRegistry, handler *webapi.OutgoingConnectorHandler, idGenerator func() string, opts ...func(*Compute), ) *Compute { + if config.NumWorkers == 0 { + config.NumWorkers = defaultNumWorkers + } var ( lggr = logger.Named(log, "CustomCompute") labeler = custmsg.NewLabeler() compute = &Compute{ + stopCh: make(services.StopChan), log: lggr, emitter: labeler, registry: registry, @@ -289,6 +374,8 @@ func NewAction( transformer: NewTransformer(lggr, labeler), outgoingConnectorHandler: handler, idGenerator: idGenerator, + queue: make(chan request), + numWorkers: defaultNumWorkers, } ) diff --git a/core/capabilities/compute/compute_test.go b/core/capabilities/compute/compute_test.go index ec82533f2bb..719bff82edf 100644 --- a/core/capabilities/compute/compute_test.go +++ b/core/capabilities/compute/compute_test.go @@ -32,12 +32,14 @@ const ( validRequestUUID = "d2fe6db9-beb4-47c9-b2d6-d3065ace111e" ) -var defaultConfig = webapi.ServiceConfig{ - RateLimiter: common.RateLimiterConfig{ - GlobalRPS: 100.0, - GlobalBurst: 100, - PerSenderRPS: 100.0, - PerSenderBurst: 100, +var defaultConfig = Config{ + ServiceConfig: webapi.ServiceConfig{ + RateLimiter: common.RateLimiterConfig{ + GlobalRPS: 100.0, + GlobalBurst: 100, + PerSenderRPS: 100.0, + PerSenderBurst: 100, + }, }, } @@ -45,17 +47,17 @@ type testHarness struct { registry *corecapabilities.Registry connector *gcmocks.GatewayConnector log logger.Logger - config webapi.ServiceConfig + config Config connectorHandler *webapi.OutgoingConnectorHandler compute *Compute } -func setup(t *testing.T, config webapi.ServiceConfig) testHarness { +func setup(t *testing.T, config Config) testHarness { log := logger.TestLogger(t) registry := capabilities.NewRegistry(log) connector := gcmocks.NewGatewayConnector(t) idGeneratorFn := func() string { return validRequestUUID } - connectorHandler, err := webapi.NewOutgoingConnectorHandler(connector, config, ghcapabilities.MethodComputeAction, log) + connectorHandler, err := webapi.NewOutgoingConnectorHandler(connector, config.ServiceConfig, ghcapabilities.MethodComputeAction, log) require.NoError(t, err) compute := NewAction(config, log, registry, connectorHandler, idGeneratorFn) diff --git a/core/capabilities/compute/transformer.go b/core/capabilities/compute/transformer.go index 99efcda8323..3b4ae4cfa69 100644 --- a/core/capabilities/compute/transformer.go +++ b/core/capabilities/compute/transformer.go @@ -5,21 +5,13 @@ import ( "fmt" "time" + "github.com/smartcontractkit/chainlink-common/pkg/capabilities" "github.com/smartcontractkit/chainlink-common/pkg/custmsg" "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/values" "github.com/smartcontractkit/chainlink-common/pkg/workflows/wasm/host" ) -type Transformer[T any, U any] interface { - // Transform changes a struct of type T into a struct of type U. 
Accepts a variadic list of options to modify the - // output struct. - Transform(T, ...func(*U)) (*U, error) -} - -// ConfigTransformer is a Transformer that converts a values.Map into a ParsedConfig struct. -type ConfigTransformer = Transformer[*values.Map, ParsedConfig] - // ParsedConfig is a struct that contains the binary and config for a wasm module, as well as the module config. type ParsedConfig struct { Binary []byte @@ -36,25 +28,41 @@ type transformer struct { emitter custmsg.MessageEmitter } +func shallowCopy(m *values.Map) *values.Map { + to := values.EmptyMap() + + for k, v := range m.Underlying { + to.Underlying[k] = v + } + + return to +} + // Transform attempts to read a valid ParsedConfig from an arbitrary values map. The map must // contain the binary and config keys. Optionally the map may specify wasm module specific // configuration values such as maxMemoryMBs, timeout, and tickInterval. Default logger and // emitter for the module are taken from the transformer instance. Override these values with // the functional options. -func (t *transformer) Transform(in *values.Map, opts ...func(*ParsedConfig)) (*ParsedConfig, error) { - binary, err := popValue[[]byte](in, binaryKey) +func (t *transformer) Transform(req capabilities.CapabilityRequest, opts ...func(*ParsedConfig)) (capabilities.CapabilityRequest, *ParsedConfig, error) { + copiedReq := capabilities.CapabilityRequest{ + Inputs: req.Inputs, + Metadata: req.Metadata, + Config: shallowCopy(req.Config), + } + + binary, err := popValue[[]byte](copiedReq.Config, binaryKey) if err != nil { - return nil, NewInvalidRequestError(err) + return capabilities.CapabilityRequest{}, nil, NewInvalidRequestError(err) } - config, err := popValue[[]byte](in, configKey) + config, err := popValue[[]byte](copiedReq.Config, configKey) if err != nil { - return nil, NewInvalidRequestError(err) + return capabilities.CapabilityRequest{}, nil, NewInvalidRequestError(err) } - maxMemoryMBs, err := popOptionalValue[int64](in, maxMemoryMBsKey) + maxMemoryMBs, err := popOptionalValue[int64](copiedReq.Config, maxMemoryMBsKey) if err != nil { - return nil, NewInvalidRequestError(err) + return capabilities.CapabilityRequest{}, nil, NewInvalidRequestError(err) } mc := &host.ModuleConfig{ @@ -63,30 +71,30 @@ func (t *transformer) Transform(in *values.Map, opts ...func(*ParsedConfig)) (*P Labeler: t.emitter, } - timeout, err := popOptionalValue[string](in, timeoutKey) + timeout, err := popOptionalValue[string](copiedReq.Config, timeoutKey) if err != nil { - return nil, NewInvalidRequestError(err) + return capabilities.CapabilityRequest{}, nil, NewInvalidRequestError(err) } var td time.Duration if timeout != "" { td, err = time.ParseDuration(timeout) if err != nil { - return nil, NewInvalidRequestError(err) + return capabilities.CapabilityRequest{}, nil, NewInvalidRequestError(err) } mc.Timeout = &td } - tickInterval, err := popOptionalValue[string](in, tickIntervalKey) + tickInterval, err := popOptionalValue[string](copiedReq.Config, tickIntervalKey) if err != nil { - return nil, NewInvalidRequestError(err) + return capabilities.CapabilityRequest{}, nil, NewInvalidRequestError(err) } var ti time.Duration if tickInterval != "" { ti, err = time.ParseDuration(tickInterval) if err != nil { - return nil, NewInvalidRequestError(err) + return capabilities.CapabilityRequest{}, nil, NewInvalidRequestError(err) } mc.TickInterval = ti } @@ -101,7 +109,7 @@ func (t *transformer) Transform(in *values.Map, opts ...func(*ParsedConfig)) (*P opt(pc) } - return pc, nil + 
return copiedReq, pc, nil } func NewTransformer(lggr logger.Logger, emitter custmsg.MessageEmitter) *transformer { diff --git a/core/capabilities/compute/transformer_test.go b/core/capabilities/compute/transformer_test.go index 83131636462..ee77e20d6f6 100644 --- a/core/capabilities/compute/transformer_test.go +++ b/core/capabilities/compute/transformer_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/smartcontractkit/chainlink-common/pkg/capabilities" "github.com/smartcontractkit/chainlink-common/pkg/custmsg" "github.com/smartcontractkit/chainlink-common/pkg/values" "github.com/smartcontractkit/chainlink-common/pkg/workflows/wasm/host" @@ -94,6 +95,9 @@ func Test_transformer(t *testing.T) { "binary": []byte{0x01, 0x02, 0x03}, "config": []byte{0x04, 0x05, 0x06}, }) + giveReq := capabilities.CapabilityRequest{ + Config: giveMap, + } require.NoError(t, err) wantTO := 4 * time.Second @@ -110,7 +114,7 @@ func Test_transformer(t *testing.T) { } tf := NewTransformer(lgger, emitter) - gotConfig, err := tf.Transform(giveMap) + _, gotConfig, err := tf.Transform(giveReq) require.NoError(t, err) assert.Equal(t, wantConfig, gotConfig) @@ -121,6 +125,9 @@ func Test_transformer(t *testing.T) { "binary": []byte{0x01, 0x02, 0x03}, "config": []byte{0x04, 0x05, 0x06}, }) + giveReq := capabilities.CapabilityRequest{ + Config: giveMap, + } require.NoError(t, err) wantConfig := &ParsedConfig{ @@ -133,7 +140,7 @@ func Test_transformer(t *testing.T) { } tf := NewTransformer(lgger, emitter) - gotConfig, err := tf.Transform(giveMap) + _, gotConfig, err := tf.Transform(giveReq) require.NoError(t, err) assert.Equal(t, wantConfig, gotConfig) @@ -145,10 +152,13 @@ func Test_transformer(t *testing.T) { "binary": []byte{0x01, 0x02, 0x03}, "config": []byte{0x04, 0x05, 0x06}, }) + giveReq := capabilities.CapabilityRequest{ + Config: giveMap, + } require.NoError(t, err) tf := NewTransformer(lgger, emitter) - _, err = tf.Transform(giveMap) + _, _, err = tf.Transform(giveReq) require.Error(t, err) require.ErrorContains(t, err, "invalid request") @@ -160,10 +170,13 @@ func Test_transformer(t *testing.T) { "binary": []byte{0x01, 0x02, 0x03}, "config": []byte{0x04, 0x05, 0x06}, }) + giveReq := capabilities.CapabilityRequest{ + Config: giveMap, + } require.NoError(t, err) tf := NewTransformer(lgger, emitter) - _, err = tf.Transform(giveMap) + _, _, err = tf.Transform(giveReq) require.Error(t, err) require.ErrorContains(t, err, "invalid request") diff --git a/core/capabilities/integration_tests/framework/capabilities_registry.go b/core/capabilities/integration_tests/framework/capabilities_registry.go index 838303a9f16..5c23d2ebc1a 100644 --- a/core/capabilities/integration_tests/framework/capabilities_registry.go +++ b/core/capabilities/integration_tests/framework/capabilities_registry.go @@ -64,6 +64,8 @@ func (r *CapabilitiesRegistry) getAddress() common.Address { type capability struct { donCapabilityConfig *pb.CapabilityConfig registryConfig kcr.CapabilitiesRegistryCapability + // internalOnly is true if the capability is published in the registry but not made available outside the DON in which it runs + internalOnly bool } // SetupDON sets up a new DON with the given capabilities and returns the DON ID diff --git a/core/capabilities/integration_tests/framework/don.go b/core/capabilities/integration_tests/framework/don.go index 0c0284e53d3..999966bdc1d 100644 --- a/core/capabilities/integration_tests/framework/don.go +++ b/core/capabilities/integration_tests/framework/don.go @@ -2,6 +2,7 @@ package framework 
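The compute.go changes above replace direct execution with a bounded worker pool: Execute enqueues onto a channel, numWorkers goroutines drain it, and both the enqueue and the reply select on the service stop channel and the caller's context. A stripped-down Go sketch of that pattern under generic, hypothetical names, not the actual Compute type:

package workerpool

import (
	"context"
	"errors"
	"sync"
)

type request struct {
	ctx  context.Context
	in   string
	resp chan string
}

type Pool struct {
	stopCh chan struct{}
	queue  chan request
	wg     sync.WaitGroup
}

// New starts numWorkers goroutines that drain the queue until Close is called.
func New(numWorkers int, handle func(context.Context, string) string) *Pool {
	p := &Pool{stopCh: make(chan struct{}), queue: make(chan request)}
	p.wg.Add(numWorkers)
	for i := 0; i < numWorkers; i++ {
		go func() {
			defer p.wg.Done()
			for {
				select {
				case <-p.stopCh:
					return
				case r := <-p.queue:
					out := handle(r.ctx, r.in)
					select {
					case r.resp <- out:
					case <-r.ctx.Done():
					case <-p.stopCh:
					}
				}
			}
		}()
	}
	return p
}

// Execute enqueues the request and waits for a reply, a shutdown, or cancellation.
func (p *Pool) Execute(ctx context.Context, in string) (string, error) {
	r := request{ctx: ctx, in: in, resp: make(chan string)}
	select {
	case <-p.stopCh:
		return "", errors.New("shutting down")
	case <-ctx.Done():
		return "", ctx.Err()
	case p.queue <- r:
	}
	select {
	case <-p.stopCh:
		return "", errors.New("shutting down")
	case <-ctx.Done():
		return "", ctx.Err()
	case out := <-r.resp:
		return out, nil
	}
}

// Close stops the workers and waits for them to exit.
func (p *Pool) Close() {
	close(p.stopCh)
	p.wg.Wait()
}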
import ( "context" + "encoding/hex" "fmt" "strconv" "testing" @@ -13,6 +14,8 @@ import ( "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types" + "github.com/smartcontractkit/chainlink-common/pkg/services" + "github.com/smartcontractkit/chainlink/v2/core/services/registrysyncer" commoncap "github.com/smartcontractkit/chainlink-common/pkg/capabilities" @@ -40,13 +43,13 @@ import ( type DonContext struct { EthBlockchain *EthBlockchain - p2pNetwork *MockRageP2PNetwork + p2pNetwork *FakeRageP2PNetwork capabilityRegistry *CapabilitiesRegistry } func CreateDonContext(ctx context.Context, t *testing.T) DonContext { ethBlockchain := NewEthBlockchain(t, 1000, 1*time.Second) - rageP2PNetwork := NewMockRageP2PNetwork(t, 1000) + rageP2PNetwork := NewFakeRageP2PNetwork(ctx, t, 1000) capabilitiesRegistry := NewCapabilitiesRegistry(ctx, t, ethBlockchain) servicetest.Run(t, rageP2PNetwork) @@ -54,6 +57,29 @@ func CreateDonContext(ctx context.Context, t *testing.T) DonContext { return DonContext{EthBlockchain: ethBlockchain, p2pNetwork: rageP2PNetwork, capabilityRegistry: capabilitiesRegistry} } +func (c DonContext) WaitForCapabilitiesToBeExposed(t *testing.T, dons ...*DON) { + allExpectedCapabilities := make(map[CapabilityRegistration]bool) + for _, don := range dons { + caps, err := don.GetExternalCapabilities() + require.NoError(t, err) + for k, v := range caps { + allExpectedCapabilities[k] = v + } + } + + require.Eventually(t, func() bool { + registrations := c.p2pNetwork.GetCapabilityRegistrations() + + for k := range allExpectedCapabilities { + if _, ok := registrations[k]; !ok { + return false + } + } + + return true + }, 1*time.Minute, 1*time.Second, "timeout waiting for capabilities to be exposed") +} + type capabilityNode struct { *cltest.TestApplication registry *capabilities.Registry @@ -64,12 +90,13 @@ type capabilityNode struct { } type DON struct { + services.StateMachine t *testing.T config DonConfiguration lggr logger.Logger nodes []*capabilityNode standardCapabilityJobs []*job.Job - externalCapabilities []capability + publishedCapabilities []capability capabilitiesRegistry *CapabilitiesRegistry nodeConfigModifiers []func(c *chainlink.Config, node *capabilityNode) @@ -84,10 +111,13 @@ func NewDON(ctx context.Context, t *testing.T, lggr logger.Logger, donConfig Don dependentDONs []commoncap.DON, donContext DonContext, supportsOCR bool) *DON { don := &DON{t: t, lggr: lggr.Named(donConfig.name), config: donConfig, capabilitiesRegistry: donContext.capabilityRegistry} + protocolRoundInterval := 1 * time.Second + var newOracleFactoryFn standardcapabilities.NewOracleFactoryFn - var libOcr *MockLibOCR + var libOcr *FakeLibOCR if supportsOCR { - libOcr = NewMockLibOCR(t, lggr, donConfig.F, 1*time.Second) + // This is required to support the non standard OCR3 capability - will be removed when required OCR3 behaviour is implemented as standard capabilities + libOcr = NewFakeLibOCR(t, lggr, donConfig.F, protocolRoundInterval) servicetest.Run(t, libOcr) } @@ -110,7 +140,8 @@ func NewDON(ctx context.Context, t *testing.T, lggr logger.Logger, donConfig Don don.nodes = append(don.nodes, cn) if supportsOCR { - factory := newMockLibOcrOracleFactory(libOcr, donConfig.KeyBundles[i], len(donConfig.Members), int(donConfig.F)) + factory := newFakeOracleFactoryFactory(t, lggr, donConfig.KeyBundles[i], len(donConfig.Members), donConfig.F, + protocolRoundInterval) newOracleFactoryFn = factory.NewOracleFactory } @@ -134,12 +165,10 @@ func NewDON(ctx context.Context, t *testing.T, lggr logger.Logger, 
donConfig Don // Initialise must be called after all capabilities have been added to the DONs and before Start is called func (d *DON) Initialise() { - if len(d.externalCapabilities) > 0 { - id := d.capabilitiesRegistry.setupDON(d.config, d.externalCapabilities) + id := d.capabilitiesRegistry.setupDON(d.config, d.publishedCapabilities) - //nolint:gosec // disable G115 - d.config.DON.ID = uint32(id) - } + //nolint:gosec // disable G115 + d.config.DON.ID = uint32(id) } func (d *DON) GetID() uint32 { @@ -150,6 +179,29 @@ func (d *DON) GetID() uint32 { return d.config.ID } +func (d *DON) GetExternalCapabilities() (map[CapabilityRegistration]bool, error) { + result := map[CapabilityRegistration]bool{} + for _, publishedCapability := range d.publishedCapabilities { + if publishedCapability.internalOnly { + continue + } + + for _, node := range d.nodes { + peerIDBytes, err := peerIDToBytes(node.peerID.PeerID) + if err != nil { + return nil, fmt.Errorf("failed to convert peer ID to bytes: %w", err) + } + result[CapabilityRegistration{ + nodePeerID: hex.EncodeToString(peerIDBytes[:]), + capabilityID: publishedCapability.registryConfig.LabelledName + "@" + publishedCapability.registryConfig.Version, + capabilityDonID: d.GetID(), + }] = true + } + } + + return result, nil +} + func (d *DON) GetConfigVersion() uint32 { return d.config.ConfigVersion } @@ -162,20 +214,22 @@ func (d *DON) GetPeerIDs() []peer { return d.config.peerIDs } -func (d *DON) Start(ctx context.Context, t *testing.T) { +func (d *DON) Start(ctx context.Context) error { for _, triggerFactory := range d.triggerFactories { for _, node := range d.nodes { - trigger := triggerFactory.CreateNewTrigger(t) - err := node.registry.Add(ctx, trigger) - require.NoError(t, err) + trigger := triggerFactory.CreateNewTrigger(d.t) + if err := node.registry.Add(ctx, trigger); err != nil { + return fmt.Errorf("failed to add trigger: %w", err) + } } } for _, targetFactory := range d.targetFactories { for _, node := range d.nodes { - target := targetFactory.CreateNewTarget(t) - err := node.registry.Add(ctx, target) - require.NoError(t, err) + target := targetFactory.CreateNewTarget(d.t) + if err := node.registry.Add(ctx, target); err != nil { + return fmt.Errorf("failed to add target: %w", err) + } } } @@ -184,18 +238,31 @@ func (d *DON) Start(ctx context.Context, t *testing.T) { } if d.addOCR3NonStandardCapability { - libocr := NewMockLibOCR(t, d.lggr, d.config.F, 1*time.Second) - servicetest.Run(t, libocr) + libocr := NewFakeLibOCR(d.t, d.lggr, d.config.F, 1*time.Second) + servicetest.Run(d.t, libocr) for _, node := range d.nodes { - addOCR3Capability(ctx, t, d.lggr, node.registry, libocr, d.config.F, node.KeyBundle) + addOCR3Capability(ctx, d.t, d.lggr, node.registry, libocr, d.config.F, node.KeyBundle) } } for _, capabilityJob := range d.standardCapabilityJobs { - err := d.AddJob(ctx, capabilityJob) - require.NoError(t, err) + if err := d.AddJob(ctx, capabilityJob); err != nil { + return fmt.Errorf("failed to add standard capability job: %w", err) + } } + + return nil +} + +func (d *DON) Close() error { + for _, node := range d.nodes { + if err := node.Stop(); err != nil { + return fmt.Errorf("failed to stop node: %w", err) + } + } + + return nil } const StandardCapabilityTemplateJobSpec = ` @@ -203,7 +270,7 @@ type = "standardcapabilities" schemaVersion = 1 name = "%s" command="%s" -config="%s" +config=%s ` func (d *DON) AddStandardCapability(name string, command string, config string) { @@ -214,11 +281,30 @@ func (d *DON) AddStandardCapability(name 
string, command string, config string) d.standardCapabilityJobs = append(d.standardCapabilityJobs, &capabilitiesSpecJob) } +func (d *DON) AddPublishedStandardCapability(name string, command string, config string, + defaultCapabilityRequestConfig *pb.CapabilityConfig, + registryConfig kcr.CapabilitiesRegistryCapability) { + spec := fmt.Sprintf(StandardCapabilityTemplateJobSpec, name, command, config) + capabilitiesSpecJob, err := standardcapabilities.ValidatedStandardCapabilitiesSpec(spec) + require.NoError(d.t, err) + + d.standardCapabilityJobs = append(d.standardCapabilityJobs, &capabilitiesSpecJob) + + d.publishedCapabilities = append(d.publishedCapabilities, capability{ + donCapabilityConfig: defaultCapabilityRequestConfig, + registryConfig: registryConfig, + }) +} + // TODO - add configuration for remote support - do this for each capability as an option func (d *DON) AddTargetCapability(targetFactory TargetFactory) { d.targetFactories = append(d.targetFactories, targetFactory) } +func (d *DON) AddTriggerCapability(triggerFactory TriggerFactory) { + d.triggerFactories = append(d.triggerFactories, triggerFactory) +} + func (d *DON) AddExternalTriggerCapability(triggerFactory TriggerFactory) { d.triggerFactories = append(d.triggerFactories, triggerFactory) @@ -243,7 +329,7 @@ func (d *DON) AddExternalTriggerCapability(triggerFactory TriggerFactory) { }, } - d.externalCapabilities = append(d.externalCapabilities, triggerCapability) + d.publishedCapabilities = append(d.publishedCapabilities, triggerCapability) } func (d *DON) AddJob(ctx context.Context, j *job.Job) error { @@ -323,9 +409,10 @@ func (d *DON) AddOCR3NonStandardCapability() { CapabilityType: uint8(registrysyncer.ContractCapabilityTypeConsensus), } - d.externalCapabilities = append(d.externalCapabilities, capability{ + d.publishedCapabilities = append(d.publishedCapabilities, capability{ donCapabilityConfig: newCapabilityConfig(), registryConfig: ocr, + internalOnly: true, }) } @@ -357,7 +444,7 @@ func (d *DON) AddEthereumWriteTargetNonStandardCapability(forwarderAddr common.A }, } - d.externalCapabilities = append(d.externalCapabilities, capability{ + d.publishedCapabilities = append(d.publishedCapabilities, capability{ donCapabilityConfig: targetCapabilityConfig, registryConfig: writeChain, }) @@ -366,7 +453,7 @@ func (d *DON) AddEthereumWriteTargetNonStandardCapability(forwarderAddr common.A } func addOCR3Capability(ctx context.Context, t *testing.T, lggr logger.Logger, capabilityRegistry *capabilities.Registry, - libocr *MockLibOCR, donF uint8, ocr2KeyBundle ocr2key.KeyBundle) { + libocr *FakeLibOCR, donF uint8, ocr2KeyBundle ocr2key.KeyBundle) { requestTimeout := 10 * time.Minute cfg := ocr3.Config{ Logger: lggr, @@ -394,6 +481,6 @@ func addOCR3Capability(ctx context.Context, t *testing.T, lggr logger.Logger, ca libocr.AddNode(plugin, transmitter, ocr2KeyBundle) } -func Context(tb testing.TB) context.Context { - return testutils.Context(tb) +func Context(tb testing.TB) (ctx context.Context, cancel func()) { + return context.WithCancel(testutils.Context(tb)) } diff --git a/core/capabilities/integration_tests/framework/mock_dispatcher.go b/core/capabilities/integration_tests/framework/fake_dispatcher.go similarity index 62% rename from core/capabilities/integration_tests/framework/mock_dispatcher.go rename to core/capabilities/integration_tests/framework/fake_dispatcher.go index f208933f1f1..cc6655a035c 100644 --- a/core/capabilities/integration_tests/framework/mock_dispatcher.go +++ 
b/core/capabilities/integration_tests/framework/fake_dispatcher.go @@ -2,11 +2,15 @@ package framework import ( "context" + "encoding/hex" + "errors" "fmt" "sync" "testing" "time" + "github.com/smartcontractkit/libocr/ragep2p/types" + "github.com/smartcontractkit/chainlink-common/pkg/services" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" "github.com/smartcontractkit/chainlink/v2/core/capabilities/remote" @@ -16,11 +20,12 @@ import ( "google.golang.org/protobuf/proto" ) -// MockRageP2PNetwork backs the dispatchers created for each node in the test and effectively +// FakeRageP2PNetwork backs the dispatchers created for each node in the test and effectively // acts as the rageP2P network layer. -type MockRageP2PNetwork struct { +type FakeRageP2PNetwork struct { services.StateMachine - t *testing.T + t *testing.T + readyError error chanBufferSize int stopCh services.StopChan @@ -28,34 +33,56 @@ type MockRageP2PNetwork struct { peerIDToBrokerNode map[p2ptypes.PeerID]*brokerNode + capabilityRegistrations map[CapabilityRegistration]bool + mux sync.Mutex } -func NewMockRageP2PNetwork(t *testing.T, chanBufferSize int) *MockRageP2PNetwork { - return &MockRageP2PNetwork{ - t: t, - stopCh: make(services.StopChan), - chanBufferSize: chanBufferSize, - peerIDToBrokerNode: make(map[p2ptypes.PeerID]*brokerNode), +func NewFakeRageP2PNetwork(ctx context.Context, t *testing.T, chanBufferSize int) *FakeRageP2PNetwork { + network := &FakeRageP2PNetwork{ + t: t, + stopCh: make(services.StopChan), + chanBufferSize: chanBufferSize, + peerIDToBrokerNode: make(map[p2ptypes.PeerID]*brokerNode), + capabilityRegistrations: make(map[CapabilityRegistration]bool), } + + go func() { + <-ctx.Done() + network.SetReadyError(errors.New("context done")) + }() + + return network } -func (a *MockRageP2PNetwork) Start(ctx context.Context) error { - return a.StartOnce("MockRageP2PNetwork", func() error { +func (a *FakeRageP2PNetwork) Start(ctx context.Context) error { + return a.StartOnce("FakeRageP2PNetwork", func() error { return nil }) } -func (a *MockRageP2PNetwork) Close() error { - return a.StopOnce("MockRageP2PNetwork", func() error { +func (a *FakeRageP2PNetwork) Close() error { + return a.StopOnce("FakeRageP2PNetwork", func() error { close(a.stopCh) a.wg.Wait() return nil }) } +func (a *FakeRageP2PNetwork) Ready() error { + a.mux.Lock() + defer a.mux.Unlock() + return a.readyError +} + +func (a *FakeRageP2PNetwork) SetReadyError(err error) { + a.mux.Lock() + defer a.mux.Unlock() + a.readyError = err +} + // NewDispatcherForNode creates a new dispatcher for a node with the given peer ID. 
-func (a *MockRageP2PNetwork) NewDispatcherForNode(nodePeerID p2ptypes.PeerID) remotetypes.Dispatcher { +func (a *FakeRageP2PNetwork) NewDispatcherForNode(nodePeerID p2ptypes.PeerID) remotetypes.Dispatcher { return &brokerDispatcher{ callerPeerID: nodePeerID, broker: a, @@ -63,18 +90,41 @@ func (a *MockRageP2PNetwork) NewDispatcherForNode(nodePeerID p2ptypes.PeerID) re } } -func (a *MockRageP2PNetwork) HealthReport() map[string]error { +func (a *FakeRageP2PNetwork) HealthReport() map[string]error { return nil } -func (a *MockRageP2PNetwork) Name() string { - return "MockRageP2PNetwork" +func (a *FakeRageP2PNetwork) Name() string { + return "FakeRageP2PNetwork" +} + +type CapabilityRegistration struct { + nodePeerID string + capabilityID string + capabilityDonID uint32 } -func (a *MockRageP2PNetwork) registerReceiverNode(nodePeerID p2ptypes.PeerID, capabilityID string, capabilityDonID uint32, receiver remotetypes.Receiver) { +func (a *FakeRageP2PNetwork) GetCapabilityRegistrations() map[CapabilityRegistration]bool { a.mux.Lock() defer a.mux.Unlock() + copiedRegistrations := make(map[CapabilityRegistration]bool) + for k, v := range a.capabilityRegistrations { + copiedRegistrations[k] = v + } + return copiedRegistrations +} + +func (a *FakeRageP2PNetwork) registerReceiverNode(nodePeerID p2ptypes.PeerID, capabilityID string, capabilityDonID uint32, receiver remotetypes.Receiver) { + a.mux.Lock() + defer a.mux.Unlock() + + a.capabilityRegistrations[CapabilityRegistration{ + nodePeerID: hex.EncodeToString(nodePeerID[:]), + capabilityID: capabilityID, + capabilityDonID: capabilityDonID, + }] = true + node, ok := a.peerIDToBrokerNode[nodePeerID] if !ok { node = a.newNode() @@ -90,9 +140,10 @@ func (a *MockRageP2PNetwork) registerReceiverNode(nodePeerID p2ptypes.PeerID, ca } } -func (a *MockRageP2PNetwork) Send(msg *remotetypes.MessageBody) { +func (a *FakeRageP2PNetwork) Send(msg *remotetypes.MessageBody) { peerID := toPeerID(msg.Receiver) - node, ok := a.peerIDToBrokerNode[peerID] + + node, ok := a.getNodeForPeerID(peerID) if !ok { panic(fmt.Sprintf("node not found for peer ID %v", peerID)) } @@ -100,6 +151,13 @@ func (a *MockRageP2PNetwork) Send(msg *remotetypes.MessageBody) { node.receiveCh <- msg } +func (a *FakeRageP2PNetwork) getNodeForPeerID(peerID types.PeerID) (*brokerNode, bool) { + a.mux.Lock() + defer a.mux.Unlock() + node, ok := a.peerIDToBrokerNode[peerID] + return node, ok +} + type brokerNode struct { registerReceiverCh chan *registerReceiverRequest receiveCh chan *remotetypes.MessageBody @@ -115,7 +173,7 @@ type registerReceiverRequest struct { receiver remotetypes.Receiver } -func (a *MockRageP2PNetwork) newNode() *brokerNode { +func (a *FakeRageP2PNetwork) newNode() *brokerNode { n := &brokerNode{ receiveCh: make(chan *remotetypes.MessageBody, a.chanBufferSize), registerReceiverCh: make(chan *registerReceiverRequest, a.chanBufferSize), @@ -155,6 +213,7 @@ func toPeerID(id []byte) p2ptypes.PeerID { type broker interface { Send(msg *remotetypes.MessageBody) + Ready() error } type brokerDispatcher struct { @@ -190,7 +249,7 @@ func (t *brokerDispatcher) SetReceiver(capabilityId string, donId uint32, receiv } t.receivers[k] = receiver - t.broker.(*MockRageP2PNetwork).registerReceiverNode(t.callerPeerID, capabilityId, donId, receiver) + t.broker.(*FakeRageP2PNetwork).registerReceiverNode(t.callerPeerID, capabilityId, donId, receiver) return nil } func (t *brokerDispatcher) RemoveReceiver(capabilityId string, donId uint32) {} @@ -202,7 +261,7 @@ func (t *brokerDispatcher) Close() error 
{ } func (t *brokerDispatcher) Ready() error { - return nil + return t.broker.Ready() } func (t *brokerDispatcher) HealthReport() map[string]error { @@ -210,5 +269,5 @@ func (t *brokerDispatcher) HealthReport() map[string]error { } func (t *brokerDispatcher) Name() string { - return "mockDispatcher" + return "fakeDispatcher" } diff --git a/core/capabilities/integration_tests/framework/mock_libocr.go b/core/capabilities/integration_tests/framework/fake_libocr.go similarity index 64% rename from core/capabilities/integration_tests/framework/mock_libocr.go rename to core/capabilities/integration_tests/framework/fake_libocr.go index 39705031f55..0f378a39129 100644 --- a/core/capabilities/integration_tests/framework/mock_libocr.go +++ b/core/capabilities/integration_tests/framework/fake_libocr.go @@ -24,56 +24,105 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/logger" ) +type oracleContext struct { + t *testing.T + lggr logger.Logger + key ocr2key.KeyBundle + N int + F uint8 + protocolRoundInterval time.Duration + mux sync.Mutex + pluginNameToFakeOcr map[string]*FakeLibOCR +} + +func (m *oracleContext) addPlugin(ctx context.Context, info ocr3types.ReportingPluginInfo, plugin ocr3types.ReportingPlugin[[]byte], + args coretypes.OracleArgs) error { + m.mux.Lock() + defer m.mux.Unlock() + + libOcr := m.pluginNameToFakeOcr[info.Name] + if libOcr == nil { + libOcr = NewFakeLibOCR(m.t, m.lggr, m.F, m.protocolRoundInterval) + m.pluginNameToFakeOcr[info.Name] = libOcr + } + + libOcr.AddNode(plugin, args.ContractTransmitter, m.key) + + if libOcr.GetNodeCount() == m.N { + err := libOcr.Start(ctx) + if err != nil { + return fmt.Errorf("failed to start fake lib ocr: %w", err) + } + } + return nil +} + +func (m *oracleContext) Close() error { + m.mux.Lock() + defer m.mux.Unlock() + + for _, libOcr := range m.pluginNameToFakeOcr { + if err := libOcr.Close(); err != nil { + return fmt.Errorf("failed to close fake lib ocr: %w", err) + } + } + return nil +} + type oracleFactoryFactory struct { - mockLibOCr *MockLibOCR - key ocr2key.KeyBundle - N int - F int + oracleContext *oracleContext } -func newMockLibOcrOracleFactory(mockLibOCr *MockLibOCR, key ocr2key.KeyBundle, N int, F int) *oracleFactoryFactory { +func newFakeOracleFactoryFactory(t *testing.T, lggr logger.Logger, key ocr2key.KeyBundle, n int, f uint8, protocolRoundInterval time.Duration) *oracleFactoryFactory { return &oracleFactoryFactory{ - mockLibOCr: mockLibOCr, - key: key, - N: N, - F: F, + oracleContext: &oracleContext{ + t: t, + lggr: lggr, + key: key, + N: n, + F: f, + protocolRoundInterval: protocolRoundInterval, + pluginNameToFakeOcr: make(map[string]*FakeLibOCR), + }, } } func (o *oracleFactoryFactory) NewOracleFactory(params generic.OracleFactoryParams) (coretypes.OracleFactory, error) { - return &mockOracleFactory{o}, nil + return &fakeOracleFactory{o.oracleContext}, nil } -type mockOracle struct { - *mockOracleFactory - args coretypes.OracleArgs - libocrNodeID string +type fakeOracleFactory struct { + oracleContext *oracleContext } -func (m *mockOracle) Start(ctx context.Context) error { - plugin, _, err := m.args.ReportingPluginFactoryService.NewReportingPlugin(ctx, ocr3types.ReportingPluginConfig{ - F: m.F, - N: m.N, +func (m *fakeOracleFactory) NewOracle(ctx context.Context, args coretypes.OracleArgs) (coretypes.Oracle, error) { + return &fakeOracle{oracleContext: m.oracleContext, args: args}, nil +} + +type fakeOracle struct { + oracleContext *oracleContext + args coretypes.OracleArgs +} + +func (m *fakeOracle) Start(ctx 
context.Context) error { + plugin, info, err := m.args.ReportingPluginFactoryService.NewReportingPlugin(ctx, ocr3types.ReportingPluginConfig{ + F: int(m.oracleContext.F), + N: m.oracleContext.N, }) + if err != nil { return fmt.Errorf("failed to create reporting plugin: %w", err) } - m.libocrNodeID = m.mockLibOCr.AddNode(plugin, m.args.ContractTransmitter, m.key) - return nil -} + if err = m.oracleContext.addPlugin(ctx, info, plugin, m.args); err != nil { + return fmt.Errorf("failed to add plugin: %w", err) + } -func (m *mockOracle) Close(ctx context.Context) error { - m.mockLibOCr.RemoveNode(m.libocrNodeID) return nil } -type mockOracleFactory struct { - *oracleFactoryFactory -} - -func (m *mockOracleFactory) NewOracle(ctx context.Context, args coretypes.OracleArgs) (coretypes.Oracle, error) { - return &mockOracle{mockOracleFactory: m, args: args}, nil +func (m *fakeOracle) Close(ctx context.Context) error { + return m.oracleContext.Close() } type libocrNode struct { @@ -83,9 +132,9 @@ type libocrNode struct { key ocr2key.KeyBundle } -// MockLibOCR is a mock libocr implementation for testing purposes that simulates libocr protocol rounds without having +// FakeLibOCR is a fake libocr implementation for testing purposes that simulates libocr protocol rounds without having // to setup the libocr network -type MockLibOCR struct { +type FakeLibOCR struct { services.StateMachine t *testing.T lggr logger.Logger @@ -102,8 +151,8 @@ type MockLibOCR struct { wg sync.WaitGroup } -func NewMockLibOCR(t *testing.T, lggr logger.Logger, f uint8, protocolRoundInterval time.Duration) *MockLibOCR { - return &MockLibOCR{ +func NewFakeLibOCR(t *testing.T, lggr logger.Logger, f uint8, protocolRoundInterval time.Duration) *FakeLibOCR { + return &FakeLibOCR{ t: t, lggr: lggr, f: f, outcomeCtx: ocr3types.OutcomeContext{ @@ -117,8 +166,8 @@ func NewMockLibOCR(t *testing.T, lggr logger.Logger, f uint8, protocolRoundInter } } -func (m *MockLibOCR) Start(ctx context.Context) error { - return m.StartOnce("MockLibOCR", func() error { +func (m *FakeLibOCR) Start(ctx context.Context) error { + return m.StartOnce("FakeLibOCR", func() error { m.wg.Add(1) go func() { defer m.wg.Done() @@ -144,15 +193,15 @@ func (m *MockLibOCR) Start(ctx context.Context) error { }) } -func (m *MockLibOCR) Close() error { - return m.StopOnce("MockLibOCR", func() error { +func (m *FakeLibOCR) Close() error { + return m.StopOnce("FakeLibOCR", func() error { close(m.stopCh) m.wg.Wait() return nil }) } -func (m *MockLibOCR) AddNode(plugin ocr3types.ReportingPlugin[[]byte], transmitter ocr3types.ContractTransmitter[[]byte], key ocr2key.KeyBundle) string { +func (m *FakeLibOCR) AddNode(plugin ocr3types.ReportingPlugin[[]byte], transmitter ocr3types.ContractTransmitter[[]byte], key ocr2key.KeyBundle) string { m.mux.Lock() defer m.mux.Unlock() node := &libocrNode{uuid.New().String(), plugin, transmitter, key} @@ -160,7 +209,13 @@ func (m *MockLibOCR) AddNode(plugin ocr3types.ReportingPlugin[[]byte], transmitt return node.id } -func (m *MockLibOCR) RemoveNode(id string) { +func (m *FakeLibOCR) GetNodeCount() int { + m.mux.Lock() + defer m.mux.Unlock() + return len(m.nodes) +} + +func (m *FakeLibOCR) RemoveNode(id string) { m.mux.Lock() defer m.mux.Unlock() @@ -174,7 +229,7 @@ func (m *MockLibOCR) RemoveNode(id string) { m.nodes = updatedNodes } -func (m *MockLibOCR) simulateProtocolRound(ctx context.Context) error { +func (m *FakeLibOCR) simulateProtocolRound(ctx context.Context) error { m.mux.Lock() defer m.mux.Unlock() if len(m.nodes) == 0 { diff 
--git a/core/capabilities/integration_tests/framework/mock_target.go b/core/capabilities/integration_tests/framework/fake_target.go similarity index 80% rename from core/capabilities/integration_tests/framework/mock_target.go rename to core/capabilities/integration_tests/framework/fake_target.go index e9c03deaca2..442d35e3595 100644 --- a/core/capabilities/integration_tests/framework/mock_target.go +++ b/core/capabilities/integration_tests/framework/fake_target.go @@ -9,7 +9,7 @@ import ( ) var ( - _ capabilities.ActionCapability = &mockTarget{} + _ capabilities.ActionCapability = &fakeTarget{} ) type TargetSink struct { @@ -18,7 +18,7 @@ type TargetSink struct { targetName string version string - targets []mockTarget + targets []fakeTarget Sink chan capabilities.CapabilityRequest } @@ -56,7 +56,7 @@ func (ts *TargetSink) Close() error { } func (ts *TargetSink) CreateNewTarget(t *testing.T) capabilities.TargetCapability { - target := mockTarget{ + target := fakeTarget{ t: t, targetID: ts.targetID, ch: ts.Sink, @@ -65,29 +65,29 @@ func (ts *TargetSink) CreateNewTarget(t *testing.T) capabilities.TargetCapabilit return &target } -type mockTarget struct { +type fakeTarget struct { t *testing.T targetID string ch chan capabilities.CapabilityRequest } -func (mt *mockTarget) Execute(ctx context.Context, rawRequest capabilities.CapabilityRequest) (capabilities.CapabilityResponse, error) { +func (mt *fakeTarget) Execute(ctx context.Context, rawRequest capabilities.CapabilityRequest) (capabilities.CapabilityResponse, error) { mt.ch <- rawRequest return capabilities.CapabilityResponse{}, nil } -func (mt *mockTarget) Info(ctx context.Context) (capabilities.CapabilityInfo, error) { +func (mt *fakeTarget) Info(ctx context.Context) (capabilities.CapabilityInfo, error) { return capabilities.MustNewCapabilityInfo( mt.targetID, capabilities.CapabilityTypeTarget, - "mock target for target ID "+mt.targetID, + "fake target for target ID "+mt.targetID, ), nil } -func (mt *mockTarget) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { +func (mt *fakeTarget) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { return nil } -func (mt *mockTarget) UnregisterFromWorkflow(ctx context.Context, request capabilities.UnregisterFromWorkflowRequest) error { +func (mt *fakeTarget) UnregisterFromWorkflow(ctx context.Context, request capabilities.UnregisterFromWorkflowRequest) error { return nil } diff --git a/core/capabilities/integration_tests/framework/mock_trigger.go b/core/capabilities/integration_tests/framework/fake_trigger.go similarity index 84% rename from core/capabilities/integration_tests/framework/mock_trigger.go rename to core/capabilities/integration_tests/framework/fake_trigger.go index afc874af6c3..4274eddf5ca 100644 --- a/core/capabilities/integration_tests/framework/mock_trigger.go +++ b/core/capabilities/integration_tests/framework/fake_trigger.go @@ -20,7 +20,7 @@ type TriggerSink struct { triggerName string version string - triggers []mockTrigger + triggers []fakeTrigger stopCh services.StopChan wg sync.WaitGroup @@ -80,12 +80,12 @@ func (r *TriggerSink) SendOutput(outputs *values.Map) { } func (r *TriggerSink) CreateNewTrigger(t *testing.T) capabilities.TriggerCapability { - trigger := newMockTrigger(t, r.triggerID, &r.wg, r.stopCh) + trigger := newFakeTrigger(t, r.triggerID, &r.wg, r.stopCh) r.triggers = append(r.triggers, trigger) return &trigger } -type mockTrigger struct { +type fakeTrigger struct { t *testing.T 
triggerID string cancel context.CancelFunc @@ -95,8 +95,8 @@ type mockTrigger struct { stopCh services.StopChan } -func newMockTrigger(t *testing.T, triggerID string, wg *sync.WaitGroup, stopCh services.StopChan) mockTrigger { - return mockTrigger{ +func newFakeTrigger(t *testing.T, triggerID string, wg *sync.WaitGroup, stopCh services.StopChan) fakeTrigger { + return fakeTrigger{ t: t, triggerID: triggerID, toSend: make(chan capabilities.TriggerResponse, 1000), @@ -105,19 +105,19 @@ func newMockTrigger(t *testing.T, triggerID string, wg *sync.WaitGroup, stopCh s } } -func (s *mockTrigger) sendResponse(resp capabilities.TriggerResponse) { +func (s *fakeTrigger) sendResponse(resp capabilities.TriggerResponse) { s.toSend <- resp } -func (s *mockTrigger) Info(ctx context.Context) (capabilities.CapabilityInfo, error) { +func (s *fakeTrigger) Info(ctx context.Context) (capabilities.CapabilityInfo, error) { return capabilities.MustNewCapabilityInfo( s.triggerID, capabilities.CapabilityTypeTrigger, - "mock trigger for trigger id "+s.triggerID, + "fake trigger for trigger id "+s.triggerID, ), nil } -func (s *mockTrigger) RegisterTrigger(ctx context.Context, request capabilities.TriggerRegistrationRequest) (<-chan capabilities.TriggerResponse, error) { +func (s *fakeTrigger) RegisterTrigger(ctx context.Context, request capabilities.TriggerRegistrationRequest) (<-chan capabilities.TriggerResponse, error) { if s.cancel != nil { s.t.Fatal("trigger already registered") } @@ -144,7 +144,7 @@ func (s *mockTrigger) RegisterTrigger(ctx context.Context, request capabilities. return responseCh, nil } -func (s *mockTrigger) UnregisterTrigger(ctx context.Context, request capabilities.TriggerRegistrationRequest) error { +func (s *fakeTrigger) UnregisterTrigger(ctx context.Context, request capabilities.TriggerRegistrationRequest) error { if s.cancel == nil { s.t.Fatal("trigger not registered") } diff --git a/core/capabilities/integration_tests/keystone/keystone_test.go b/core/capabilities/integration_tests/keystone/keystone_test.go index 033bb8a2c76..17bfde7cda9 100644 --- a/core/capabilities/integration_tests/keystone/keystone_test.go +++ b/core/capabilities/integration_tests/keystone/keystone_test.go @@ -17,7 +17,6 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/values" "github.com/smartcontractkit/chainlink/v2/core/capabilities/integration_tests/framework" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/feeds_consumer" - "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/smartcontractkit/chainlink/v2/core/logger" reporttypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v3/types" ) @@ -31,7 +30,9 @@ func Test_OneAtATimeTransmissionSchedule(t *testing.T) { } func testTransmissionSchedule(t *testing.T, deltaStage string, schedule string) { - ctx := testutils.Context(t) + ctx, cancel := framework.Context(t) + defer cancel() + lggr := logger.TestLogger(t) lggr.SetLogLevel(zapcore.InfoLevel) @@ -107,7 +108,7 @@ func waitForConsumerReports(ctx context.Context, t *testing.T, consumer *feeds_c for { select { case <-ctxWithTimeout.Done(): - t.Fatalf("timed out waiting for feed reports, expected %d, received %d", len(triggerFeedReports), feedCount) + t.Fatalf("timed out waiting for feeds reports, expected %d, received %d", len(triggerFeedReports), feedCount) case err := <-feedsSub.Err(): require.NoError(t, err) case feed := <-feedsReceived: diff --git a/core/capabilities/integration_tests/keystone/setup.go 
b/core/capabilities/integration_tests/keystone/setup.go index f90b582d0ee..b9b98baaf7e 100644 --- a/core/capabilities/integration_tests/keystone/setup.go +++ b/core/capabilities/integration_tests/keystone/setup.go @@ -11,6 +11,8 @@ import ( "github.com/smartcontractkit/libocr/offchainreporting2plus/chains/evmutil" + "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" + commoncap "github.com/smartcontractkit/chainlink-common/pkg/capabilities" "github.com/smartcontractkit/chainlink-common/pkg/capabilities/datastreams" v3 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3" @@ -46,9 +48,11 @@ func setupKeystoneDons(ctx context.Context, t *testing.T, lggr logger.SugaredLog triggerDon := createKeystoneTriggerDon(ctx, t, lggr, triggerDonInfo, donContext, trigger) - workflowDon.Start(ctx, t) - triggerDon.Start(ctx, t) - writeTargetDon.Start(ctx, t) + servicetest.Run(t, workflowDon) + servicetest.Run(t, triggerDon) + servicetest.Run(t, writeTargetDon) + + donContext.WaitForCapabilitiesToBeExposed(t, workflowDon, triggerDon, writeTargetDon) return workflowDon, consumer } diff --git a/core/capabilities/targets/write_target.go b/core/capabilities/targets/write_target.go index 0e0b2071829..9315a1ee199 100644 --- a/core/capabilities/targets/write_target.go +++ b/core/capabilities/targets/write_target.go @@ -7,21 +7,27 @@ import ( "encoding/hex" "fmt" "math/big" + "time" "github.com/ethereum/go-ethereum/common" "github.com/google/uuid" "github.com/smartcontractkit/chainlink-common/pkg/capabilities" "github.com/smartcontractkit/chainlink-common/pkg/capabilities/consensus/ocr3/types" + "github.com/smartcontractkit/chainlink-common/pkg/custmsg" "github.com/smartcontractkit/chainlink-common/pkg/logger" commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives" + + "github.com/smartcontractkit/chainlink/v2/core/platform" ) var ( _ capabilities.TargetCapability = &WriteTarget{} ) +const transactionStatusCheckInterval = 2 * time.Second + type WriteTarget struct { cr ContractValueGetter cw commontypes.ChainWriter @@ -31,7 +37,8 @@ type WriteTarget struct { receiverGasMinimum uint64 capabilities.CapabilityInfo - lggr logger.Logger + emitter custmsg.MessageEmitter + lggr logger.Logger bound bool } @@ -79,6 +86,7 @@ func NewWriteTarget( forwarderAddress, txGasLimit - ForwarderContractLogicGasCost, info, + custmsg.NewLabeler(), logger.Named(lggr, "WriteTarget"), false, } @@ -309,7 +317,41 @@ func (cap *WriteTarget) Execute(ctx context.Context, rawRequest capabilities.Cap } cap.lggr.Debugw("Transaction submitted", "request", request, "transaction", txID) - return capabilities.CapabilityResponse{}, nil + + tick := time.NewTicker(transactionStatusCheckInterval) + defer tick.Stop() + for { + select { + case <-ctx.Done(): + return capabilities.CapabilityResponse{}, nil + case <-tick.C: + txStatus, err := cap.cw.GetTransactionStatus(ctx, txID.String()) + if err != nil { + cap.lggr.Errorw("Failed to get transaction status", "request", request, "transaction", txID, "err", err) + continue + } + switch txStatus { + case commontypes.Finalized: + cap.lggr.Debugw("Transaction finalized", "request", request, "transaction", txID) + return capabilities.CapabilityResponse{}, nil + case commontypes.Failed, commontypes.Fatal: + cap.lggr.Error("Transaction failed", "request", request, "transaction", txID) + msg := "failed to submit transaction with ID: " + txID.String() + err = cap.emitter.With( + 
platform.KeyWorkflowID, request.Metadata.WorkflowID, + platform.KeyWorkflowName, request.Metadata.WorkflowName, + platform.KeyWorkflowOwner, request.Metadata.WorkflowOwner, + platform.KeyWorkflowExecutionID, request.Metadata.WorkflowExecutionID, + ).Emit(ctx, msg) + if err != nil { + cap.lggr.Errorf("failed to send custom message with msg: %s, err: %v", msg, err) + } + return capabilities.CapabilityResponse{}, fmt.Errorf("submitted transaction failed: %w", err) + default: + cap.lggr.Debugw("Unexpected transaction status", "request", request, "transaction", txID, "status", txStatus) + } + } + } } func (cap *WriteTarget) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { diff --git a/core/capabilities/targets/write_target_test.go b/core/capabilities/targets/write_target_test.go index 499f4f9b29b..38136f07df0 100644 --- a/core/capabilities/targets/write_target_test.go +++ b/core/capabilities/targets/write_target_test.go @@ -101,6 +101,7 @@ func TestWriteTarget(t *testing.T) { Config: config, Inputs: validInputs, } + cw.On("GetTransactionStatus", mock.Anything, mock.Anything).Return(types.Finalized, nil).Once() response, err2 := writeTarget.Execute(ctx, req) require.NoError(t, err2) diff --git a/core/chains/evm/types/models.go b/core/chains/evm/types/models.go index abee992539d..d4dabc96992 100644 --- a/core/chains/evm/types/models.go +++ b/core/chains/evm/types/models.go @@ -8,6 +8,7 @@ import ( "fmt" "math/big" "regexp" + "strconv" "strings" "sync/atomic" "time" @@ -18,10 +19,12 @@ import ( pkgerrors "github.com/pkg/errors" "github.com/ugorji/go/codec" + chainagnostictypes "github.com/smartcontractkit/chainlink-common/pkg/types" "github.com/smartcontractkit/chainlink-common/pkg/utils/hex" htrktypes "github.com/smartcontractkit/chainlink/v2/common/headtracker/types" commontypes "github.com/smartcontractkit/chainlink/v2/common/types" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types/internal/blocks" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" @@ -198,6 +201,9 @@ func (h *Head) ChainString() string { // String returns a string representation of this head func (h *Head) String() string { + if h == nil { + return "" + } return fmt.Sprintf("Head{Number: %d, Hash: %s, ParentHash: %s}", h.ToInt(), h.Hash.Hex(), h.ParentHash.Hex()) } @@ -325,6 +331,19 @@ func (h *Head) MarshalJSON() ([]byte, error) { return json.Marshal(jsonHead) } +func (h *Head) ToChainAgnosticHead() *chainagnostictypes.Head { + if h == nil { + return nil + } + + return &chainagnostictypes.Head{ + Height: strconv.FormatInt(h.Number, 10), + Hash: h.Hash.Bytes(), + //nolint:gosec // G115 + Timestamp: uint64(h.Timestamp.Unix()), + } +} + // Block represents an ethereum block // This type is only used for the block history estimator, and can be expensive to unmarshal. Don't add unnecessary fields here. type Block struct { diff --git a/core/config/docs/chains-solana.toml b/core/config/docs/chains-solana.toml index 87d71b49cc6..c979581b258 100644 --- a/core/config/docs/chains-solana.toml +++ b/core/config/docs/chains-solana.toml @@ -17,6 +17,8 @@ TxTimeout = '1m' # Default TxRetryTimeout = '10s' # Default # TxConfirmTimeout is the duration to wait when confirming a tx signature, before discarding as unconfirmed. TxConfirmTimeout = '30s' # Default +# TxRetentionTimeout is the duration to retain transactions in storage after being marked as finalized or errored. 
Set to 0 to immediately drop transactions. +TxRetentionTimeout = '0s' # Default # SkipPreflight enables or disables preflight checks when sending txs. SkipPreflight = true # Default # Commitment is the confirmation level for solana state and transactions. ([documentation](https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment)) diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 6de43b49e55..caf3d5e68e6 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -24,7 +24,7 @@ require ( github.com/prometheus/client_golang v1.20.5 github.com/shopspring/decimal v1.4.0 github.com/smartcontractkit/chainlink-automation v0.8.1 - github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6 + github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371 github.com/smartcontractkit/chainlink/deployment v0.0.0-00010101000000-000000000000 github.com/smartcontractkit/chainlink/v2 v2.14.0-mercury-20240807.0.20241106193309-5560cd76211a github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12 @@ -293,14 +293,14 @@ require ( github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/shirou/gopsutil/v3 v3.24.3 // indirect github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86 // indirect - github.com/smartcontractkit/chain-selectors v1.0.27 // indirect + github.com/smartcontractkit/chain-selectors v1.0.29 // indirect github.com/smartcontractkit/chainlink-ccip v0.0.0-20241112095015-3e85d9f1898b // indirect github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f // indirect github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e // indirect github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect github.com/smartcontractkit/chainlink-protos/job-distributor v0.4.0 // indirect github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 // indirect - github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930 // indirect + github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669 // indirect github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8 // indirect github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 // indirect github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20241009055228-33d0c0bf38de // indirect diff --git a/core/scripts/go.sum b/core/scripts/go.sum index 3a7f6db8076..362d28f28c3 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1088,14 +1088,14 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86 h1:qQH6fZZe31nBAG6INHph3z5ysDTPptyu0TR9uoJ1+ok= github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86/go.mod h1:WtWOoVQQEHxRHL2hNmuRrvDfYfQG/CioFNoa9Rr2mBE= -github.com/smartcontractkit/chain-selectors v1.0.27 h1:VE/ftX9Aae4gnw67yR1raKi+30iWKL/sWq8uyiLHM8k= -github.com/smartcontractkit/chain-selectors v1.0.27/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= +github.com/smartcontractkit/chain-selectors v1.0.29 h1:aZ9+OoUSMn4nqnissHtDvDoKR7JONfDqTHX3MHYIUIE= +github.com/smartcontractkit/chain-selectors v1.0.29/go.mod h1:xsKM0aN3YGcQKTPRPDDtPx2l4mlTN1Djmg0VVXV40b8= github.com/smartcontractkit/chainlink-automation v0.8.1 
h1:sTc9LKpBvcKPc1JDYAmgBc2xpDKBco/Q4h4ydl6+UUU= github.com/smartcontractkit/chainlink-automation v0.8.1/go.mod h1:Iij36PvWZ6blrdC5A/nrQUBuf3MH3JvsBB9sSyc9W08= github.com/smartcontractkit/chainlink-ccip v0.0.0-20241112095015-3e85d9f1898b h1:4kmZtaQ4fXwduHnw9xk5VmiIOW4nHg/Mx6iidlZJt5o= github.com/smartcontractkit/chainlink-ccip v0.0.0-20241112095015-3e85d9f1898b/go.mod h1:4adKaHNaxFsRvV/lYfqtbsWyyvIPUMLR0FdOJN/ljis= -github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6 h1:yJNBWCdNL/X8+wEs3TGTBe9gssMmw5FTFxxrlo+0mVo= -github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371 h1:vnNqMaAvheZgR8IDMGw0QIV1Qen3XTh7IChwW40SNfU= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f h1:BwrIaQIx5Iy6eT+DfLhFfK2XqjxRm74mVdlX8gbu4dw= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f/go.mod h1:wHtwSR3F1CQSJJZDQKuqaqFYnvkT+kMyget7dl8Clvo= github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e h1:JiETqdNM0bktAUGMc62COwXIaw3rR3M77Me6bBLG0Fg= @@ -1106,8 +1106,8 @@ github.com/smartcontractkit/chainlink-protos/job-distributor v0.4.0 h1:1xTm8UGeD github.com/smartcontractkit/chainlink-protos/job-distributor v0.4.0/go.mod h1:/dVVLXrsp+V0AbcYGJo3XMzKg3CkELsweA/TTopCsKE= github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 h1:PBUaFfPLm+Efq7H9kdfGBivH+QhJ6vB5EZTR/sCZsxI= github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= -github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930 h1:blu++xbH/NSb+ii5hI4jczwojZ7Hc1ERXjpt/krYy9c= -github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930/go.mod h1:mGmRvlk54ufCufV4EBWizOGtXoXfePoFAuYEVC8EwdY= +github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669 h1:CBQ9ORUtGUvCr3dAm/qjpdHlYuB1SRIwtYw5LV8SLys= +github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669/go.mod h1:mGmRvlk54ufCufV4EBWizOGtXoXfePoFAuYEVC8EwdY= github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8 h1:B4DFdk6MGcQnoCjjMBCx7Z+GWQpxRWJ4O8W/dVJyWGA= github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8/go.mod h1:WkBqgBo+g34Gm5vWkDDl8Fh3Mzd7bF5hXp7rryg0t5o= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 h1:12ijqMM9tvYVEm+nR826WsrNi6zCKpwBhuApq127wHs= diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go index 10a861960ac..76b80672dbb 100644 --- a/core/services/chainlink/config_test.go +++ b/core/services/chainlink/config_test.go @@ -745,6 +745,7 @@ func TestConfig_Marshal(t *testing.T) { TxTimeout: commoncfg.MustNewDuration(time.Hour), TxRetryTimeout: commoncfg.MustNewDuration(time.Minute), TxConfirmTimeout: commoncfg.MustNewDuration(time.Second), + TxRetentionTimeout: commoncfg.MustNewDuration(0 * time.Second), SkipPreflight: ptr(true), Commitment: ptr("banana"), MaxRetries: ptr[int64](7), @@ -1272,6 +1273,7 @@ OCR2CacheTTL = '1h0m0s' TxTimeout = '1h0m0s' TxRetryTimeout = '1m0s' TxConfirmTimeout = '1s' +TxRetentionTimeout = '0s' SkipPreflight = true Commitment = 'banana' MaxRetries = 7 
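
For reference, a minimal sketch of how the new TxRetentionTimeout setting sits in a node's Solana chain TOML (assumed, illustrative values only — not part of the patch above); any value above the '0s' default retains finalized or errored transactions in storage for that duration before they are dropped:

    [[Solana]]
    ChainID = 'mainnet'
    # illustrative value: retain finalized/errored transactions for 10 minutes instead of dropping them immediately
    TxRetentionTimeout = '10m'
    # existing default shown for context
    TxConfirmTimeout = '30s'
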
diff --git a/core/services/chainlink/testdata/config-full.toml b/core/services/chainlink/testdata/config-full.toml index c5d79dbe5bc..c6a5302a459 100644 --- a/core/services/chainlink/testdata/config-full.toml +++ b/core/services/chainlink/testdata/config-full.toml @@ -492,6 +492,7 @@ OCR2CacheTTL = '1h0m0s' TxTimeout = '1h0m0s' TxRetryTimeout = '1m0s' TxConfirmTimeout = '1s' +TxRetentionTimeout = '0s' SkipPreflight = true Commitment = 'banana' MaxRetries = 7 diff --git a/core/services/chainlink/testdata/config-multi-chain-effective.toml b/core/services/chainlink/testdata/config-multi-chain-effective.toml index d71ebc4a2d5..e8da8142181 100644 --- a/core/services/chainlink/testdata/config-multi-chain-effective.toml +++ b/core/services/chainlink/testdata/config-multi-chain-effective.toml @@ -653,6 +653,7 @@ OCR2CacheTTL = '1m0s' TxTimeout = '1m0s' TxRetryTimeout = '10s' TxConfirmTimeout = '30s' +TxRetentionTimeout = '0s' SkipPreflight = true Commitment = 'confirmed' MaxRetries = 12 @@ -697,6 +698,7 @@ OCR2CacheTTL = '1m0s' TxTimeout = '1m0s' TxRetryTimeout = '10s' TxConfirmTimeout = '30s' +TxRetentionTimeout = '0s' SkipPreflight = true Commitment = 'confirmed' MaxRetries = 0 diff --git a/core/services/keystore/keys/csakey/key.go b/core/services/keystore/keys/csakey/key.go deleted file mode 100644 index 054994f93ea..00000000000 --- a/core/services/keystore/keys/csakey/key.go +++ /dev/null @@ -1,65 +0,0 @@ -package csakey - -import ( - "crypto/ed25519" - "errors" - "time" - - "github.com/smartcontractkit/chainlink/v2/core/utils" - "github.com/smartcontractkit/chainlink/v2/core/utils/crypto" -) - -type Key struct { - ID uint - PublicKey crypto.PublicKey - privateKey []byte - EncryptedPrivateKey crypto.EncryptedPrivateKey - CreatedAt time.Time - UpdatedAt time.Time -} - -// New creates a new CSA key consisting of an ed25519 key. It encrypts the -// Key with the passphrase. 
-func New(passphrase string, scryptParams utils.ScryptParams) (*Key, error) { - pubkey, privkey, err := ed25519.GenerateKey(nil) - if err != nil { - return nil, err - } - - encPrivkey, err := crypto.NewEncryptedPrivateKey(privkey, passphrase, scryptParams) - if err != nil { - return nil, err - } - - return &Key{ - PublicKey: crypto.PublicKey(pubkey), - privateKey: privkey, - EncryptedPrivateKey: *encPrivkey, - }, nil -} - -func (k *Key) Unlock(password string) error { - pk, err := k.EncryptedPrivateKey.Decrypt(password) - if err != nil { - return err - } - k.privateKey = pk - return nil -} - -func (k *Key) Unsafe_GetPrivateKey() ([]byte, error) { - if k.privateKey == nil { - return nil, errors.New("key has not been unlocked") - } - - return k.privateKey, nil -} - -func (k Key) ToV2() KeyV2 { - pk := ed25519.PrivateKey(k.privateKey) - return KeyV2{ - privateKey: &pk, - PublicKey: ed25519.PublicKey(k.PublicKey), - Version: 1, - } -} diff --git a/core/services/keystore/keys/csakey/key_test.go b/core/services/keystore/keys/csakey/key_test.go deleted file mode 100644 index 8ac05f74cf5..00000000000 --- a/core/services/keystore/keys/csakey/key_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package csakey - -import ( - "crypto/ed25519" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/smartcontractkit/chainlink/v2/core/utils" -) - -func Test_New(t *testing.T) { - passphrase := "passphrase" - key, err := New(passphrase, utils.FastScryptParams) - require.NoError(t, err) - - rawprivkey, err := key.EncryptedPrivateKey.Decrypt("passphrase") - require.NoError(t, err) - - privkey := ed25519.PrivateKey(rawprivkey) - assert.Equal(t, ed25519.PublicKey(key.PublicKey), privkey.Public()) -} - -func Test_Unlock(t *testing.T) { - passphrase := "passphrase" - key, err := New(passphrase, utils.FastScryptParams) - require.NoError(t, err) - - err = key.Unlock(passphrase) - require.NoError(t, err) - - expected, err := key.EncryptedPrivateKey.Decrypt(passphrase) - require.NoError(t, err) - - assert.Equal(t, expected, key.privateKey) -} - -func Test_GetPrivateKey(t *testing.T) { - passphrase := "passphrase" - key, err := New(passphrase, utils.FastScryptParams) - require.NoError(t, err) - - privkey, err := key.Unsafe_GetPrivateKey() - require.NoError(t, err) - assert.Equal(t, key.privateKey, privkey) -} - -func TestKey_ToV2(t *testing.T) { - passphrase := "passphrase" - key, err := New(passphrase, utils.FastScryptParams) - require.NoError(t, err) - - v2Key := key.ToV2() - - assert.Equal(t, key.PublicKey.String(), v2Key.PublicKeyString()) - assert.Equal(t, ed25519.PrivateKey(key.privateKey), *v2Key.privateKey) -} diff --git a/core/services/keystore/keys/p2pkey/key.go b/core/services/keystore/keys/p2pkey/key.go deleted file mode 100644 index abf4f70294c..00000000000 --- a/core/services/keystore/keys/p2pkey/key.go +++ /dev/null @@ -1,125 +0,0 @@ -package p2pkey - -import ( - "crypto/ed25519" - "database/sql/driver" - "encoding/hex" - "encoding/json" - "strconv" - "time" - - "github.com/ethereum/go-ethereum/accounts/keystore" - "github.com/pkg/errors" - - ragep2ptypes "github.com/smartcontractkit/libocr/ragep2p/types" -) - -// Key represents a p2p private key -type Key struct { - PrivKey ed25519.PrivateKey -} - -func (k Key) ToV2() KeyV2 { - return KeyV2{ - PrivKey: k.PrivKey, - peerID: k.PeerID(), - } -} - -// PublicKeyBytes is a [ed25519.PublicKey] -type PublicKeyBytes []byte - -func (pkb PublicKeyBytes) String() string { - return hex.EncodeToString(pkb) -} - -func (pkb 
PublicKeyBytes) MarshalJSON() ([]byte, error) { - return json.Marshal(hex.EncodeToString(pkb)) -} - -func (pkb *PublicKeyBytes) UnmarshalJSON(input []byte) error { - var hexString string - if err := json.Unmarshal(input, &hexString); err != nil { - return err - } - - result, err := hex.DecodeString(hexString) - if err != nil { - return err - } - - *pkb = result - return nil -} - -func (pkb *PublicKeyBytes) Scan(value interface{}) error { - switch v := value.(type) { - case []byte: - *pkb = v - return nil - default: - return errors.Errorf("invalid public key bytes got %T wanted []byte", v) - } -} - -func (pkb PublicKeyBytes) Value() (driver.Value, error) { - return []byte(pkb), nil -} - -func (k Key) GetPeerID() (PeerID, error) { - peerID, err := ragep2ptypes.PeerIDFromPrivateKey(k.PrivKey) - if err != nil { - return PeerID{}, errors.WithStack(err) - } - return PeerID(peerID), err -} - -func (k Key) PeerID() PeerID { - peerID, err := k.GetPeerID() - if err != nil { - panic(err) - } - return peerID -} - -type EncryptedP2PKey struct { - ID int32 - PeerID PeerID - PubKey PublicKeyBytes - EncryptedPrivKey []byte - CreatedAt time.Time - UpdatedAt time.Time - DeletedAt *time.Time -} - -func (ep2pk *EncryptedP2PKey) SetID(value string) error { - result, err := strconv.ParseInt(value, 10, 32) - - if err != nil { - return err - } - - ep2pk.ID = int32(result) - return nil -} - -// Decrypt returns the PrivateKey in e, decrypted via auth, or an error -func (ep2pk EncryptedP2PKey) Decrypt(auth string) (k Key, err error) { - var cryptoJSON keystore.CryptoJSON - err = json.Unmarshal(ep2pk.EncryptedPrivKey, &cryptoJSON) - if err != nil { - return k, errors.Wrapf(err, "invalid JSON for P2P key %s (0x%x)", ep2pk.PeerID.String(), ep2pk.PubKey) - } - marshalledPrivK, err := keystore.DecryptDataV3(cryptoJSON, adulteratedPassword(auth)) - if err != nil { - return k, errors.Wrapf(err, "could not decrypt P2P key %s (0x%x)", ep2pk.PeerID.String(), ep2pk.PubKey) - } - - privK, err := UnmarshalPrivateKey(marshalledPrivK) - if err != nil { - return k, errors.Wrapf(err, "could not unmarshal P2P private key for %s (0x%x)", ep2pk.PeerID.String(), ep2pk.PubKey) - } - return Key{ - privK, - }, nil -} diff --git a/core/services/keystore/keys/p2pkey/key_test.go b/core/services/keystore/keys/p2pkey/key_test.go deleted file mode 100644 index 57490483e86..00000000000 --- a/core/services/keystore/keys/p2pkey/key_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package p2pkey - -import ( - "crypto/ed25519" - "crypto/rand" - "encoding/hex" - "encoding/json" - "testing" - - "github.com/ethereum/go-ethereum/accounts/keystore" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/smartcontractkit/chainlink/v2/core/utils" -) - -func TestP2PKeys_KeyStruct(t *testing.T) { - _, pk, err := ed25519.GenerateKey(rand.Reader) - require.NoError(t, err) - - k := Key{PrivKey: pk} - - t.Run("converts into V2 key", func(t *testing.T) { - k2 := k.ToV2() - - assert.Equal(t, k.PrivKey, k2.PrivKey) - assert.Equal(t, k.PeerID(), k2.peerID) - }) - - t.Run("returns PeerID", func(t *testing.T) { - pid, err := k.GetPeerID() - require.NoError(t, err) - pid2 := k.PeerID() - - assert.Equal(t, pid, pid2) - }) -} - -func TestP2PKeys_PublicKeyBytes(t *testing.T) { - pk, _, err := ed25519.GenerateKey(rand.Reader) - require.NoError(t, err) - - pkb := PublicKeyBytes(pk) - assert.Equal(t, hex.EncodeToString(pkb), pkb.String()) - - b, err := pkb.MarshalJSON() - require.NoError(t, err) - assert.NotEmpty(t, b) - - err = 
pkb.UnmarshalJSON(b) - assert.NoError(t, err) - - err = pkb.UnmarshalJSON([]byte("")) - assert.Error(t, err) - - err = pkb.Scan([]byte(pk)) - assert.NoError(t, err) - - err = pkb.Scan("invalid-type") - assert.Error(t, err) - - sv, err := pkb.Value() - assert.NoError(t, err) - assert.NotEmpty(t, sv) -} - -func TestP2PKeys_EncryptedP2PKey(t *testing.T) { - _, privk, err := ed25519.GenerateKey(rand.Reader) - require.NoError(t, err) - - k := Key{PrivKey: privk} - - pubkr := k.PrivKey.Public().(ed25519.PublicKey) - - var marshalledPrivK []byte - marshalledPrivK, err = MarshalPrivateKey(k.PrivKey) - require.NoError(t, err) - cryptoJSON, err := keystore.EncryptDataV3(marshalledPrivK, []byte(adulteratedPassword("password")), utils.FastScryptParams.N, utils.FastScryptParams.P) - require.NoError(t, err) - encryptedPrivKey, err := json.Marshal(&cryptoJSON) - require.NoError(t, err) - - p2pk := EncryptedP2PKey{ - ID: 1, - PeerID: k.PeerID(), - PubKey: []byte(pubkr), - EncryptedPrivKey: encryptedPrivKey, - } - - t.Run("sets a different ID", func(t *testing.T) { - err := p2pk.SetID("12") - require.NoError(t, err) - - assert.Equal(t, int32(12), p2pk.ID) - - err = p2pk.SetID("invalid") - assert.Error(t, err) - }) - - t.Run("decrypts key", func(t *testing.T) { - k, err := p2pk.Decrypt("invalid-pass") - assert.Empty(t, k) - assert.Error(t, err) - - k, err = p2pk.Decrypt("password") - require.NoError(t, err) - assert.NotEmpty(t, k) - }) -} diff --git a/core/services/keystore/keys/p2pkey/key_v2_test.go b/core/services/keystore/keys/p2pkey/key_v2_test.go index d93678b8f2d..56a93e4db1a 100644 --- a/core/services/keystore/keys/p2pkey/key_v2_test.go +++ b/core/services/keystore/keys/p2pkey/key_v2_test.go @@ -7,6 +7,7 @@ import ( "testing" ragep2ptypes "github.com/smartcontractkit/libocr/ragep2p/types" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -22,15 +23,12 @@ func TestP2PKeys_Raw(t *testing.T) { } func TestP2PKeys_KeyV2(t *testing.T) { - _, pk, err := ed25519.GenerateKey(rand.Reader) + kv2, err := NewV2() require.NoError(t, err) - k := Key{PrivKey: pk} - kv2 := k.ToV2() - pkv2 := kv2.PrivKey.Public().(ed25519.PublicKey) assert.Equal(t, kv2.String(), kv2.GoString()) - assert.Equal(t, ragep2ptypes.PeerID(k.PeerID()).String(), kv2.ID()) + assert.Equal(t, ragep2ptypes.PeerID(kv2.PeerID()).String(), kv2.ID()) assert.Equal(t, hex.EncodeToString(pkv2), kv2.PublicKeyHex()) } diff --git a/core/services/llo/delegate.go b/core/services/llo/delegate.go index 3380b4f1bc5..f5f9b5f05f1 100644 --- a/core/services/llo/delegate.go +++ b/core/services/llo/delegate.go @@ -57,6 +57,7 @@ type DelegateConfig struct { RetirementReportCodec datastreamsllo.RetirementReportCodec ShouldRetireCache datastreamsllo.ShouldRetireCache EAMonitoringEndpoint ocrcommontypes.MonitoringEndpoint + DonID uint32 // OCR3 TraceLogging bool @@ -74,7 +75,7 @@ type DelegateConfig struct { } func NewDelegate(cfg DelegateConfig) (job.ServiceCtx, error) { - lggr := logger.Sugared(cfg.Logger).With("jobName", cfg.JobName.ValueOrZero()) + lggr := logger.Sugared(cfg.Logger).With("jobName", cfg.JobName.ValueOrZero(), "donID", cfg.DonID) if cfg.DataSource == nil { return nil, errors.New("DataSource must not be nil") } @@ -94,7 +95,7 @@ func NewDelegate(cfg DelegateConfig) (job.ServiceCtx, error) { var t TelemeterService if cfg.CaptureEATelemetry { - t = NewTelemeterService(lggr, cfg.EAMonitoringEndpoint) + t = NewTelemeterService(lggr, cfg.EAMonitoringEndpoint, cfg.DonID) } else { t = NullTelemeter } @@ -110,7 +111,7 @@ func (d 
*delegate) Start(ctx context.Context) error { return fmt.Errorf("expected either 1 or 2 ContractConfigTrackers, got: %d", len(d.cfg.ContractConfigTrackers)) } - d.cfg.Logger.Debugw("Starting LLO job", "instances", len(d.cfg.ContractConfigTrackers), "jobName", d.cfg.JobName.ValueOrZero(), "captureEATelemetry", d.cfg.CaptureEATelemetry) + d.cfg.Logger.Debugw("Starting LLO job", "instances", len(d.cfg.ContractConfigTrackers), "jobName", d.cfg.JobName.ValueOrZero(), "captureEATelemetry", d.cfg.CaptureEATelemetry, "donID", d.cfg.DonID) var merr error diff --git a/core/services/llo/telemetry.go b/core/services/llo/telemetry.go index d5c113c61ef..888ee9d5d36 100644 --- a/core/services/llo/telemetry.go +++ b/core/services/llo/telemetry.go @@ -31,18 +31,19 @@ type TelemeterService interface { services.Service } -func NewTelemeterService(lggr logger.Logger, monitoringEndpoint commontypes.MonitoringEndpoint) TelemeterService { +func NewTelemeterService(lggr logger.Logger, monitoringEndpoint commontypes.MonitoringEndpoint, donID uint32) TelemeterService { if monitoringEndpoint == nil { return NullTelemeter } - return newTelemeter(lggr, monitoringEndpoint) + return newTelemeter(lggr, monitoringEndpoint, donID) } -func newTelemeter(lggr logger.Logger, monitoringEndpoint commontypes.MonitoringEndpoint) *telemeter { +func newTelemeter(lggr logger.Logger, monitoringEndpoint commontypes.MonitoringEndpoint, donID uint32) *telemeter { chTelemetryObservation := make(chan TelemetryObservation, 100) t := &telemeter{ chTelemetryObservation: chTelemetryObservation, monitoringEndpoint: monitoringEndpoint, + donID: donID, } t.Service, t.eng = services.Config{ Name: "LLOTelemeterService", @@ -58,6 +59,7 @@ type telemeter struct { monitoringEndpoint commontypes.MonitoringEndpoint chTelemetryObservation chan TelemetryObservation + donID uint32 } func (t *telemeter) EnqueueV3PremiumLegacy(run *pipeline.Run, trrs pipeline.TaskRunResults, streamID uint32, opts llo.DSOpts, val llo.StreamValue, err error) { @@ -140,6 +142,7 @@ func (t *telemeter) collectV3PremiumLegacyTelemetry(d TelemetryObservation) { Epoch: int64(epoch), AssetSymbol: eaTelem.AssetSymbol, Version: uint32(1000 + mercuryutils.REPORT_V3), // add 1000 to distinguish between legacy feeds, this can be changed if necessary + DonId: t.donID, } bytes, err := proto.Marshal(tea) diff --git a/core/services/llo/telemetry_test.go b/core/services/llo/telemetry_test.go index ec77e959d24..ec650bedb83 100644 --- a/core/services/llo/telemetry_test.go +++ b/core/services/llo/telemetry_test.go @@ -112,10 +112,11 @@ func Test_Telemeter(t *testing.T) { run := &pipeline.Run{ID: 42} streamID := uint32(135) + donID := uint32(1) opts := &mockOpts{} t.Run("with error", func(t *testing.T) { - tm := newTelemeter(lggr, m) + tm := newTelemeter(lggr, m, donID) servicetest.Run(t, tm) t.Run("if error is some random failure returns immediately", func(t *testing.T) { @@ -142,7 +143,7 @@ func Test_Telemeter(t *testing.T) { }) }) t.Run("with decimal value, sets all values correctly", func(t *testing.T) { - tm := newTelemeter(lggr, m) + tm := newTelemeter(lggr, m, donID) val := llo.ToDecimal(decimal.NewFromFloat32(102.12)) servicetest.Run(t, tm) tm.EnqueueV3PremiumLegacy(run, trrs, streamID, opts, val, nil) @@ -184,6 +185,7 @@ func Test_Telemeter(t *testing.T) { assert.Equal(t, int64(18), decoded.Round) assert.Equal(t, int64(4), decoded.Epoch) assert.Equal(t, "eth/usd", decoded.AssetSymbol) + assert.Equal(t, uint32(1), decoded.DonId) if i == 2 { return } @@ -191,7 +193,7 @@ func 
Test_Telemeter(t *testing.T) { } }) t.Run("with quote value", func(t *testing.T) { - tm := newTelemeter(lggr, m) + tm := newTelemeter(lggr, m, donID) val := &llo.Quote{Bid: decimal.NewFromFloat32(102.12), Benchmark: decimal.NewFromFloat32(103.32), Ask: decimal.NewFromFloat32(104.25)} servicetest.Run(t, tm) tm.EnqueueV3PremiumLegacy(run, trrs, streamID, opts, val, nil) diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index 371eccdbe89..acee4168a5a 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -1050,6 +1050,7 @@ func (d *Delegate) newServicesLLO( ShouldRetireCache: provider.ShouldRetireCache(), RetirementReportCodec: datastreamsllo.StandardRetirementReportCodec{}, EAMonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, telemetryContractID, synchronization.EnhancedEAMercury), + DonID: pluginCfg.DonID, TraceLogging: d.cfg.OCR2().TraceLogging(), BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, diff --git a/core/services/registrysyncer/monitoring.go b/core/services/registrysyncer/monitoring.go index 97fd181515c..027d8a953d8 100644 --- a/core/services/registrysyncer/monitoring.go +++ b/core/services/registrysyncer/monitoring.go @@ -12,39 +12,38 @@ import ( localMonitoring "github.com/smartcontractkit/chainlink/v2/core/monitoring" ) -var remoteRegistrySyncFailureCounter metric.Int64Counter -var launcherFailureCounter metric.Int64Counter +// syncerMetricLabeler wraps monitoring.MetricsLabeler to provide workflow specific utilities +// for monitoring resources +type syncerMetricLabeler struct { + metrics.Labeler + remoteRegistrySyncFailureCounter metric.Int64Counter + launcherFailureCounter metric.Int64Counter +} -func initMonitoringResources() (err error) { - remoteRegistrySyncFailureCounter, err = beholder.GetMeter().Int64Counter("platform_registrysyncer_sync_failures") +func newSyncerMetricLabeler() (*syncerMetricLabeler, error) { + remoteRegistrySyncFailureCounter, err := beholder.GetMeter().Int64Counter("platform_registrysyncer_sync_failures") if err != nil { - return fmt.Errorf("failed to register sync failure counter: %w", err) + return nil, fmt.Errorf("failed to register sync failure counter: %w", err) } - launcherFailureCounter, err = beholder.GetMeter().Int64Counter("platform_registrysyncer_launch_failures") + launcherFailureCounter, err := beholder.GetMeter().Int64Counter("platform_registrysyncer_launch_failures") if err != nil { - return fmt.Errorf("failed to register launcher failure counter: %w", err) + return nil, fmt.Errorf("failed to register launcher failure counter: %w", err) } - return nil -} - -// syncerMetricLabeler wraps monitoring.MetricsLabeler to provide workflow specific utilities -// for monitoring resources -type syncerMetricLabeler struct { - metrics.Labeler + return &syncerMetricLabeler{remoteRegistrySyncFailureCounter: remoteRegistrySyncFailureCounter, launcherFailureCounter: launcherFailureCounter}, nil } -func (c syncerMetricLabeler) with(keyValues ...string) syncerMetricLabeler { - return syncerMetricLabeler{c.With(keyValues...)} +func (c *syncerMetricLabeler) with(keyValues ...string) syncerMetricLabeler { + return syncerMetricLabeler{c.With(keyValues...), c.remoteRegistrySyncFailureCounter, c.launcherFailureCounter} } -func (c syncerMetricLabeler) incrementRemoteRegistryFailureCounter(ctx context.Context) { +func (c *syncerMetricLabeler) incrementRemoteRegistryFailureCounter(ctx context.Context) { otelLabels := localMonitoring.KvMapToOtelAttributes(c.Labels) 
- remoteRegistrySyncFailureCounter.Add(ctx, 1, metric.WithAttributes(otelLabels...)) + c.remoteRegistrySyncFailureCounter.Add(ctx, 1, metric.WithAttributes(otelLabels...)) } -func (c syncerMetricLabeler) incrementLauncherFailureCounter(ctx context.Context) { +func (c *syncerMetricLabeler) incrementLauncherFailureCounter(ctx context.Context) { otelLabels := localMonitoring.KvMapToOtelAttributes(c.Labels) - launcherFailureCounter.Add(ctx, 1, metric.WithAttributes(otelLabels...)) + c.launcherFailureCounter.Add(ctx, 1, metric.WithAttributes(otelLabels...)) } diff --git a/core/services/registrysyncer/monitoring_test.go b/core/services/registrysyncer/monitoring_test.go index 1ddb6c57997..30d773aa976 100644 --- a/core/services/registrysyncer/monitoring_test.go +++ b/core/services/registrysyncer/monitoring_test.go @@ -9,11 +9,12 @@ import ( ) func Test_InitMonitoringResources(t *testing.T) { - require.NoError(t, initMonitoringResources()) + _, err := newSyncerMetricLabeler() + require.NoError(t, err) } func Test_SyncerMetricsLabeler(t *testing.T) { - testSyncerMetricLabeler := syncerMetricLabeler{metrics.NewLabeler()} + testSyncerMetricLabeler := syncerMetricLabeler{metrics.NewLabeler(), nil, nil} testSyncerMetricLabeler2 := testSyncerMetricLabeler.with("foo", "baz") require.EqualValues(t, testSyncerMetricLabeler2.Labels["foo"], "baz") } diff --git a/core/services/registrysyncer/syncer.go b/core/services/registrysyncer/syncer.go index 5fc241ad249..461824b403b 100644 --- a/core/services/registrysyncer/syncer.go +++ b/core/services/registrysyncer/syncer.go @@ -44,7 +44,7 @@ type RegistrySyncer interface { type registrySyncer struct { services.StateMachine - metrics syncerMetricLabeler + metrics *syncerMetricLabeler stopCh services.StopChan launchers []Launcher reader types.ContractReader @@ -76,7 +76,14 @@ func New( registryAddress string, orm ORM, ) (RegistrySyncer, error) { + + metricLabeler, err := newSyncerMetricLabeler() + if err != nil { + return nil, fmt.Errorf("failed to create syncer metric labeler: %w", err) + } + return ®istrySyncer{ + metrics: metricLabeler, stopCh: make(services.StopChan), updateChan: make(chan *LocalRegistry), lggr: lggr.Named("RegistrySyncer"), @@ -131,11 +138,6 @@ func newReader(ctx context.Context, lggr logger.Logger, relayer ContractReaderFa func (s *registrySyncer) Start(ctx context.Context) error { return s.StartOnce("RegistrySyncer", func() error { - err := initMonitoringResources() - if err != nil { - return err - } - s.wg.Add(1) go func() { defer s.wg.Done() diff --git a/core/services/relay/evm/chain_reader.go b/core/services/relay/evm/chain_reader.go index df216a11c2b..4de739a44b4 100644 --- a/core/services/relay/evm/chain_reader.go +++ b/core/services/relay/evm/chain_reader.go @@ -197,7 +197,8 @@ func (cr *chainReader) GetLatestValue(ctx context.Context, readName string, conf ptrToValue, isValue := returnVal.(*values.Value) if !isValue { - return binding.GetLatestValue(ctx, common.HexToAddress(address), confidenceLevel, params, returnVal) + _, err = binding.GetLatestValueWithHeadData(ctx, common.HexToAddress(address), confidenceLevel, params, returnVal) + return err } contractType, err := cr.CreateContractType(readName, false) @@ -219,6 +220,37 @@ func (cr *chainReader) GetLatestValue(ctx context.Context, readName string, conf return nil } +func (cr *chainReader) GetLatestValueWithHeadData(ctx context.Context, readName string, confidenceLevel primitives.ConfidenceLevel, params any, returnVal any) (head *commontypes.Head, err error) { + binding, address, err 
:= cr.bindings.GetReader(readName) + if err != nil { + return nil, err + } + + ptrToValue, isValue := returnVal.(*values.Value) + if !isValue { + return binding.GetLatestValueWithHeadData(ctx, common.HexToAddress(address), confidenceLevel, params, returnVal) + } + + contractType, err := cr.CreateContractType(readName, false) + if err != nil { + return nil, err + } + + head, err = cr.GetLatestValueWithHeadData(ctx, readName, confidenceLevel, params, contractType) + if err != nil { + return nil, err + } + + value, err := values.Wrap(contractType) + if err != nil { + return nil, err + } + + *ptrToValue = value + + return head, nil +} + func (cr *chainReader) BatchGetLatestValues(ctx context.Context, request commontypes.BatchGetLatestValuesRequest) (commontypes.BatchGetLatestValuesResult, error) { return cr.bindings.BatchGetLatestValues(ctx, request) } diff --git a/core/services/relay/evm/read/bindings.go b/core/services/relay/evm/read/bindings.go index bfeb84a3799..cf675ee383e 100644 --- a/core/services/relay/evm/read/bindings.go +++ b/core/services/relay/evm/read/bindings.go @@ -20,7 +20,7 @@ import ( type Reader interface { BatchCall(address common.Address, params, retVal any) (Call, error) - GetLatestValue(ctx context.Context, addr common.Address, confidence primitives.ConfidenceLevel, params, returnVal any) error + GetLatestValueWithHeadData(ctx context.Context, addr common.Address, confidence primitives.ConfidenceLevel, params, returnVal any) (*commontypes.Head, error) QueryKey(context.Context, common.Address, query.KeyFilter, query.LimitAndSort, any) ([]commontypes.Sequence, error) Bind(context.Context, ...common.Address) error diff --git a/core/services/relay/evm/read/bindings_test.go b/core/services/relay/evm/read/bindings_test.go index d9cfa91a987..129d3138141 100644 --- a/core/services/relay/evm/read/bindings_test.go +++ b/core/services/relay/evm/read/bindings_test.go @@ -73,9 +73,9 @@ func TestBindingsRegistry(t *testing.T) { mReg.EXPECT().HasFilter(mock.Anything).Return(false) mReg.EXPECT().RegisterFilter(mock.Anything, mock.Anything).Return(nil) - mRdr0.EXPECT().GetLatestValue(mock.Anything, common.HexToAddress("0x25"), mock.Anything, mock.Anything, mock.Anything).Return(nil) - mRdr0.EXPECT().GetLatestValue(mock.Anything, common.HexToAddress("0x24"), mock.Anything, mock.Anything, mock.Anything).Return(nil) - mRdr1.EXPECT().GetLatestValue(mock.Anything, common.HexToAddress("0x26"), mock.Anything, mock.Anything, mock.Anything).Return(nil) + mRdr0.EXPECT().GetLatestValueWithHeadData(mock.Anything, common.HexToAddress("0x25"), mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) + mRdr0.EXPECT().GetLatestValueWithHeadData(mock.Anything, common.HexToAddress("0x24"), mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) + mRdr1.EXPECT().GetLatestValueWithHeadData(mock.Anything, common.HexToAddress("0x26"), mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) // part of the init phase of chain reader require.NoError(t, named.AddReader(contractName1, methodName1, mRdr0)) @@ -100,9 +100,12 @@ func TestBindingsRegistry(t *testing.T) { rdr2, _, err := named.GetReader(bindings[0].ReadIdentifier(methodName2)) require.NoError(t, err) - require.NoError(t, rdr1.GetLatestValue(context.Background(), common.HexToAddress("0x25"), primitives.Finalized, nil, nil)) - require.NoError(t, rdr1.GetLatestValue(context.Background(), common.HexToAddress("0x24"), primitives.Finalized, nil, nil)) - require.NoError(t, rdr2.GetLatestValue(context.Background(), 
common.HexToAddress("0x26"), primitives.Finalized, nil, nil)) + _, err = rdr1.GetLatestValueWithHeadData(context.Background(), common.HexToAddress("0x25"), primitives.Finalized, nil, nil) + require.NoError(t, err) + _, err = rdr1.GetLatestValueWithHeadData(context.Background(), common.HexToAddress("0x24"), primitives.Finalized, nil, nil) + require.NoError(t, err) + _, err = rdr2.GetLatestValueWithHeadData(context.Background(), common.HexToAddress("0x26"), primitives.Finalized, nil, nil) + require.NoError(t, err) mBatch.AssertExpectations(t) mRdr0.AssertExpectations(t) diff --git a/core/services/relay/evm/read/event.go b/core/services/relay/evm/read/event.go index a1678fbb4b9..c37b979d7ea 100644 --- a/core/services/relay/evm/read/event.go +++ b/core/services/relay/evm/read/event.go @@ -233,7 +233,7 @@ func (b *EventBinding) BatchCall(_ common.Address, _, _ any) (Call, error) { return Call{}, fmt.Errorf("%w: events are not yet supported in batch get latest values", commontypes.ErrInvalidType) } -func (b *EventBinding) GetLatestValue(ctx context.Context, address common.Address, confidenceLevel primitives.ConfidenceLevel, params, into any) (err error) { +func (b *EventBinding) GetLatestValueWithHeadData(ctx context.Context, address common.Address, confidenceLevel primitives.ConfidenceLevel, params, into any) (head *commontypes.Head, err error) { var ( confs evmtypes.Confirmations result *string @@ -256,24 +256,24 @@ func (b *EventBinding) GetLatestValue(ctx context.Context, address common.Addres }() if err = b.validateBound(address); err != nil { - return err + return nil, err } confs, err = confidenceToConfirmations(b.confirmationsMapping, confidenceLevel) if err != nil { - return err + return nil, err } topicTypeID := codec.WrapItemType(b.contractName, b.eventName, true) onChainTypedVal, err := b.toNativeOnChainType(topicTypeID, params) if err != nil { - return err + return nil, err } filterTopics, err := b.extractFilterTopics(topicTypeID, onChainTypedVal) if err != nil { - return err + return nil, err } var log *logpoller.Log @@ -281,26 +281,30 @@ func (b *EventBinding) GetLatestValue(ctx context.Context, address common.Addres var hashedTopics []common.Hash hashedTopics, err = b.hashTopics(topicTypeID, filterTopics) if err != nil { - return err + return nil, err } if log, err = b.getLatestLog(ctx, address, confs, hashedTopics); err != nil { - return err + return nil, err } } else { if log, err = b.lp.LatestLogByEventSigWithConfs(ctx, b.hash, address, confs); err != nil { - return wrapInternalErr(err) + return nil, wrapInternalErr(err) } } - if err := b.decodeLog(ctx, log, into); err != nil { + if err = b.decodeLog(ctx, log, into); err != nil { encoded := hex.EncodeToString(log.Data) result = &encoded - - return err + return nil, err } - return nil + return &commontypes.Head{ + Height: strconv.FormatInt(log.BlockNumber, 10), + Hash: log.BlockHash.Bytes(), + //nolint:gosec // G115 + Timestamp: uint64(log.BlockTimestamp.Unix()), + }, nil } func (b *EventBinding) QueryKey(ctx context.Context, address common.Address, filter query.KeyFilter, limitAndSort query.LimitAndSort, sequenceDataType any) (sequences []commontypes.Sequence, err error) { diff --git a/core/services/relay/evm/read/method.go b/core/services/relay/evm/read/method.go index fc7886b74b7..393077c6d3f 100644 --- a/core/services/relay/evm/read/method.go +++ b/core/services/relay/evm/read/method.go @@ -121,14 +121,19 @@ func (b *MethodBinding) BatchCall(address common.Address, params, retVal any) (C }, nil } -func (b *MethodBinding) 
GetLatestValue(ctx context.Context, addr common.Address, confidenceLevel primitives.ConfidenceLevel, params, returnVal any) error { +func (b *MethodBinding) GetLatestValueWithHeadData(ctx context.Context, addr common.Address, confidenceLevel primitives.ConfidenceLevel, params, returnVal any) (*commontypes.Head, error) { if !b.isBound(addr) { - return fmt.Errorf("%w: %w", commontypes.ErrInvalidConfig, newUnboundAddressErr(addr.Hex(), b.contractName, b.method)) + return nil, fmt.Errorf("%w: %w", commontypes.ErrInvalidConfig, newUnboundAddressErr(addr.Hex(), b.contractName, b.method)) } - block, err := b.blockNumberFromConfidence(ctx, confidenceLevel) + block, confirmations, err := b.blockAndConfirmationsFromConfidence(ctx, confidenceLevel) if err != nil { - return err + return nil, err + } + + var blockNum *big.Int + if block != nil && confirmations != evmtypes.Unconfirmed { + blockNum = big.NewInt(block.Number) } data, err := b.codec.Encode(ctx, params, codec.WrapItemType(b.contractName, b.method, true)) @@ -141,9 +146,9 @@ func (b *MethodBinding) GetLatestValue(ctx context.Context, addr common.Address, ReadName: b.method, Params: params, ReturnVal: returnVal, - }, block.String(), false) + }, blockNum.String(), false) - return callErr + return nil, callErr } callMsg := ethereum.CallMsg{ @@ -152,7 +157,7 @@ func (b *MethodBinding) GetLatestValue(ctx context.Context, addr common.Address, Data: data, } - bytes, err := b.client.CallContract(ctx, callMsg, block) + bytes, err := b.client.CallContract(ctx, callMsg, blockNum) if err != nil { callErr := newErrorFromCall( fmt.Errorf("%w: contract call: %s", commontypes.ErrInvalidType, err.Error()), @@ -162,9 +167,9 @@ func (b *MethodBinding) GetLatestValue(ctx context.Context, addr common.Address, ReadName: b.method, Params: params, ReturnVal: returnVal, - }, block.String(), false) + }, blockNum.String(), false) - return callErr + return nil, callErr } if err = b.codec.Decode(ctx, bytes, returnVal, codec.WrapItemType(b.contractName, b.method, false)); err != nil { @@ -176,15 +181,15 @@ func (b *MethodBinding) GetLatestValue(ctx context.Context, addr common.Address, ReadName: b.method, Params: params, ReturnVal: returnVal, - }, block.String(), false) + }, blockNum.String(), false) strResult := hexutil.Encode(bytes) callErr.Result = &strResult - return callErr + return nil, callErr } - return nil + return block.ToChainAgnosticHead(), nil } func (b *MethodBinding) QueryKey( @@ -200,31 +205,31 @@ func (b *MethodBinding) QueryKey( func (b *MethodBinding) Register(_ context.Context) error { return nil } func (b *MethodBinding) Unregister(_ context.Context) error { return nil } -func (b *MethodBinding) blockNumberFromConfidence(ctx context.Context, confidenceLevel primitives.ConfidenceLevel) (*big.Int, error) { +func (b *MethodBinding) blockAndConfirmationsFromConfidence(ctx context.Context, confidenceLevel primitives.ConfidenceLevel) (*evmtypes.Head, evmtypes.Confirmations, error) { confirmations, err := confidenceToConfirmations(b.confirmationsMapping, confidenceLevel) if err != nil { - err = fmt.Errorf("%w: contract: %s; method: %s;", err, b.contractName, b.method) + err = fmt.Errorf("%w: contract: %s; method: %s", err, b.contractName, b.method) if confidenceLevel == primitives.Unconfirmed { b.lggr.Debugw("Falling back to default contract call behaviour that calls latest state", "contract", b.contractName, "method", b.method, "err", err) - return nil, nil + return nil, 0, err } - return nil, err + return nil, 0, err } - _, finalized, err := 
b.ht.LatestAndFinalizedBlock(ctx) + latest, finalized, err := b.ht.LatestAndFinalizedBlock(ctx) if err != nil { - return nil, fmt.Errorf("%w: head tracker: %w", commontypes.ErrInternal, err) + return nil, 0, fmt.Errorf("%w: head tracker: %w", commontypes.ErrInternal, err) } if confirmations == evmtypes.Finalized { - return big.NewInt(finalized.Number), nil + return finalized, confirmations, nil } else if confirmations == evmtypes.Unconfirmed { - return nil, nil + return latest, confirmations, nil } - return nil, fmt.Errorf("%w: [unknown evm confirmations]: %v; contract: %s; method: %s;", commontypes.ErrInvalidConfig, confirmations, b.contractName, b.method) + return nil, 0, fmt.Errorf("%w: [unknown evm confirmations]: %v; contract: %s; method: %s", commontypes.ErrInvalidConfig, confirmations, b.contractName, b.method) } func (b *MethodBinding) isBound(binding common.Address) bool { diff --git a/core/services/relay/evm/read/mocks/reader.go b/core/services/relay/evm/read/mocks/reader.go index b259b3cdcb1..79df3cf4025 100644 --- a/core/services/relay/evm/read/mocks/reader.go +++ b/core/services/relay/evm/read/mocks/reader.go @@ -150,52 +150,64 @@ func (_c *Reader_Bind_Call) RunAndReturn(run func(context.Context, ...common.Add return _c } -// GetLatestValue provides a mock function with given fields: ctx, addr, confidence, params, returnVal -func (_m *Reader) GetLatestValue(ctx context.Context, addr common.Address, confidence primitives.ConfidenceLevel, params any, returnVal any) error { +// GetLatestValueWithHeadData provides a mock function with given fields: ctx, addr, confidence, params, returnVal +func (_m *Reader) GetLatestValueWithHeadData(ctx context.Context, addr common.Address, confidence primitives.ConfidenceLevel, params any, returnVal any) (*types.Head, error) { ret := _m.Called(ctx, addr, confidence, params, returnVal) if len(ret) == 0 { - panic("no return value specified for GetLatestValue") + panic("no return value specified for GetLatestValueWithHeadData") } - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address, primitives.ConfidenceLevel, any, any) error); ok { + var r0 *types.Head + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, primitives.ConfidenceLevel, any, any) (*types.Head, error)); ok { + return rf(ctx, addr, confidence, params, returnVal) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, primitives.ConfidenceLevel, any, any) *types.Head); ok { r0 = rf(ctx, addr, confidence, params, returnVal) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Head) + } } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, common.Address, primitives.ConfidenceLevel, any, any) error); ok { + r1 = rf(ctx, addr, confidence, params, returnVal) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// Reader_GetLatestValue_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestValue' -type Reader_GetLatestValue_Call struct { +// Reader_GetLatestValueWithHeadData_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestValueWithHeadData' +type Reader_GetLatestValueWithHeadData_Call struct { *mock.Call } -// GetLatestValue is a helper method to define mock.On call +// GetLatestValueWithHeadData is a helper method to define mock.On call // - ctx context.Context // - addr common.Address // - confidence primitives.ConfidenceLevel // - params any // - returnVal any -func (_e *Reader_Expecter) 
GetLatestValue(ctx interface{}, addr interface{}, confidence interface{}, params interface{}, returnVal interface{}) *Reader_GetLatestValue_Call { - return &Reader_GetLatestValue_Call{Call: _e.mock.On("GetLatestValue", ctx, addr, confidence, params, returnVal)} +func (_e *Reader_Expecter) GetLatestValueWithHeadData(ctx interface{}, addr interface{}, confidence interface{}, params interface{}, returnVal interface{}) *Reader_GetLatestValueWithHeadData_Call { + return &Reader_GetLatestValueWithHeadData_Call{Call: _e.mock.On("GetLatestValueWithHeadData", ctx, addr, confidence, params, returnVal)} } -func (_c *Reader_GetLatestValue_Call) Run(run func(ctx context.Context, addr common.Address, confidence primitives.ConfidenceLevel, params any, returnVal any)) *Reader_GetLatestValue_Call { +func (_c *Reader_GetLatestValueWithHeadData_Call) Run(run func(ctx context.Context, addr common.Address, confidence primitives.ConfidenceLevel, params any, returnVal any)) *Reader_GetLatestValueWithHeadData_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(common.Address), args[2].(primitives.ConfidenceLevel), args[3].(any), args[4].(any)) }) return _c } -func (_c *Reader_GetLatestValue_Call) Return(_a0 error) *Reader_GetLatestValue_Call { - _c.Call.Return(_a0) +func (_c *Reader_GetLatestValueWithHeadData_Call) Return(_a0 *types.Head, _a1 error) *Reader_GetLatestValueWithHeadData_Call { + _c.Call.Return(_a0, _a1) return _c } -func (_c *Reader_GetLatestValue_Call) RunAndReturn(run func(context.Context, common.Address, primitives.ConfidenceLevel, any, any) error) *Reader_GetLatestValue_Call { +func (_c *Reader_GetLatestValueWithHeadData_Call) RunAndReturn(run func(context.Context, common.Address, primitives.ConfidenceLevel, any, any) (*types.Head, error)) *Reader_GetLatestValueWithHeadData_Call { _c.Call.Return(run) return _c } diff --git a/core/services/relay/evm/write_target_test.go b/core/services/relay/evm/write_target_test.go index ce169554768..24d7dd8646e 100644 --- a/core/services/relay/evm/write_target_test.go +++ b/core/services/relay/evm/write_target_test.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink-common/pkg/capabilities" + commonTypes "github.com/smartcontractkit/chainlink-common/pkg/types" "github.com/smartcontractkit/chainlink-common/pkg/values" "github.com/smartcontractkit/chainlink/v2/common/headtracker/mocks" @@ -110,6 +111,8 @@ func TestEvmWrite(t *testing.T) { evmClient.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(mockCall, nil).Maybe() evmClient.On("CodeAt", mock.Anything, mock.Anything, mock.Anything).Return([]byte("test"), nil) + txManager.On("GetTransactionStatus", mock.Anything, mock.Anything).Return(commonTypes.Finalized, nil) + chain.On("ID").Return(big.NewInt(11155111)) chain.On("TxManager").Return(txManager) chain.On("LogPoller").Return(poller) diff --git a/core/services/standardcapabilities/delegate.go b/core/services/standardcapabilities/delegate.go index 80a60c334fc..a92e082dead 100644 --- a/core/services/standardcapabilities/delegate.go +++ b/core/services/standardcapabilities/delegate.go @@ -237,14 +237,14 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, spec job.Job) ([]job.Ser return nil, errors.New("config is empty") } - var fetchCfg webapi.ServiceConfig - err := toml.Unmarshal([]byte(spec.StandardCapabilitiesSpec.Config), &fetchCfg) + var cfg compute.Config + err := toml.Unmarshal([]byte(spec.StandardCapabilitiesSpec.Config), &cfg) if err != 
nil { return nil, err } lggr := d.logger.Named("ComputeAction") - handler, err := webapi.NewOutgoingConnectorHandler(d.gatewayConnectorWrapper.GetGatewayConnector(), fetchCfg, capabilities.MethodComputeAction, lggr) + handler, err := webapi.NewOutgoingConnectorHandler(d.gatewayConnectorWrapper.GetGatewayConnector(), cfg.ServiceConfig, capabilities.MethodComputeAction, lggr) if err != nil { return nil, err } @@ -253,7 +253,7 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, spec job.Job) ([]job.Ser return uuid.New().String() } - computeSrvc := compute.NewAction(fetchCfg, log, d.registry, handler, idGeneratorFn) + computeSrvc := compute.NewAction(cfg, log, d.registry, handler, idGeneratorFn) return []job.ServiceCtx{computeSrvc}, nil } diff --git a/core/services/synchronization/telem/telem_enhanced_ea_mercury.pb.go b/core/services/synchronization/telem/telem_enhanced_ea_mercury.pb.go index 09eed12ee8a..34f4b3e349b 100644 --- a/core/services/synchronization/telem/telem_enhanced_ea_mercury.pb.go +++ b/core/services/synchronization/telem/telem_enhanced_ea_mercury.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.34.2 -// protoc v5.28.0 +// protoc v5.28.3 // source: core/services/synchronization/telem/telem_enhanced_ea_mercury.proto package telem @@ -115,6 +115,7 @@ type EnhancedEAMercury struct { Round int64 `protobuf:"varint,19,opt,name=round,proto3" json:"round,omitempty"` Epoch int64 `protobuf:"varint,20,opt,name=epoch,proto3" json:"epoch,omitempty"` AssetSymbol string `protobuf:"bytes,21,opt,name=asset_symbol,json=assetSymbol,proto3" json:"asset_symbol,omitempty"` + DonId uint32 `protobuf:"varint,36,opt,name=don_id,json=donId,proto3" json:"don_id,omitempty"` } func (x *EnhancedEAMercury) Reset() { @@ -394,6 +395,13 @@ func (x *EnhancedEAMercury) GetAssetSymbol() string { return "" } +func (x *EnhancedEAMercury) GetDonId() uint32 { + if x != nil { + return x.DonId + } + return 0 +} + var File_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto protoreflect.FileDescriptor var file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDesc = []byte{ @@ -401,7 +409,7 @@ var file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_raw 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x65, 0x6e, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x65, 0x61, 0x5f, 0x6d, 0x65, 0x72, 0x63, 0x75, 0x72, 0x79, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x22, 0xaa, 0x0d, 0x0a, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x22, 0xc1, 0x0d, 0x0a, 0x11, 0x45, 0x6e, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x64, 0x45, 0x41, 0x4d, 0x65, 0x72, 0x63, 0x75, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, @@ -508,16 +516,17 @@ var file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_raw 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x73, - 0x73, 0x65, 0x74, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x2a, 0x31, 0x0a, 0x0c, 0x4d, 0x61, 0x72, - 0x6b, 0x65, 
0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, - 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x02, 0x42, 0x4e, 0x5a, 0x4c, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6d, 0x61, 0x72, 0x74, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6b, 0x69, 0x74, 0x2f, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x65, 0x74, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x15, 0x0a, 0x06, 0x64, 0x6f, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x64, 0x6f, 0x6e, 0x49, 0x64, + 0x2a, 0x31, 0x0a, 0x0c, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, + 0x4e, 0x10, 0x02, 0x42, 0x4e, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6b, + 0x69, 0x74, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x2f, 0x76, 0x32, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x79, + 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/core/services/synchronization/telem/telem_enhanced_ea_mercury.proto b/core/services/synchronization/telem/telem_enhanced_ea_mercury.proto index d57b7ca836a..cfb8dbac0c9 100644 --- a/core/services/synchronization/telem/telem_enhanced_ea_mercury.proto +++ b/core/services/synchronization/telem/telem_enhanced_ea_mercury.proto @@ -59,4 +59,5 @@ message EnhancedEAMercury { int64 round=19; int64 epoch=20; string asset_symbol=21; + uint32 don_id=36; } diff --git a/core/services/workflows/engine.go b/core/services/workflows/engine.go index 8e2cb8e34cb..e20af85540d 100644 --- a/core/services/workflows/engine.go +++ b/core/services/workflows/engine.go @@ -113,6 +113,7 @@ type Engine struct { newWorkerTimeout time.Duration maxExecutionDuration time.Duration heartbeatCadence time.Duration + stepTimeoutDuration time.Duration // testing lifecycle hook to signal when an execution is finished. 
onExecutionFinished func(string) @@ -755,7 +756,10 @@ func (e *Engine) workerForStepRequest(ctx context.Context, msg stepRequest) { // TODO ks-462 inputs logCustMsg(ctx, cma, "executing step", l) - inputs, outputs, err := e.executeStep(ctx, l, msg) + stepCtx, cancel := context.WithTimeout(ctx, e.stepTimeoutDuration) + defer cancel() + + inputs, outputs, err := e.executeStep(stepCtx, l, msg) var stepStatus string switch { case errors.Is(capabilities.ErrStopExecution, err): @@ -1137,6 +1141,7 @@ type Config struct { Binary []byte SecretsFetcher secretsFetcher HeartbeatCadence time.Duration + StepTimeout time.Duration // For testing purposes only maxRetries int @@ -1152,6 +1157,7 @@ const ( defaultNewWorkerTimeout = 2 * time.Second defaultMaxExecutionDuration = 10 * time.Minute defaultHeartbeatCadence = 5 * time.Minute + defaultStepTimeout = 2 * time.Minute ) func NewEngine(ctx context.Context, cfg Config) (engine *Engine, err error) { @@ -1183,6 +1189,10 @@ func NewEngine(ctx context.Context, cfg Config) (engine *Engine, err error) { cfg.HeartbeatCadence = defaultHeartbeatCadence } + if cfg.StepTimeout == 0 { + cfg.StepTimeout = defaultStepTimeout + } + if cfg.retryMs == 0 { cfg.retryMs = 5000 } @@ -1235,6 +1245,7 @@ func NewEngine(ctx context.Context, cfg Config) (engine *Engine, err error) { triggerEvents: make(chan capabilities.TriggerResponse), stopCh: make(chan struct{}), newWorkerTimeout: cfg.NewWorkerTimeout, + stepTimeoutDuration: cfg.StepTimeout, maxExecutionDuration: cfg.MaxExecutionDuration, heartbeatCadence: cfg.HeartbeatCadence, onExecutionFinished: cfg.onExecutionFinished, diff --git a/core/services/workflows/engine_test.go b/core/services/workflows/engine_test.go index 5e87d4f7603..e6667fe0bc6 100644 --- a/core/services/workflows/engine_test.go +++ b/core/services/workflows/engine_test.go @@ -1429,19 +1429,21 @@ func TestEngine_WithCustomComputeStep(t *testing.T) { ctx := testutils.Context(t) log := logger.TestLogger(t) reg := coreCap.NewRegistry(logger.TestLogger(t)) - cfg := webapi.ServiceConfig{ - RateLimiter: common.RateLimiterConfig{ - GlobalRPS: 100.0, - GlobalBurst: 100, - PerSenderRPS: 100.0, - PerSenderBurst: 100, + cfg := compute.Config{ + ServiceConfig: webapi.ServiceConfig{ + RateLimiter: common.RateLimiterConfig{ + GlobalRPS: 100.0, + GlobalBurst: 100, + PerSenderRPS: 100.0, + PerSenderBurst: 100, + }, }, } connector := gcmocks.NewGatewayConnector(t) handler, err := webapi.NewOutgoingConnectorHandler( connector, - cfg, + cfg.ServiceConfig, ghcapabilities.MethodComputeAction, log) require.NoError(t, err) @@ -1493,18 +1495,20 @@ func TestEngine_CustomComputePropagatesBreaks(t *testing.T) { ctx := testutils.Context(t) log := logger.TestLogger(t) reg := coreCap.NewRegistry(logger.TestLogger(t)) - cfg := webapi.ServiceConfig{ - RateLimiter: common.RateLimiterConfig{ - GlobalRPS: 100.0, - GlobalBurst: 100, - PerSenderRPS: 100.0, - PerSenderBurst: 100, + cfg := compute.Config{ + ServiceConfig: webapi.ServiceConfig{ + RateLimiter: common.RateLimiterConfig{ + GlobalRPS: 100.0, + GlobalBurst: 100, + PerSenderRPS: 100.0, + PerSenderBurst: 100, + }, }, } connector := gcmocks.NewGatewayConnector(t) handler, err := webapi.NewOutgoingConnectorHandler( connector, - cfg, + cfg.ServiceConfig, ghcapabilities.MethodComputeAction, log) require.NoError(t, err) diff --git a/core/web/presenters/csa_key_test.go b/core/web/presenters/csa_key_test.go index 06f84db7dd5..d514519fafd 100644 --- a/core/web/presenters/csa_key_test.go +++ b/core/web/presenters/csa_key_test.go @@ -9,15 +9,13 @@ 
import ( "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey" - "github.com/smartcontractkit/chainlink/v2/core/utils" ) func TestCSAKeyResource(t *testing.T) { - key, err := csakey.New("passphrase", utils.FastScryptParams) + keyV2, err := csakey.NewV2() require.NoError(t, err) - key.ID = 1 - r := NewCSAKeyResource(key.ToV2()) + r := NewCSAKeyResource(keyV2) b, err := jsonapi.Marshal(r) require.NoError(t, err) @@ -25,13 +23,13 @@ func TestCSAKeyResource(t *testing.T) { { "data":{ "type":"csaKeys", - "id":"%s", + "id":"%[1]s", "attributes":{ - "publicKey": "csa_%s", + "publicKey": "csa_%[1]s", "version": 1 } } - }`, key.PublicKey.String(), key.PublicKey.String()) + }`, keyV2.PublicKeyString()) assert.JSONEq(t, expected, string(b)) } diff --git a/core/web/resolver/testdata/config-full.toml b/core/web/resolver/testdata/config-full.toml index 3ae24d81f40..bfb0dcb9961 100644 --- a/core/web/resolver/testdata/config-full.toml +++ b/core/web/resolver/testdata/config-full.toml @@ -491,6 +491,7 @@ OCR2CacheTTL = '1h0m0s' TxTimeout = '1h0m0s' TxRetryTimeout = '1m0s' TxConfirmTimeout = '1s' +TxRetentionTimeout = '0s' SkipPreflight = true Commitment = 'banana' MaxRetries = 7 diff --git a/core/web/resolver/testdata/config-multi-chain-effective.toml b/core/web/resolver/testdata/config-multi-chain-effective.toml index ea8022fa6ae..074cb82482b 100644 --- a/core/web/resolver/testdata/config-multi-chain-effective.toml +++ b/core/web/resolver/testdata/config-multi-chain-effective.toml @@ -653,6 +653,7 @@ OCR2CacheTTL = '1m0s' TxTimeout = '1m0s' TxRetryTimeout = '10s' TxConfirmTimeout = '30s' +TxRetentionTimeout = '0s' SkipPreflight = true Commitment = 'confirmed' MaxRetries = 12 @@ -697,6 +698,7 @@ OCR2CacheTTL = '1m0s' TxTimeout = '1m0s' TxRetryTimeout = '10s' TxConfirmTimeout = '30s' +TxRetentionTimeout = '0s' SkipPreflight = true Commitment = 'confirmed' MaxRetries = 0 diff --git a/core/web/solana_chains_controller_test.go b/core/web/solana_chains_controller_test.go index 56605f734aa..4aa0dbe579d 100644 --- a/core/web/solana_chains_controller_test.go +++ b/core/web/solana_chains_controller_test.go @@ -49,6 +49,7 @@ OCR2CacheTTL = '1m0s' TxTimeout = '1h0m0s' TxRetryTimeout = '10s' TxConfirmTimeout = '30s' +TxRetentionTimeout = '0s' SkipPreflight = false Commitment = 'confirmed' MaxRetries = 0 diff --git a/deployment/ccip/add_lane_test.go b/deployment/ccip/add_lane_test.go index 223d978b814..02fea79c911 100644 --- a/deployment/ccip/add_lane_test.go +++ b/deployment/ccip/add_lane_test.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" + commonutils "github.com/smartcontractkit/chainlink-common/pkg/utils" "github.com/smartcontractkit/chainlink-testing-framework/lib/utils/testcontext" "github.com/smartcontractkit/chainlink/deployment" @@ -119,7 +120,7 @@ func TestAddLane(t *testing.T) { ExtraArgs: nil, }) require.Equal(t, uint64(1), seqNum2) - require.NoError(t, ConfirmExecWithSeqNr(t, e.Env.Chains[chain2], e.Env.Chains[chain1], state.Chains[chain1].OffRamp, &startBlock2, seqNum2)) + require.NoError(t, commonutils.JustError(ConfirmExecWithSeqNr(t, e.Env.Chains[chain2], e.Env.Chains[chain1], state.Chains[chain1].OffRamp, &startBlock2, seqNum2))) // now check for the previous message from chain 1 to chain 2 that it has not been executed till now as the onRamp was disabled ConfirmNoExecConsistentlyWithSeqNr(t, e.Env.Chains[chain1], e.Env.Chains[chain2], state.Chains[chain2].OffRamp, seqNum1, 
30*time.Second) @@ -145,5 +146,5 @@ func TestAddLane(t *testing.T) { ReplayLogs(t, e.Env.Offchain, replayBlocks) time.Sleep(30 * time.Second) // Now that the onRamp is enabled, the request should be processed - require.NoError(t, ConfirmExecWithSeqNr(t, e.Env.Chains[chain1], e.Env.Chains[chain2], state.Chains[chain2].OffRamp, &startBlock, seqNum1)) + require.NoError(t, commonutils.JustError(ConfirmExecWithSeqNr(t, e.Env.Chains[chain1], e.Env.Chains[chain2], state.Chains[chain2].OffRamp, &startBlock, seqNum1))) } diff --git a/deployment/ccip/changeset/add_chain_test.go b/deployment/ccip/changeset/add_chain_test.go index ff02430fd51..6a87bdd0a0a 100644 --- a/deployment/ccip/changeset/add_chain_test.go +++ b/deployment/ccip/changeset/add_chain_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/require" cciptypes "github.com/smartcontractkit/chainlink-ccip/pkg/types/ccipocr3" + commonutils "github.com/smartcontractkit/chainlink-common/pkg/utils" "github.com/smartcontractkit/chainlink-testing-framework/lib/utils/testcontext" @@ -224,7 +225,7 @@ func TestAddChainInbound(t *testing.T) { cciptypes.SeqNum(seqNr), })) require.NoError(t, - ccipdeployment.ConfirmExecWithSeqNr(t, e.Env.Chains[initialDeploy[0]], e.Env.Chains[newChain], state.Chains[newChain].OffRamp, &startBlock, seqNr)) + commonutils.JustError(ccipdeployment.ConfirmExecWithSeqNr(t, e.Env.Chains[initialDeploy[0]], e.Env.Chains[newChain], state.Chains[newChain].OffRamp, &startBlock, seqNr))) linkAddress := state.Chains[newChain].LinkToken.Address() feeQuoter := state.Chains[newChain].FeeQuoter diff --git a/deployment/ccip/changeset/home_chain.go b/deployment/ccip/changeset/home_chain.go index 5fa5cab5b21..7d7f64a8bb8 100644 --- a/deployment/ccip/changeset/home_chain.go +++ b/deployment/ccip/changeset/home_chain.go @@ -13,14 +13,11 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry" ) -var _ deployment.ChangeSet = DeployHomeChain +var _ deployment.ChangeSet[DeployHomeChainConfig] = DeployHomeChain // DeployHomeChain is a separate changeset because it is a standalone deployment performed once in home chain for the entire CCIP deployment. 
-func DeployHomeChain(env deployment.Environment, config interface{}) (deployment.ChangesetOutput, error) { - cfg, ok := config.(DeployHomeChainConfig) - if !ok { - return deployment.ChangesetOutput{}, deployment.ErrInvalidConfig - } +func DeployHomeChain(env deployment.Environment, cfg DeployHomeChainConfig) (deployment.ChangesetOutput, error) { + err := cfg.Validate() if err != nil { return deployment.ChangesetOutput{}, errors.Wrapf(deployment.ErrInvalidConfig, "%v", err) diff --git a/deployment/ccip/changeset/initial_deploy.go b/deployment/ccip/changeset/initial_deploy.go index f9d7caf44a3..de17834e8bd 100644 --- a/deployment/ccip/changeset/initial_deploy.go +++ b/deployment/ccip/changeset/initial_deploy.go @@ -8,13 +8,9 @@ import ( ccipdeployment "github.com/smartcontractkit/chainlink/deployment/ccip" ) -var _ deployment.ChangeSet = InitialDeploy +var _ deployment.ChangeSet[ccipdeployment.DeployCCIPContractConfig] = InitialDeploy -func InitialDeploy(env deployment.Environment, config interface{}) (deployment.ChangesetOutput, error) { - c, ok := config.(ccipdeployment.DeployCCIPContractConfig) - if !ok { - return deployment.ChangesetOutput{}, deployment.ErrInvalidConfig - } +func InitialDeploy(env deployment.Environment, c ccipdeployment.DeployCCIPContractConfig) (deployment.ChangesetOutput, error) { newAddresses := deployment.NewMemoryAddressBook() err := ccipdeployment.DeployCCIPContracts(env, newAddresses, c) if err != nil { diff --git a/deployment/ccip/test_assertions.go b/deployment/ccip/test_assertions.go index 373610531a1..64d1eb8571c 100644 --- a/deployment/ccip/test_assertions.go +++ b/deployment/ccip/test_assertions.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/big" + "sync" "testing" "time" @@ -13,6 +14,7 @@ import ( "golang.org/x/sync/errgroup" "github.com/smartcontractkit/chainlink-ccip/pkg/types/ccipocr3" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" "github.com/smartcontractkit/chainlink/deployment/environment/memory" "github.com/smartcontractkit/chainlink/deployment" @@ -248,6 +250,26 @@ func ConfirmCommitWithExpectedSeqNumRange( } t.Logf("Waiting for commit report on chain selector %d from source selector %d expected seq nr range %s", dest.Selector, src.Selector, expectedSeqNumRange.String()) + + // Need to do this because the subscription sometimes fails to get the event. + iter, err := offRamp.FilterCommitReportAccepted(&bind.FilterOpts{ + Context: tests.Context(t), + }) + require.NoError(t, err) + for iter.Next() { + event := iter.Event + if len(event.MerkleRoots) > 0 { + for _, mr := range event.MerkleRoots { + if mr.SourceChainSelector == src.Selector && + uint64(expectedSeqNumRange.Start()) >= mr.MinSeqNr && + uint64(expectedSeqNumRange.End()) <= mr.MaxSeqNr { + t.Logf("Received commit report for [%d, %d] on selector %d from source selector %d expected seq nr range %s, token prices: %v", + mr.MinSeqNr, mr.MaxSeqNr, dest.Selector, src.Selector, expectedSeqNumRange.String(), event.PriceUpdates.TokenPriceUpdates) + return nil + } + } + } + } case subErr := <-subscription.Err(): return fmt.Errorf("subscription error: %w", subErr) case <-timer.C: @@ -272,6 +294,7 @@ func ConfirmCommitWithExpectedSeqNumRange( } // ConfirmExecWithSeqNrForAll waits for all chains in the environment to execute the given expectedSeqNums. +// If successful, it returns a map that maps the expected sequence numbers to their respective execution state. 
// expectedSeqNums is a map of destination chain selector to expected sequence number // startBlocks is a map of destination chain selector to start block number to start watching from. // If startBlocks is nil, it will start watching from the latest block. @@ -281,8 +304,12 @@ func ConfirmExecWithSeqNrForAll( state CCIPOnChainState, expectedSeqNums map[uint64]uint64, startBlocks map[uint64]*uint64, -) { - var wg errgroup.Group +) (executionStates map[uint64]int) { + var ( + wg errgroup.Group + mx sync.Mutex + ) + executionStates = make(map[uint64]int) for src, srcChain := range e.Chains { for dest, dstChain := range e.Chains { if src == dest { @@ -300,7 +327,7 @@ func ConfirmExecWithSeqNrForAll( return nil } - return ConfirmExecWithSeqNr( + executionState, err := ConfirmExecWithSeqNr( t, srcChain, dstChain, @@ -308,10 +335,20 @@ func ConfirmExecWithSeqNrForAll( startBlock, expectedSeqNums[dstChain.Selector], ) + if err != nil { + return err + } + + mx.Lock() + executionStates[expectedSeqNums[dstChain.Selector]] = executionState + mx.Unlock() + + return nil }) } } require.NoError(t, wg.Wait()) + return executionStates } // ConfirmExecWithSeqNr waits for an execution state change on the destination chain with the expected sequence number. @@ -323,7 +360,7 @@ func ConfirmExecWithSeqNr( offRamp *offramp.OffRamp, startBlock *uint64, expectedSeqNr uint64, -) error { +) (executionState int, err error) { timer := time.NewTimer(5 * time.Minute) defer timer.Stop() tick := time.NewTicker(5 * time.Second) @@ -334,7 +371,7 @@ func ConfirmExecWithSeqNr( Start: startBlock, }, sink, nil, nil, nil) if err != nil { - return fmt.Errorf("error to subscribe ExecutionStateChanged : %w", err) + return -1, fmt.Errorf("error to subscribe ExecutionStateChanged : %w", err) } defer subscription.Unsubscribe() for { @@ -346,7 +383,7 @@ func ConfirmExecWithSeqNr( if executionState == EXECUTION_STATE_SUCCESS || executionState == EXECUTION_STATE_FAILURE { t.Logf("Observed %s execution state on chain %d (offramp %s) from chain %d with expected sequence number %d", executionStateToString(executionState), dest.Selector, offRamp.Address().String(), source.Selector, expectedSeqNr) - return nil + return int(executionState), nil } case execEvent := <-sink: t.Logf("Received ExecutionStateChanged (state %s) for seqNum %d on chain %d (offramp %s) from chain %d", @@ -354,13 +391,13 @@ func ConfirmExecWithSeqNr( if execEvent.SequenceNumber == expectedSeqNr && execEvent.SourceChainSelector == source.Selector { t.Logf("Received ExecutionStateChanged (state %s) on chain %d (offramp %s) from chain %d with expected sequence number %d", executionStateToString(execEvent.State), dest.Selector, offRamp.Address().String(), source.Selector, expectedSeqNr) - return nil + return int(execEvent.State), nil } case <-timer.C: - return fmt.Errorf("timed out waiting for ExecutionStateChanged on chain %d (offramp %s) from chain %d with expected sequence number %d", + return -1, fmt.Errorf("timed out waiting for ExecutionStateChanged on chain %d (offramp %s) from chain %d with expected sequence number %d", dest.Selector, offRamp.Address().String(), source.Selector, expectedSeqNr) case subErr := <-subscription.Err(): - return fmt.Errorf("subscription error: %w", subErr) + return -1, fmt.Errorf("subscription error: %w", subErr) } } } diff --git a/deployment/ccip/test_helpers.go b/deployment/ccip/test_helpers.go index f62b8e15c79..f12a475bc2f 100644 --- a/deployment/ccip/test_helpers.go +++ b/deployment/ccip/test_helpers.go @@ -16,6 +16,7 @@ import ( 
"github.com/ethereum/go-ethereum/core/types" "github.com/pkg/errors" cciptypes "github.com/smartcontractkit/chainlink-ccip/pkg/types/ccipocr3" + commonutils "github.com/smartcontractkit/chainlink-common/pkg/utils" "github.com/smartcontractkit/chainlink-testing-framework/lib/utils/testcontext" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/shared/generated/burn_mint_erc677" @@ -169,14 +170,14 @@ func NewMemoryEnvironment(t *testing.T, lggr logger.Logger, numChains int, numNo require.GreaterOrEqual(t, numChains, 2, "numChains must be at least 2 for home and feed chains") require.GreaterOrEqual(t, numNodes, 4, "numNodes must be at least 4") ctx := testcontext.Get(t) - chains, evmChains := memory.NewMemoryChains(t, numChains) + chains := memory.NewMemoryChains(t, numChains) homeChainSel, feedSel := allocateCCIPChainSelectors(chains) replayBlocks, err := LatestBlocksByChain(ctx, chains) require.NoError(t, err) ab := deployment.NewMemoryAddressBook() crConfig := DeployTestContracts(t, lggr, ab, homeChainSel, feedSel, chains) - nodes := memory.NewNodes(t, zapcore.InfoLevel, evmChains, numNodes, 1, crConfig) + nodes := memory.NewNodes(t, zapcore.InfoLevel, chains, numNodes, 1, crConfig) for _, node := range nodes { require.NoError(t, node.App.Start(ctx)) t.Cleanup(func() { @@ -287,7 +288,9 @@ func TestSendRequest( return seqNum } -func MakeExtraArgsV2(gasLimit uint64, allowOOO bool) []byte { +// MakeEVMExtraArgsV2 creates the extra args for the EVM2Any message that is destined +// for an EVM chain. The extra args contain the gas limit and allow out of order flag. +func MakeEVMExtraArgsV2(gasLimit uint64, allowOOO bool) []byte { // extra args is the tag followed by the gas limit and allowOOO abi-encoded. var extraArgs []byte extraArgs = append(extraArgs, evmExtraArgsV2Tag...) @@ -454,8 +457,19 @@ func ConfirmRequestOnSourceAndDest(t *testing.T, env deployment.Environment, sta })) fmt.Printf("Commit confirmed for seqnr %d", seqNum) - require.NoError(t, - ConfirmExecWithSeqNr(t, env.Chains[sourceCS], env.Chains[destCS], state.Chains[destCS].OffRamp, &startBlock, seqNum)) + require.NoError( + t, + commonutils.JustError( + ConfirmExecWithSeqNr( + t, + env.Chains[sourceCS], + env.Chains[destCS], + state.Chains[destCS].OffRamp, + &startBlock, + seqNum, + ), + ), + ) return nil } diff --git a/deployment/changeset.go b/deployment/changeset.go index e6c0988e67e..abce4942203 100644 --- a/deployment/changeset.go +++ b/deployment/changeset.go @@ -18,7 +18,7 @@ var ( // Its recommended that changesets operate on a small number of chains (e.g. 1-3) // to reduce the risk of partial failures. // If the configuration is unexpected type or format, the changeset should return ErrInvalidConfig. -type ChangeSet func(e Environment, config interface{}) (ChangesetOutput, error) +type ChangeSet[C any] func(e Environment, config C) (ChangesetOutput, error) // ChangesetOutput is the output of a Changeset function. // Think of it like a state transition output. 
diff --git a/deployment/environment/clo/env.go b/deployment/environment/clo/env.go deleted file mode 100644 index d1683ad4e1e..00000000000 --- a/deployment/environment/clo/env.go +++ /dev/null @@ -1,137 +0,0 @@ -package clo - -import ( - "strconv" - "testing" - - "github.com/test-go/testify/require" - - "github.com/smartcontractkit/chainlink-common/pkg/logger" - "github.com/smartcontractkit/chainlink/deployment" - "github.com/smartcontractkit/chainlink/deployment/environment/clo/models" - "github.com/smartcontractkit/chainlink/deployment/environment/memory" -) - -type DonEnvConfig struct { - DonName string - Chains map[uint64]deployment.Chain - Logger logger.Logger - Nops []*models.NodeOperator -} - -func NewDonEnv(t *testing.T, cfg DonEnvConfig) *deployment.Environment { - // no bootstraps in the don as far as capabilities registry is concerned - for _, nop := range cfg.Nops { - for _, node := range nop.Nodes { - for _, chain := range node.ChainConfigs { - if chain.Ocr2Config.IsBootstrap { - t.Fatalf("Don nodes should not be bootstraps nop %s node %s chain %s", nop.ID, node.ID, chain.Network.ChainID) - } - } - } - } - out := deployment.NewEnvironment( - cfg.DonName, - cfg.Logger, - deployment.NewMemoryAddressBook(), - cfg.Chains, - make([]string, 0), - NewJobClient(cfg.Logger, cfg.Nops), - ) - // assume that all the nodes in the provided input nops are part of the don - for _, nop := range cfg.Nops { - for _, node := range nop.Nodes { - out.NodeIDs = append(out.NodeIDs, node.ID) - } - } - - return out -} - -func NewDonEnvWithMemoryChains(t *testing.T, cfg DonEnvConfig, ignore func(*models.NodeChainConfig) bool) *deployment.Environment { - e := NewDonEnv(t, cfg) - // overwrite the chains with memory chains - chains := make(map[uint64]struct{}) - for _, nop := range cfg.Nops { - for _, node := range nop.Nodes { - for _, chain := range node.ChainConfigs { - if ignore(chain) { - continue - } - id, err := strconv.ParseUint(chain.Network.ChainID, 10, 64) - require.NoError(t, err, "failed to parse chain id to uint64") - chains[id] = struct{}{} - } - } - } - var cs []uint64 - for c := range chains { - cs = append(cs, c) - } - memoryChains := memory.NewMemoryChainsWithChainIDs(t, cs) - e.Chains = memoryChains - return e -} - -// MultiDonEnvironment is a single logical deployment environment (like dev, testnet, prod,...). -// It represents the idea that different nodesets host different capabilities. -// Each element in the DonEnv is a logical set of nodes that host the same capabilities. -// This model allows us to reuse the existing Environment abstraction while supporting multiple nodesets at -// expense of slightly abusing the original abstraction. Specifically, the abuse is that -// each Environment in the DonToEnv map is a subset of the target deployment environment. -// One element cannot represent dev and other testnet for example. -type MultiDonEnvironment struct { - donToEnv map[string]*deployment.Environment - Logger logger.Logger - // hacky but temporary to transition to Environment abstraction. 
set by New - Chains map[uint64]deployment.Chain -} - -func (mde MultiDonEnvironment) Flatten(name string) *deployment.Environment { - // TODO: KS-460 integrate with the clo offchain client impl - // may need to extend the Environment abstraction use maps rather than slices for Nodes - // somehow we need to capture the fact that each nodes belong to nodesets which have different capabilities - // purposely nil to catch misuse until we do that work - return deployment.NewEnvironment( - name, - mde.Logger, - deployment.NewMemoryAddressBook(), - mde.Chains, - nil, - nil, - ) -} - -func newMultiDonEnvironment(logger logger.Logger, donToEnv map[string]*deployment.Environment) *MultiDonEnvironment { - chains := make(map[uint64]deployment.Chain) - for _, env := range donToEnv { - for sel, chain := range env.Chains { - if _, exists := chains[sel]; !exists { - chains[sel] = chain - } - } - } - return &MultiDonEnvironment{ - donToEnv: donToEnv, - Logger: logger, - Chains: chains, - } -} - -func NewTestEnv(t *testing.T, lggr logger.Logger, dons map[string]*deployment.Environment) *MultiDonEnvironment { - for _, don := range dons { - //don := don - seen := make(map[uint64]deployment.Chain) - // ensure that generated chains are the same for all environments. this ensures that he in memory representation - // points to a common object for all dons given the same selector. - for sel, chain := range don.Chains { - c, exists := seen[sel] - if exists { - don.Chains[sel] = c - } else { - seen[sel] = chain - } - } - } - return newMultiDonEnvironment(lggr, dons) -} diff --git a/deployment/environment/clo/offchain_client_impl.go b/deployment/environment/clo/offchain_client_impl.go index e670663b925..16c50126398 100644 --- a/deployment/environment/clo/offchain_client_impl.go +++ b/deployment/environment/clo/offchain_client_impl.go @@ -2,6 +2,9 @@ package clo import ( "context" + "fmt" + "slices" + "strings" "go.uber.org/zap" "google.golang.org/grpc" @@ -10,6 +13,7 @@ import ( csav1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/csa" jobv1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/job" nodev1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node" + "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/shared/ptypes" "github.com/smartcontractkit/chainlink/deployment/environment/clo/models" ) @@ -60,39 +64,68 @@ func (j JobClient) GetNode(ctx context.Context, in *nodev1.GetNodeRequest, opts } func (j JobClient) ListNodes(ctx context.Context, in *nodev1.ListNodesRequest, opts ...grpc.CallOption) (*nodev1.ListNodesResponse, error) { - //TODO CCIP-3108 - var fiterIds map[string]struct{} - include := func(id string) bool { - if in.Filter == nil || len(in.Filter.Ids) == 0 { + include := func(node *nodev1.Node) bool { + if in.Filter == nil { return true } - // lazy init - if len(fiterIds) == 0 { - for _, id := range in.Filter.Ids { - fiterIds[id] = struct{}{} + if len(in.Filter.Ids) > 0 { + idx := slices.IndexFunc(in.Filter.Ids, func(id string) bool { + return node.Id == id + }) + if idx < 0 { + return false } } - _, ok := fiterIds[id] - return ok + for _, selector := range in.Filter.Selectors { + idx := slices.IndexFunc(node.Labels, func(label *ptypes.Label) bool { + return label.Key == selector.Key + }) + if idx < 0 { + return false + } + label := node.Labels[idx] + + switch selector.Op { + case ptypes.SelectorOp_IN: + values := strings.Split(*selector.Value, ",") + found := slices.Contains(values, *label.Value) + if !found { + return false + } + 
default: + panic("unimplemented selector") + } + } + return true } var nodes []*nodev1.Node for _, nop := range j.NodeOperators { for _, n := range nop.Nodes { - if include(n.ID) { - nodes = append(nodes, &nodev1.Node{ - Id: n.ID, - Name: n.Name, - PublicKey: *n.PublicKey, // is this the correct val? - IsEnabled: n.Enabled, - IsConnected: n.Connected, - }) + p2pId, err := NodeP2PId(n) + if err != nil { + return nil, fmt.Errorf("failed to get p2p id for node %s: %w", n.ID, err) + } + node := &nodev1.Node{ + Id: n.ID, + Name: n.Name, + PublicKey: *n.PublicKey, + IsEnabled: n.Enabled, + IsConnected: n.Connected, + Labels: []*ptypes.Label{ + { + Key: "p2p_id", + Value: &p2pId, // here n.ID is also peer ID + }, + }, + } + if include(node) { + nodes = append(nodes, node) } } } return &nodev1.ListNodesResponse{ Nodes: nodes, }, nil - } func (j JobClient) ListNodeChainConfigs(ctx context.Context, in *nodev1.ListNodeChainConfigsRequest, opts ...grpc.CallOption) (*nodev1.ListNodeChainConfigsResponse, error) { @@ -160,13 +193,18 @@ type GetNodeOperatorsResponse struct { NodeOperators []*models.NodeOperator `json:"nodeOperators"` } -func NewJobClient(lggr logger.Logger, nops []*models.NodeOperator) *JobClient { +type JobClientConfig struct { + Nops []*models.NodeOperator +} + +func NewJobClient(lggr logger.Logger, cfg JobClientConfig) *JobClient { + c := &JobClient{ - NodeOperators: nops, + NodeOperators: cfg.Nops, nodesByID: make(map[string]*models.Node), lggr: lggr, } - for _, nop := range nops { + for _, nop := range c.NodeOperators { for _, n := range nop.Nodes { node := n c.nodesByID[n.ID] = node // maybe should use the public key instead? @@ -184,10 +222,24 @@ func cloNodeToChainConfigs(n *models.Node) []*nodev1.ChainConfig { } func cloChainCfgToJDChainCfg(ccfg *models.NodeChainConfig) *nodev1.ChainConfig { + var ctype nodev1.ChainType + switch ccfg.Network.ChainType { + case models.ChainTypeEvm: + ctype = nodev1.ChainType_CHAIN_TYPE_EVM + case models.ChainTypeSolana: + ctype = nodev1.ChainType_CHAIN_TYPE_SOLANA + case models.ChainTypeStarknet: + ctype = nodev1.ChainType_CHAIN_TYPE_STARKNET + case models.ChainTypeAptos: + ctype = nodev1.ChainType_CHAIN_TYPE_APTOS + default: + panic(fmt.Sprintf("Unsupported chain family %v", ccfg.Network.ChainType)) + } + return &nodev1.ChainConfig{ Chain: &nodev1.Chain{ Id: ccfg.Network.ChainID, - Type: nodev1.ChainType_CHAIN_TYPE_EVM, // TODO: write conversion func from clo to jd tyes + Type: ctype, }, AccountAddress: ccfg.AccountAddress, AdminAddress: ccfg.AdminAddress, diff --git a/deployment/environment/clo/offchain_client_impl_test.go b/deployment/environment/clo/offchain_client_impl_test.go index 3c9277d9fb0..f2d6fcf6f41 100644 --- a/deployment/environment/clo/offchain_client_impl_test.go +++ b/deployment/environment/clo/offchain_client_impl_test.go @@ -10,11 +10,19 @@ import ( "google.golang.org/grpc" nodev1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node" + "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/shared/ptypes" "github.com/smartcontractkit/chainlink/deployment/environment/clo" "github.com/smartcontractkit/chainlink/deployment/environment/clo/models" "github.com/smartcontractkit/chainlink/v2/core/logger" ) +var ( + p2pid_1 = "p2p_12D3KooWBCMCCZZ8x57AXvJvpCujqhZzTjWXbReaRE7807807807" + p2pid_2 = "p2p_12D3KooWBCMCCZZ8x57AXvJvpCujqhZzTjWXbReaRE6868686868" + p2pid_3 = "p2p_12D3KooWBCMCCZZ8x57AXvJvpCujqhZzTjWXbReaRE9999999999" + p2pid_4 = "p2p_12D3KooWBCMCCZZ8x57AXvJvpCujqhZzTjWXbReaRE1000000000" +) + var testNops 
= ` [ { @@ -26,6 +34,20 @@ var testNops = ` "name": "Chainlink Sepolia Prod Keystone One 9", "publicKey": "412dc6fe48ea4e34baaa77da2e3b032d39b938597b6f3d61fe7ed183a827a431", "connected": true, + "chainConfigs": [ + { + "network": { + "id": "140", + "chainID": "421614", + "chainType": "EVM" + }, + "ocr2Config": { + "p2pKeyBundle": { + "peerID": "p2p_12D3KooWBCMCCZZ8x57AXvJvpCujqhZzTjWXbReaRE7807807807" + } + } + } + ], "supportedProducts": [ "WORKFLOW", "OCR3_CAPABILITY" @@ -34,51 +56,93 @@ var testNops = ` ], "createdAt": "2024-08-14T19:00:07.113658Z" }, - { - "id": "68", - "name": "Chainlink Keystone Node Operator 8", - "nodes": [ - { - "id": "781", - "name": "Chainlink Sepolia Prod Keystone One 8", - "publicKey": "1141dd1e46797ced9b0fbad49115f18507f6f6e6e3cc86e7e5ba169e58645adc", - "connected": true, - "supportedProducts": [ - "WORKFLOW", - "OCR3_CAPABILITY" - ] - } - ], - "createdAt": "2024-08-14T20:26:37.622463Z" - }, - { - "id": "999", - "name": "Chainlink Keystone Node Operator 100", - "nodes": [ - { - "id": "999", - "name": "Chainlink Sepolia Prod Keystone One 999", - "publicKey": "9991dd1e46797ced9b0fbad49115f18507f6f6e6e3cc86e7e5ba169e58999999", - "connected": true, - "supportedProducts": [ - "WORKFLOW", - "OCR3_CAPABILITY" - ] - }, - { - "id": "1000", - "name": "Chainlink Sepolia Prod Keystone One 1000", - "publicKey": "1000101e46797ced9b0fbad49115f18507f6f6e6e3cc86e7e5ba169e58641000", - "connected": true, - "supportedProducts": [ - "WORKFLOW", - "OCR3_CAPABILITY" - ] - } - ], - "createdAt": "2024-08-14T20:26:37.622463Z" - } -] + { + "id": "68", + "name": "Chainlink Keystone Node Operator 8", + "nodes": [ + { + "id": "781", + "name": "Chainlink Sepolia Prod Keystone One 8", + "publicKey": "1141dd1e46797ced9b0fbad49115f18507f6f6e6e3cc86e7e5ba169e58645adc", + "connected": true, + "chainConfigs": [ + { + "network": { + "id": "140", + "chainID": "421614", + "chainType": "EVM" + }, + "ocr2Config": { + "p2pKeyBundle": { + "peerID": "p2p_12D3KooWBCMCCZZ8x57AXvJvpCujqhZzTjWXbReaRE6868686868" + } + } + } + ], + "supportedProducts": [ + "WORKFLOW", + "OCR3_CAPABILITY" + ] + } + ], + "createdAt": "2024-08-14T20:26:37.622463Z" + }, + { + "id": "999", + "name": "Chainlink Keystone Node Operator 100", + "nodes": [ + { + "id": "999", + "name": "Chainlink Sepolia Prod Keystone One 999", + "publicKey": "9991dd1e46797ced9b0fbad49115f18507f6f6e6e3cc86e7e5ba169e58999999", + "connected": true, + "chainConfigs": [ + { + "network": { + "id": "140", + "chainID": "421614", + "chainType": "EVM" + }, + "ocr2Config": { + "p2pKeyBundle": { + "peerID": "p2p_12D3KooWBCMCCZZ8x57AXvJvpCujqhZzTjWXbReaRE9999999999" + } + } + } + ], + "supportedProducts": [ + "WORKFLOW", + "OCR3_CAPABILITY" + ] + }, + { + "id": "1000", + "name": "Chainlink Sepolia Prod Keystone One 1000", + "publicKey": "1000101e46797ced9b0fbad49115f18507f6f6e6e3cc86e7e5ba169e58641000", + "connected": true, + "chainConfigs": [ + { + "network": { + "id": "140", + "chainID": "421614", + "chainType": "EVM" + }, + "ocr2Config": { + "p2pKeyBundle": { + "peerID": "p2p_12D3KooWBCMCCZZ8x57AXvJvpCujqhZzTjWXbReaRE1000000000" + } + } + } + ], + "supportedProducts": [ + "WORKFLOW", + "OCR3_CAPABILITY" + ] + } + ], + "createdAt": "2024-08-14T20:26:37.622463Z" + } +] ` func parseTestNops(t *testing.T) []*models.NodeOperator { @@ -94,7 +158,8 @@ func TestJobClient_ListNodes(t *testing.T) { nops := parseTestNops(t) type fields struct { - NodeOperators []*models.NodeOperator + NodeOperators []*models.NodeOperator + RemapNodeIDsToPeerIDs bool } type args struct { 
ctx context.Context @@ -135,6 +200,12 @@ func TestJobClient_ListNodes(t *testing.T) { Name: "Chainlink Sepolia Prod Keystone One 9", PublicKey: "412dc6fe48ea4e34baaa77da2e3b032d39b938597b6f3d61fe7ed183a827a431", IsConnected: true, + Labels: []*ptypes.Label{ + { + Key: "p2p_id", + Value: &p2pid_1, + }, + }, }, }, }, @@ -155,12 +226,24 @@ func TestJobClient_ListNodes(t *testing.T) { Name: "Chainlink Sepolia Prod Keystone One 9", PublicKey: "412dc6fe48ea4e34baaa77da2e3b032d39b938597b6f3d61fe7ed183a827a431", IsConnected: true, + Labels: []*ptypes.Label{ + { + Key: "p2p_id", + Value: &p2pid_1, + }, + }, }, { Id: "781", Name: "Chainlink Sepolia Prod Keystone One 8", PublicKey: "1141dd1e46797ced9b0fbad49115f18507f6f6e6e3cc86e7e5ba169e58645adc", IsConnected: true, + Labels: []*ptypes.Label{ + { + Key: "p2p_id", + Value: &p2pid_2, + }, + }, }, }, }, @@ -181,12 +264,24 @@ func TestJobClient_ListNodes(t *testing.T) { Name: "Chainlink Sepolia Prod Keystone One 999", PublicKey: "9991dd1e46797ced9b0fbad49115f18507f6f6e6e3cc86e7e5ba169e58999999", IsConnected: true, + Labels: []*ptypes.Label{ + { + Key: "p2p_id", + Value: &p2pid_3, + }, + }, }, { Id: "1000", Name: "Chainlink Sepolia Prod Keystone One 1000", PublicKey: "1000101e46797ced9b0fbad49115f18507f6f6e6e3cc86e7e5ba169e58641000", IsConnected: true, + Labels: []*ptypes.Label{ + { + Key: "p2p_id", + Value: &p2pid_4, + }, + }, }, }, }, @@ -194,7 +289,7 @@ func TestJobClient_ListNodes(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - j := clo.NewJobClient(lggr, tt.fields.NodeOperators) + j := clo.NewJobClient(lggr, clo.JobClientConfig{Nops: tt.fields.NodeOperators}) got, err := j.ListNodes(tt.args.ctx, tt.args.in, tt.args.opts...) if (err != nil) != tt.wantErr { @@ -558,7 +653,7 @@ func TestJobClient_ListNodeChainConfigs(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - j := clo.NewJobClient(lggr, tt.fields.NodeOperators) + j := clo.NewJobClient(lggr, clo.JobClientConfig{Nops: tt.fields.NodeOperators}) got, err := j.ListNodeChainConfigs(tt.args.ctx, tt.args.in, tt.args.opts...) 
if (err != nil) != tt.wantErr { diff --git a/deployment/environment/clo/utils.go b/deployment/environment/clo/utils.go index 79502ef6706..67be141a6db 100644 --- a/deployment/environment/clo/utils.go +++ b/deployment/environment/clo/utils.go @@ -1,6 +1,8 @@ package clo import ( + "fmt" + jd "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node" "github.com/smartcontractkit/chainlink/deployment/environment/clo/models" ) @@ -30,3 +32,70 @@ func NewChainConfig(chain *models.NodeChainConfig) *jd.ChainConfig { }, } } + +func NodeP2PId(n *models.Node) (string, error) { + p2pIds := make(map[string]struct{}) + for _, cc := range n.ChainConfigs { + if cc.Ocr2Config != nil && cc.Ocr2Config.P2pKeyBundle != nil { + p2pIds[cc.Ocr2Config.P2pKeyBundle.PeerID] = struct{}{} + } + } + if len(p2pIds) == 0 { + return "", fmt.Errorf("no p2p id found for node %s", n.ID) + } + if len(p2pIds) > 1 { + return "", fmt.Errorf("multiple p2p ids found for node %s", n.ID) + } + var p2pId string + for k := range p2pIds { + p2pId = k + break + } + return p2pId, nil +} + +func NodesToPeerIDs(nodes []*models.Node) ([]string, error) { + var p2pIds []string + for _, node := range nodes { + p2pId, err := NodeP2PId(node) + if err != nil { + return nil, err + } + p2pIds = append(p2pIds, p2pId) + } + return p2pIds, nil +} + +func NopsToNodes(nops []*models.NodeOperator) []*models.Node { + var nodes []*models.Node + for _, nop := range nops { + nodes = append(nodes, nop.Nodes...) + } + return nodes +} + +func NopsToPeerIds(nops []*models.NodeOperator) ([]string, error) { + return NodesToPeerIDs(NopsToNodes(nops)) +} + +func SetIdToPeerId(n *models.Node) error { + p2pId, err := NodeP2PId(n) + if err != nil { + return err + } + n.ID = p2pId + return nil +} + +// SetNodeIdsToPeerIds sets the ID of each node in the NOPs to the P2P ID of the node +// It mutates the input NOPs +func SetNodeIdsToPeerIds(nops []*models.NodeOperator) error { + for _, nop := range nops { + for _, n := range nop.Nodes { + if err := SetIdToPeerId(n); err != nil { + return err + } + } + } + return nil +} diff --git a/deployment/environment/clo/utils_test.go b/deployment/environment/clo/utils_test.go new file mode 100644 index 00000000000..e2202d4e14f --- /dev/null +++ b/deployment/environment/clo/utils_test.go @@ -0,0 +1,168 @@ +package clo + +import ( + "testing" + + "github.com/smartcontractkit/chainlink/deployment/environment/clo/models" + "github.com/stretchr/testify/assert" +) + +func TestSetNodeIdsToPeerIds(t *testing.T) { + type args struct { + nops []*models.NodeOperator + } + tests := []struct { + name string + args args + want []*models.NodeOperator + wantErr bool + }{ + { + name: "no nodes", + args: args{ + nops: []*models.NodeOperator{ + { + ID: "nop1", + }, + }, + }, + want: []*models.NodeOperator{ + { + ID: "nop1", + }, + }, + }, + { + name: "error no p2p key bundle", + args: args{ + nops: []*models.NodeOperator{ + { + ID: "nop1", + Nodes: []*models.Node{ + { + ID: "node1", + ChainConfigs: []*models.NodeChainConfig{ + { + Ocr2Config: &models.NodeOCR2Config{}, + }, + }, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "error multiple p2p key bundle", + args: args{ + nops: []*models.NodeOperator{ + { + ID: "nop1", + Nodes: []*models.Node{ + { + ID: "node1", + ChainConfigs: []*models.NodeChainConfig{ + { + Ocr2Config: &models.NodeOCR2Config{ + P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ + PeerID: "peer1", + }, + }, + }, + { + Ocr2Config: &models.NodeOCR2Config{ + P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ + 
PeerID: "peer2", + }, + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "multiple nodes", + args: args{ + nops: []*models.NodeOperator{ + { + ID: "nop1", + Nodes: []*models.Node{ + { + ID: "node1", + ChainConfigs: []*models.NodeChainConfig{ + { + Ocr2Config: &models.NodeOCR2Config{ + P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ + PeerID: "peer1", + }, + }, + }, + }, + }, + { + ID: "node2", + ChainConfigs: []*models.NodeChainConfig{ + { + Ocr2Config: &models.NodeOCR2Config{ + P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ + PeerID: "another peer id", + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: []*models.NodeOperator{ + { + ID: "nop1", + Nodes: []*models.Node{ + { + ID: "peer1", + ChainConfigs: []*models.NodeChainConfig{ + { + Ocr2Config: &models.NodeOCR2Config{ + P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ + PeerID: "peer1", + }, + }, + }, + }, + }, + { + ID: "another peer id", + ChainConfigs: []*models.NodeChainConfig{ + { + Ocr2Config: &models.NodeOCR2Config{ + P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ + PeerID: "another peer id", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := SetNodeIdsToPeerIds(tt.args.nops) + if (err != nil) != tt.wantErr { + t.Errorf("SetNodeIdsToPeerIds() error = %v, wantErr %v", err, tt.wantErr) + } + if err != nil { + return + } + assert.EqualValues(t, tt.args.nops, tt.want) + }) + } +} diff --git a/deployment/environment/devenv/don.go b/deployment/environment/devenv/don.go index c14216f3894..830f5b921bc 100644 --- a/deployment/environment/devenv/don.go +++ b/deployment/environment/devenv/don.go @@ -7,7 +7,6 @@ import ( "strings" "time" - "github.com/AlekSi/pointer" "github.com/hashicorp/go-multierror" "github.com/rs/zerolog" "github.com/sethvargo/go-retry" @@ -34,6 +33,7 @@ type NodeInfo struct { Name string // name of the node, used to identify the node, helpful in logs AdminAddr string // admin address to send payments to, applicable only for non-bootstrap nodes MultiAddr string // multi address denoting node's FQN (needed for deriving P2PBootstrappers in OCR), applicable only for bootstrap nodes + Labels map[string]string // labels to use when registering the node with job distributor } type DON struct { @@ -44,7 +44,7 @@ func (don *DON) PluginNodes() []Node { var pluginNodes []Node for _, node := range don.Nodes { for _, label := range node.labels { - if label.Key == NodeLabelKeyType && pointer.GetString(label.Value) == NodeLabelValuePlugin { + if label.Key == NodeLabelKeyType && value(label.Value) == NodeLabelValuePlugin { pluginNodes = append(pluginNodes, node) } } @@ -104,6 +104,12 @@ func NewRegisteredDON(ctx context.Context, nodeInfo []NodeInfo, jd JobDistributo return nil, fmt.Errorf("failed to create node %d: %w", i, err) } // node Labels so that it's easier to query them + for key, value := range info.Labels { + node.labels = append(node.labels, &ptypes.Label{ + Key: key, + Value: &value, + }) + } if info.IsBootstrap { // create multi address for OCR2, applicable only for bootstrap nodes if info.MultiAddr == "" { @@ -115,7 +121,7 @@ func NewRegisteredDON(ctx context.Context, nodeInfo []NodeInfo, jd JobDistributo node.adminAddr = "" node.labels = append(node.labels, &ptypes.Label{ Key: NodeLabelKeyType, - Value: pointer.ToString(NodeLabelValueBootstrap), + Value: ptr(NodeLabelValueBootstrap), }) } else { // multi address is not applicable for non-bootstrap nodes @@ -123,7 +129,7 @@ func NewRegisteredDON(ctx 
context.Context, nodeInfo []NodeInfo, jd JobDistributo node.multiAddr = "" node.labels = append(node.labels, &ptypes.Label{ Key: NodeLabelKeyType, - Value: pointer.ToString(NodeLabelValuePlugin), + Value: ptr(NodeLabelValuePlugin), }) } // Set up Job distributor in node and register node with the job distributor @@ -181,17 +187,35 @@ type JDChainConfigInput struct { func (n *Node) CreateCCIPOCRSupportedChains(ctx context.Context, chains []JDChainConfigInput, jd JobDistributor) error { for i, chain := range chains { chainId := strconv.FormatUint(chain.ChainID, 10) - accountAddr, err := n.gqlClient.FetchAccountAddress(ctx, chainId) - if err != nil { - return fmt.Errorf("failed to fetch account address for node %s: %w", n.Name, err) - } - if accountAddr == nil { - return fmt.Errorf("no account address found for node %s", n.Name) - } - if n.AccountAddr == nil { - n.AccountAddr = make(map[uint64]string) + var account string + switch chain.ChainType { + case "EVM": + accountAddr, err := n.gqlClient.FetchAccountAddress(ctx, chainId) + if err != nil { + return fmt.Errorf("failed to fetch account address for node %s: %w", n.Name, err) + } + if accountAddr == nil { + return fmt.Errorf("no account address found for node %s", n.Name) + } + if n.AccountAddr == nil { + n.AccountAddr = make(map[uint64]string) + } + n.AccountAddr[chain.ChainID] = *accountAddr + account = *accountAddr + case "APTOS", "SOLANA": + accounts, err := n.gqlClient.FetchKeys(ctx, chain.ChainType) + if err != nil { + return fmt.Errorf("failed to fetch account address for node %s: %w", n.Name, err) + } + if len(accounts) == 0 { + return fmt.Errorf("no account address found for node %s", n.Name) + } + + account = accounts[0] + default: + return fmt.Errorf("unsupported chainType %v", chain.ChainType) } - n.AccountAddr[chain.ChainID] = *accountAddr + peerID, err := n.gqlClient.FetchP2PPeerID(ctx) if err != nil { return fmt.Errorf("failed to fetch peer id for node %s: %w", n.Name, err) @@ -210,7 +234,7 @@ func (n *Node) CreateCCIPOCRSupportedChains(ctx context.Context, chains []JDChai // fetch node labels to know if the node is bootstrap or plugin isBootstrap := false for _, label := range n.labels { - if label.Key == NodeLabelKeyType && pointer.GetString(label.Value) == NodeLabelValueBootstrap { + if label.Key == NodeLabelKeyType && value(label.Value) == NodeLabelValueBootstrap { isBootstrap = true break } @@ -221,12 +245,12 @@ func (n *Node) CreateCCIPOCRSupportedChains(ctx context.Context, chains []JDChai JobDistributorID: n.JDId, ChainID: chainId, ChainType: chain.ChainType, - AccountAddr: pointer.GetString(accountAddr), + AccountAddr: account, AdminAddr: n.adminAddr, Ocr2Enabled: true, Ocr2IsBootstrap: isBootstrap, Ocr2Multiaddr: n.multiAddr, - Ocr2P2PPeerID: pointer.GetString(peerID), + Ocr2P2PPeerID: value(peerID), Ocr2KeyBundleID: ocr2BundleId, Ocr2Plugins: `{"commit":true,"execute":true,"median":false,"mercury":false}`, }) @@ -291,6 +315,20 @@ func (n *Node) RegisterNodeToJobDistributor(ctx context.Context, jd JobDistribut return fmt.Errorf("no csa key found for node %s", n.Name) } csaKey := strings.TrimPrefix(*csaKeyRes, "csa_") + + // tag nodes with p2p_id for easy lookup + peerID, err := n.gqlClient.FetchP2PPeerID(ctx) + if err != nil { + return fmt.Errorf("failed to fetch peer id for node %s: %w", n.Name, err) + } + if peerID == nil { + return fmt.Errorf("no peer id found for node %s", n.Name) + } + n.labels = append(n.labels, &ptypes.Label{ + Key: "p2p_id", + Value: peerID, + }) + // register the node in the job 
distributor registerResponse, err := jd.RegisterNode(ctx, &nodev1.RegisterNodeRequest{ PublicKey: csaKey, @@ -381,3 +419,15 @@ func (n *Node) ReplayLogs(blockByChain map[uint64]uint64) error { } return nil } + +func ptr[T any](v T) *T { + return &v +} + +func value[T any](v *T) T { + zero := new(T) + if v == nil { + return *zero + } + return *v +} diff --git a/deployment/environment/devenv/don_test.go b/deployment/environment/devenv/don_test.go new file mode 100644 index 00000000000..f93436f72f5 --- /dev/null +++ b/deployment/environment/devenv/don_test.go @@ -0,0 +1,19 @@ +package devenv + +import ( + "testing" + + "github.com/test-go/testify/require" +) + +func TestPtrVal(t *testing.T) { + + x := "hello" + xptr := ptr(x) + got := value(xptr) + require.Equal(t, x, got) + + var y *string + got = value(y) + require.Equal(t, "", got) +} diff --git a/deployment/environment/devenv/jd.go b/deployment/environment/devenv/jd.go index 2374aa1366c..9af8412d61e 100644 --- a/deployment/environment/devenv/jd.go +++ b/deployment/environment/devenv/jd.go @@ -4,8 +4,10 @@ import ( "context" "fmt" + "golang.org/x/oauth2" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" csav1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/csa" jobv1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/job" @@ -17,11 +19,39 @@ type JDConfig struct { GRPC string WSRPC string Creds credentials.TransportCredentials + Auth oauth2.TokenSource NodeInfo []NodeInfo } +func authTokenInterceptor(source oauth2.TokenSource) grpc.UnaryClientInterceptor { + return func( + ctx context.Context, + method string, + req, reply any, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + opts ...grpc.CallOption, + ) error { + token, err := source.Token() + if err != nil { + return err + } + + return invoker( + metadata.AppendToOutgoingContext(ctx, "authorization", "Bearer "+token.AccessToken), + method, req, reply, cc, opts..., + ) + } +} + func NewJDConnection(cfg JDConfig) (*grpc.ClientConn, error) { - conn, err := grpc.NewClient(cfg.GRPC, grpc.WithTransportCredentials(cfg.Creds)) + opts := []grpc.DialOption{ + grpc.WithTransportCredentials(cfg.Creds), + } + if cfg.Auth != nil { + opts = append(opts, grpc.WithUnaryInterceptor(authTokenInterceptor(cfg.Auth))) + } + conn, err := grpc.NewClient(cfg.GRPC, opts...) if err != nil { return nil, fmt.Errorf("failed to connect Job Distributor service. Err: %w", err) } diff --git a/deployment/environment/memory/chain.go b/deployment/environment/memory/chain.go index bad50be9b01..1bb359f9c53 100644 --- a/deployment/environment/memory/chain.go +++ b/deployment/environment/memory/chain.go @@ -71,7 +71,7 @@ func GenerateChainsWithIds(t *testing.T, chainIDs []uint64) map[uint64]EVMChain owner, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) require.NoError(t, err) backend := simulated.NewBackend(types.GenesisAlloc{ - owner.From: {Balance: big.NewInt(0).Mul(big.NewInt(100), big.NewInt(params.Ether))}}, + owner.From: {Balance: big.NewInt(0).Mul(big.NewInt(700000), big.NewInt(params.Ether))}}, simulated.WithBlockGasLimit(10000000)) backend.Commit() // Note initializes block timestamp to now(). 
chains[chainID] = EVMChain{ diff --git a/deployment/environment/memory/environment.go b/deployment/environment/memory/environment.go index 7b41a893f75..a1478a3bf52 100644 --- a/deployment/environment/memory/environment.go +++ b/deployment/environment/memory/environment.go @@ -5,6 +5,7 @@ import ( "fmt" "testing" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/core/types" "github.com/hashicorp/consul/sdk/freeport" "github.com/stretchr/testify/require" @@ -28,11 +29,23 @@ type MemoryEnvironmentConfig struct { RegistryConfig deployment.CapabilityRegistryConfig } +// For placeholders like aptos +func NewMemoryChain(t *testing.T, selector uint64) deployment.Chain { + return deployment.Chain{ + Selector: selector, + Client: nil, + DeployerKey: &bind.TransactOpts{}, + Confirm: func(tx *types.Transaction) (uint64, error) { + return 0, nil + }, + } +} + // Needed for environment variables on the node which point to prexisitng addresses. // i.e. CapReg. -func NewMemoryChains(t *testing.T, numChains int) (map[uint64]deployment.Chain, map[uint64]EVMChain) { +func NewMemoryChains(t *testing.T, numChains int) map[uint64]deployment.Chain { mchains := GenerateChains(t, numChains) - return generateMemoryChain(t, mchains), mchains + return generateMemoryChain(t, mchains) } func NewMemoryChainsWithChainIDs(t *testing.T, chainIDs []uint64) map[uint64]deployment.Chain { @@ -77,20 +90,20 @@ func generateMemoryChain(t *testing.T, inputs map[uint64]EVMChain) map[uint64]de return chains } -func NewNodes(t *testing.T, logLevel zapcore.Level, mchains map[uint64]EVMChain, numNodes, numBootstraps int, registryConfig deployment.CapabilityRegistryConfig) map[string]Node { +func NewNodes(t *testing.T, logLevel zapcore.Level, chains map[uint64]deployment.Chain, numNodes, numBootstraps int, registryConfig deployment.CapabilityRegistryConfig) map[string]Node { nodesByPeerID := make(map[string]Node) ports := freeport.GetN(t, numBootstraps+numNodes) // bootstrap nodes must be separate nodes from plugin nodes, // since we won't run a bootstrapper and a plugin oracle on the same // chainlink node in production. for i := 0; i < numBootstraps; i++ { - node := NewNode(t, ports[i], mchains, logLevel, true /* bootstrap */, registryConfig) + node := NewNode(t, ports[i], chains, logLevel, true /* bootstrap */, registryConfig) nodesByPeerID[node.Keys.PeerID.String()] = *node // Note in real env, this ID is allocated by JD. } for i := 0; i < numNodes; i++ { // grab port offset by numBootstraps, since above loop also takes some ports. - node := NewNode(t, ports[numBootstraps+i], mchains, logLevel, false /* bootstrap */, registryConfig) + node := NewNode(t, ports[numBootstraps+i], chains, logLevel, false /* bootstrap */, registryConfig) nodesByPeerID[node.Keys.PeerID.String()] = *node // Note in real env, this ID is allocated by JD. } @@ -117,8 +130,8 @@ func NewMemoryEnvironmentFromChainsNodes(t *testing.T, // To be used by tests and any kind of deployment logic. 
func NewMemoryEnvironment(t *testing.T, lggr logger.Logger, logLevel zapcore.Level, config MemoryEnvironmentConfig) deployment.Environment { - chains, mchains := NewMemoryChains(t, config.Chains) - nodes := NewNodes(t, logLevel, mchains, config.Nodes, config.Bootstraps, config.RegistryConfig) + chains := NewMemoryChains(t, config.Chains) + nodes := NewNodes(t, logLevel, chains, config.Nodes, config.Bootstraps, config.RegistryConfig) var nodeIDs []string for id := range nodes { nodeIDs = append(nodeIDs, id) diff --git a/deployment/environment/memory/job_client.go b/deployment/environment/memory/job_client.go index d572f5f92f5..df1e3d5c5d5 100644 --- a/deployment/environment/memory/job_client.go +++ b/deployment/environment/memory/job_client.go @@ -4,15 +4,21 @@ import ( "context" "errors" "fmt" + "slices" "strconv" + "strings" "github.com/ethereum/go-ethereum/common" "google.golang.org/grpc" + chainsel "github.com/smartcontractkit/chain-selectors" + csav1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/csa" jobv1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/job" nodev1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node" + "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/shared/ptypes" "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/validate" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" ) type JobClient struct { @@ -62,7 +68,7 @@ func (j JobClient) GetNode(ctx context.Context, in *nodev1.GetNodeRequest, opts return &nodev1.GetNodeResponse{ Node: &nodev1.Node{ Id: in.Id, - PublicKey: n.Keys.OCRKeyBundle.ID(), // is this the correct val? + PublicKey: n.Keys.CSA.PublicKeyString(), IsEnabled: true, IsConnected: true, }, @@ -71,35 +77,61 @@ func (j JobClient) GetNode(ctx context.Context, in *nodev1.GetNodeRequest, opts func (j JobClient) ListNodes(ctx context.Context, in *nodev1.ListNodesRequest, opts ...grpc.CallOption) (*nodev1.ListNodesResponse, error) { //TODO CCIP-3108 - var fiterIds map[string]struct{} - include := func(id string) bool { - if in.Filter == nil || len(in.Filter.Ids) == 0 { + include := func(node *nodev1.Node) bool { + if in.Filter == nil { return true } - // lazy init - if len(fiterIds) == 0 { - for _, id := range in.Filter.Ids { - fiterIds[id] = struct{}{} + if len(in.Filter.Ids) > 0 { + idx := slices.IndexFunc(in.Filter.Ids, func(id string) bool { + return node.Id == id + }) + if idx < 0 { + return false + } + } + for _, selector := range in.Filter.Selectors { + idx := slices.IndexFunc(node.Labels, func(label *ptypes.Label) bool { + return label.Key == selector.Key + }) + if idx < 0 { + return false + } + label := node.Labels[idx] + + switch selector.Op { + case ptypes.SelectorOp_IN: + values := strings.Split(*selector.Value, ",") + found := slices.Contains(values, *label.Value) + if !found { + return false + } + default: + panic("unimplemented selector") } } - _, ok := fiterIds[id] - return ok + return true } var nodes []*nodev1.Node for id, n := range j.Nodes { - if include(id) { - nodes = append(nodes, &nodev1.Node{ - Id: id, - PublicKey: n.Keys.OCRKeyBundle.ID(), // is this the correct val? 
- IsEnabled: true, - IsConnected: true, - }) + node := &nodev1.Node{ + Id: id, + PublicKey: n.Keys.CSA.ID(), + IsEnabled: true, + IsConnected: true, + Labels: []*ptypes.Label{ + { + Key: "p2p_id", + Value: ptr(n.Keys.PeerID.String()), + }, + }, + } + if include(node) { + nodes = append(nodes, node) } } return &nodev1.ListNodesResponse{ Nodes: nodes, }, nil - } func (j JobClient) ListNodeChainConfigs(ctx context.Context, in *nodev1.ListNodeChainConfigsRequest, opts ...grpc.CallOption) (*nodev1.ListNodeChainConfigsResponse, error) { @@ -113,8 +145,17 @@ func (j JobClient) ListNodeChainConfigs(ctx context.Context, in *nodev1.ListNode if !ok { return nil, fmt.Errorf("node id not found: %s", in.Filter.NodeIds[0]) } - offpk := n.Keys.OCRKeyBundle.OffchainPublicKey() - cpk := n.Keys.OCRKeyBundle.ConfigEncryptionPublicKey() + evmBundle := n.Keys.OCRKeyBundles[chaintype.EVM] + offpk := evmBundle.OffchainPublicKey() + cpk := evmBundle.ConfigEncryptionPublicKey() + + evmKeyBundle := &nodev1.OCR2Config_OCRKeyBundle{ + BundleId: evmBundle.ID(), + ConfigPublicKey: common.Bytes2Hex(cpk[:]), + OffchainPublicKey: common.Bytes2Hex(offpk[:]), + OnchainSigningAddress: evmBundle.OnChainPublicKey(), + } + var chainConfigs []*nodev1.ChainConfig for evmChainID, transmitter := range n.Keys.TransmittersByEVMChainID { chainConfigs = append(chainConfigs, &nodev1.ChainConfig{ @@ -123,7 +164,7 @@ func (j JobClient) ListNodeChainConfigs(ctx context.Context, in *nodev1.ListNode Type: nodev1.ChainType_CHAIN_TYPE_EVM, }, AccountAddress: transmitter.String(), - AdminAddress: "", + AdminAddress: transmitter.String(), // TODO: custom address Ocr1Config: nil, Ocr2Config: &nodev1.OCR2Config{ Enabled: true, @@ -131,19 +172,91 @@ func (j JobClient) ListNodeChainConfigs(ctx context.Context, in *nodev1.ListNode P2PKeyBundle: &nodev1.OCR2Config_P2PKeyBundle{ PeerId: n.Keys.PeerID.String(), }, - OcrKeyBundle: &nodev1.OCR2Config_OCRKeyBundle{ - BundleId: n.Keys.OCRKeyBundle.ID(), - ConfigPublicKey: common.Bytes2Hex(cpk[:]), - OffchainPublicKey: common.Bytes2Hex(offpk[:]), - OnchainSigningAddress: n.Keys.OCRKeyBundle.OnChainPublicKey(), - }, + OcrKeyBundle: evmKeyBundle, Multiaddr: n.Addr.String(), Plugins: nil, ForwarderAddress: ptr(""), }, }) } + for _, selector := range n.Chains { + family, err := chainsel.GetSelectorFamily(selector) + if err != nil { + return nil, err + } + chainID, err := chainsel.ChainIdFromSelector(selector) + if err != nil { + return nil, err + } + + if family == chainsel.FamilyEVM { + // already handled above + continue + } + + var ocrtype chaintype.ChainType + switch family { + case chainsel.FamilyEVM: + ocrtype = chaintype.EVM + case chainsel.FamilySolana: + ocrtype = chaintype.Solana + case chainsel.FamilyStarknet: + ocrtype = chaintype.StarkNet + case chainsel.FamilyCosmos: + ocrtype = chaintype.Cosmos + case chainsel.FamilyAptos: + ocrtype = chaintype.Aptos + default: + panic(fmt.Sprintf("Unsupported chain family %v", family)) + } + + bundle := n.Keys.OCRKeyBundles[ocrtype] + + offpk := bundle.OffchainPublicKey() + cpk := bundle.ConfigEncryptionPublicKey() + + keyBundle := &nodev1.OCR2Config_OCRKeyBundle{ + BundleId: bundle.ID(), + ConfigPublicKey: common.Bytes2Hex(cpk[:]), + OffchainPublicKey: common.Bytes2Hex(offpk[:]), + OnchainSigningAddress: bundle.OnChainPublicKey(), + } + + var ctype nodev1.ChainType + switch family { + case chainsel.FamilyEVM: + ctype = nodev1.ChainType_CHAIN_TYPE_EVM + case chainsel.FamilySolana: + ctype = nodev1.ChainType_CHAIN_TYPE_SOLANA + case chainsel.FamilyStarknet: + ctype 
= nodev1.ChainType_CHAIN_TYPE_STARKNET + case chainsel.FamilyAptos: + ctype = nodev1.ChainType_CHAIN_TYPE_APTOS + default: + panic(fmt.Sprintf("Unsupported chain family %v", family)) + } + chainConfigs = append(chainConfigs, &nodev1.ChainConfig{ + Chain: &nodev1.Chain{ + Id: strconv.Itoa(int(chainID)), + Type: ctype, + }, + AccountAddress: "", // TODO: support AccountAddress + AdminAddress: "", + Ocr1Config: nil, + Ocr2Config: &nodev1.OCR2Config{ + Enabled: true, + IsBootstrap: n.IsBoostrap, + P2PKeyBundle: &nodev1.OCR2Config_P2PKeyBundle{ + PeerId: n.Keys.PeerID.String(), + }, + OcrKeyBundle: keyBundle, + Multiaddr: n.Addr.String(), + Plugins: nil, + ForwarderAddress: ptr(""), + }, + }) + } // TODO: I think we can pull it from the feeds manager. return &nodev1.ListNodeChainConfigsResponse{ ChainConfigs: chainConfigs, diff --git a/deployment/environment/memory/node.go b/deployment/environment/memory/node.go index 90ad264faa9..c2e4e457fbd 100644 --- a/deployment/environment/memory/node.go +++ b/deployment/environment/memory/node.go @@ -15,6 +15,7 @@ import ( chainsel "github.com/smartcontractkit/chain-selectors" "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" + "golang.org/x/exp/maps" "github.com/smartcontractkit/chainlink-common/pkg/config" "github.com/smartcontractkit/chainlink-common/pkg/loop" @@ -35,6 +36,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" "github.com/smartcontractkit/chainlink/v2/core/services/keystore" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey" "github.com/smartcontractkit/chainlink/v2/core/services/relay" @@ -46,6 +48,7 @@ import ( type Node struct { App chainlink.Application // Transmitter key/OCR keys for this node + Chains []uint64 // chain selectors Keys Keys Addr net.TCPAddr IsBoostrap bool @@ -68,11 +71,23 @@ func (n Node) ReplayLogs(chains map[uint64]uint64) error { func NewNode( t *testing.T, port int, // Port for the P2P V2 listener. - chains map[uint64]EVMChain, + chains map[uint64]deployment.Chain, logLevel zapcore.Level, bootstrap bool, registryConfig deployment.CapabilityRegistryConfig, ) *Node { + evmchains := make(map[uint64]EVMChain) + for _, chain := range chains { + evmChainID, err := chainsel.ChainIdFromSelector(chain.Selector) + if err != nil { + t.Fatal(err) + } + evmchains[evmChainID] = EVMChain{ + Backend: chain.Client.(*Backend).Sim, + DeployerKey: chain.DeployerKey, + } + } + // Do not want to load fixtures as they contain a dummy chainID. // Create database and initial configuration. cfg, db := heavyweight.FullTestDBNoFixturesV2(t, func(c *chainlink.Config, s *chainlink.Secrets) { @@ -102,7 +117,7 @@ func NewNode( c.Log.Level = ptr(configv2.LogLevel(logLevel)) var chainConfigs v2toml.EVMConfigs - for chainID := range chains { + for chainID := range evmchains { chainConfigs = append(chainConfigs, createConfigV2Chain(chainID)) } c.EVM = chainConfigs @@ -114,7 +129,7 @@ func NewNode( // Create clients for the core node backed by sim. 
clients := make(map[uint64]client.Client) - for chainID, chain := range chains { + for chainID, chain := range evmchains { clients[chainID] = client.NewSimulatedBackendClient(t, chain.Backend, big.NewInt(int64(chainID))) } @@ -185,6 +200,7 @@ func NewNode( return &Node{ App: app, + Chains: maps.Keys(chains), Keys: keys, Addr: net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: port}, IsBoostrap: bootstrap, @@ -193,49 +209,86 @@ func NewNode( type Keys struct { PeerID p2pkey.PeerID + CSA csakey.KeyV2 TransmittersByEVMChainID map[uint64]common.Address - OCRKeyBundle ocr2key.KeyBundle + OCRKeyBundles map[chaintype.ChainType]ocr2key.KeyBundle } func CreateKeys(t *testing.T, - app chainlink.Application, chains map[uint64]EVMChain) Keys { + app chainlink.Application, chains map[uint64]deployment.Chain) Keys { ctx := tests.Context(t) _, err := app.GetKeyStore().P2P().Create(ctx) require.NoError(t, err) + err = app.GetKeyStore().CSA().EnsureKey(ctx) + require.NoError(t, err) + csaKeys, err := app.GetKeyStore().CSA().GetAll() + require.NoError(t, err) + csaKey := csaKeys[0] + p2pIDs, err := app.GetKeyStore().P2P().GetAll() require.NoError(t, err) require.Len(t, p2pIDs, 1) peerID := p2pIDs[0].PeerID() // create a transmitter for each chain transmitters := make(map[uint64]common.Address) - for chainID, chain := range chains { - cid := big.NewInt(int64(chainID)) + keybundles := make(map[chaintype.ChainType]ocr2key.KeyBundle) + for _, chain := range chains { + family, err := chainsel.GetSelectorFamily(chain.Selector) + require.NoError(t, err) + + var ctype chaintype.ChainType + switch family { + case chainsel.FamilyEVM: + ctype = chaintype.EVM + case chainsel.FamilySolana: + ctype = chaintype.Solana + case chainsel.FamilyStarknet: + ctype = chaintype.StarkNet + case chainsel.FamilyCosmos: + ctype = chaintype.Cosmos + case chainsel.FamilyAptos: + ctype = chaintype.Aptos + default: + panic(fmt.Sprintf("Unsupported chain family %v", family)) + } + + keybundle, err := app.GetKeyStore().OCR2().Create(ctx, ctype) + require.NoError(t, err) + keybundles[ctype] = keybundle + + if family != chainsel.FamilyEVM { + // TODO: only support EVM transmission keys for now + continue + } + + evmChainID, err := chainsel.ChainIdFromSelector(chain.Selector) + require.NoError(t, err) + + cid := big.NewInt(int64(evmChainID)) addrs, err2 := app.GetKeyStore().Eth().EnabledAddressesForChain(ctx, cid) require.NoError(t, err2) if len(addrs) == 1 { // just fund the address - fundAddress(t, chain.DeployerKey, addrs[0], assets.Ether(10).ToInt(), chain.Backend) - transmitters[chainID] = addrs[0] + transmitters[evmChainID] = addrs[0] } else { // create key and fund it _, err3 := app.GetKeyStore().Eth().Create(ctx, cid) - require.NoError(t, err3, "failed to create key for chain", chainID) + require.NoError(t, err3, "failed to create key for chain", evmChainID) sendingKeys, err3 := app.GetKeyStore().Eth().EnabledAddressesForChain(ctx, cid) require.NoError(t, err3) require.Len(t, sendingKeys, 1) - fundAddress(t, chain.DeployerKey, sendingKeys[0], assets.Ether(10).ToInt(), chain.Backend) - transmitters[chainID] = sendingKeys[0] + transmitters[evmChainID] = sendingKeys[0] } + backend := chain.Client.(*Backend).Sim + fundAddress(t, chain.DeployerKey, transmitters[evmChainID], assets.Ether(1000).ToInt(), backend) } - require.Len(t, transmitters, len(chains)) - keybundle, err := app.GetKeyStore().OCR2().Create(ctx, chaintype.EVM) - require.NoError(t, err) return Keys{ PeerID: peerID, + CSA: csaKey, TransmittersByEVMChainID: transmitters, - 
OCRKeyBundle: keybundle, + OCRKeyBundles: keybundles, } } diff --git a/deployment/environment/memory/node_test.go b/deployment/environment/memory/node_test.go index 9142f48bbfe..7cbcb66d04a 100644 --- a/deployment/environment/memory/node_test.go +++ b/deployment/environment/memory/node_test.go @@ -12,7 +12,7 @@ import ( ) func TestNode(t *testing.T) { - chains := GenerateChains(t, 3) + chains := NewMemoryChains(t, 3) ports := freeport.GetN(t, 1) node := NewNode(t, ports[0], chains, zapcore.DebugLevel, false, deployment.CapabilityRegistryConfig{}) // We expect 3 transmitter keys diff --git a/deployment/environment/memory/sim.go b/deployment/environment/memory/sim.go index 29ff89f1a1f..c0fba87e2b3 100644 --- a/deployment/environment/memory/sim.go +++ b/deployment/environment/memory/sim.go @@ -15,69 +15,69 @@ import ( // OnchainClient but also exposes backend methods. type Backend struct { mu sync.Mutex - sim *simulated.Backend + Sim *simulated.Backend } func (b *Backend) Commit() common.Hash { b.mu.Lock() defer b.mu.Unlock() - return b.sim.Commit() + return b.Sim.Commit() } func (b *Backend) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { - return b.sim.Client().CodeAt(ctx, contract, blockNumber) + return b.Sim.Client().CodeAt(ctx, contract, blockNumber) } func (b *Backend) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { - return b.sim.Client().CallContract(ctx, call, blockNumber) + return b.Sim.Client().CallContract(ctx, call, blockNumber) } func (b *Backend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { - return b.sim.Client().EstimateGas(ctx, call) + return b.Sim.Client().EstimateGas(ctx, call) } func (b *Backend) SuggestGasPrice(ctx context.Context) (*big.Int, error) { - return b.sim.Client().SuggestGasPrice(ctx) + return b.Sim.Client().SuggestGasPrice(ctx) } func (b *Backend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { - return b.sim.Client().SuggestGasTipCap(ctx) + return b.Sim.Client().SuggestGasTipCap(ctx) } func (b *Backend) SendTransaction(ctx context.Context, tx *types.Transaction) error { - return b.sim.Client().SendTransaction(ctx, tx) + return b.Sim.Client().SendTransaction(ctx, tx) } func (b *Backend) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { - return b.sim.Client().HeaderByNumber(ctx, number) + return b.Sim.Client().HeaderByNumber(ctx, number) } func (b *Backend) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { - return b.sim.Client().PendingCodeAt(ctx, account) + return b.Sim.Client().PendingCodeAt(ctx, account) } func (b *Backend) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { - return b.sim.Client().PendingNonceAt(ctx, account) + return b.Sim.Client().PendingNonceAt(ctx, account) } func (b *Backend) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { - return b.sim.Client().FilterLogs(ctx, q) + return b.Sim.Client().FilterLogs(ctx, q) } func (b *Backend) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { - return b.sim.Client().SubscribeFilterLogs(ctx, q, ch) + return b.Sim.Client().SubscribeFilterLogs(ctx, q, ch) } func (b *Backend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { - return b.sim.Client().TransactionReceipt(ctx, txHash) + return b.Sim.Client().TransactionReceipt(ctx, txHash) } func (b *Backend) 
BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { - return b.sim.Client().BalanceAt(ctx, account, blockNumber) + return b.Sim.Client().BalanceAt(ctx, account, blockNumber) } func (b *Backend) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) { - return b.sim.Client().NonceAt(ctx, account, blockNumber) + return b.Sim.Client().NonceAt(ctx, account, blockNumber) } func NewBackend(sim *simulated.Backend) *Backend { @@ -85,6 +85,6 @@ func NewBackend(sim *simulated.Backend) *Backend { panic("simulated backend is nil") } return &Backend{ - sim: sim, + Sim: sim, } } diff --git a/deployment/environment/web/sdk/client/client.go b/deployment/environment/web/sdk/client/client.go index b22f52f3af4..011eb0cce31 100644 --- a/deployment/environment/web/sdk/client/client.go +++ b/deployment/environment/web/sdk/client/client.go @@ -7,7 +7,6 @@ import ( "net/http" "strings" - "github.com/AlekSi/pointer" "github.com/Khan/genqlient/graphql" "github.com/smartcontractkit/chainlink/deployment/environment/web/sdk/client/doer" @@ -18,6 +17,7 @@ type Client interface { FetchCSAPublicKey(ctx context.Context) (*string, error) FetchP2PPeerID(ctx context.Context) (*string, error) FetchAccountAddress(ctx context.Context, chainID string) (*string, error) + FetchKeys(ctx context.Context, chainType string) ([]string, error) FetchOCR2KeyBundleID(ctx context.Context, chainType string) (string, error) GetJob(ctx context.Context, id string) (*generated.GetJobResponse, error) ListJobs(ctx context.Context, offset, limit int) (*generated.ListJobsResponse, error) @@ -121,12 +121,38 @@ func (c *client) FetchAccountAddress(ctx context.Context, chainID string) (*stri } for _, keyDetail := range keys.EthKeys.GetResults() { if keyDetail.GetChain().Enabled && keyDetail.GetChain().Id == chainID { - return pointer.ToString(keyDetail.Address), nil + return &keyDetail.Address, nil } } return nil, fmt.Errorf("no account found for chain %s", chainID) } +func (c *client) FetchKeys(ctx context.Context, chainType string) ([]string, error) { + keys, err := generated.FetchKeys(ctx, c.gqlClient) + if err != nil { + return nil, err + } + if keys == nil { + return nil, fmt.Errorf("no accounts found") + } + switch generated.OCR2ChainType(chainType) { + case generated.OCR2ChainTypeAptos: + var accounts []string + for _, key := range keys.AptosKeys.GetResults() { + accounts = append(accounts, key.Account) + } + return accounts, nil + case generated.OCR2ChainTypeSolana: + var accounts []string + for _, key := range keys.SolanaKeys.GetResults() { + accounts = append(accounts, key.Id) + } + return accounts, nil + default: + return nil, fmt.Errorf("unsupported chainType %v", chainType) + } +} + func (c *client) GetJob(ctx context.Context, id string) (*generated.GetJobResponse, error) { return generated.GetJob(ctx, c.gqlClient, id) } diff --git a/deployment/environment/web/sdk/internal/generated/generated.go b/deployment/environment/web/sdk/internal/generated/generated.go index 68ab3e48e4f..7b16e4a1e3f 100644 --- a/deployment/environment/web/sdk/internal/generated/generated.go +++ b/deployment/environment/web/sdk/internal/generated/generated.go @@ -1887,6 +1887,58 @@ type FetchCSAKeysResponse struct { // GetCsaKeys returns FetchCSAKeysResponse.CsaKeys, and is useful for accessing the field via an interface. 
func (v *FetchCSAKeysResponse) GetCsaKeys() FetchCSAKeysCsaKeysCSAKeysPayload { return v.CsaKeys } +// FetchKeysAptosKeysAptosKeysPayload includes the requested fields of the GraphQL type AptosKeysPayload. +type FetchKeysAptosKeysAptosKeysPayload struct { + Results []FetchKeysAptosKeysAptosKeysPayloadResultsAptosKey `json:"results"` +} + +// GetResults returns FetchKeysAptosKeysAptosKeysPayload.Results, and is useful for accessing the field via an interface. +func (v *FetchKeysAptosKeysAptosKeysPayload) GetResults() []FetchKeysAptosKeysAptosKeysPayloadResultsAptosKey { + return v.Results +} + +// FetchKeysAptosKeysAptosKeysPayloadResultsAptosKey includes the requested fields of the GraphQL type AptosKey. +type FetchKeysAptosKeysAptosKeysPayloadResultsAptosKey struct { + Id string `json:"id"` + Account string `json:"account"` +} + +// GetId returns FetchKeysAptosKeysAptosKeysPayloadResultsAptosKey.Id, and is useful for accessing the field via an interface. +func (v *FetchKeysAptosKeysAptosKeysPayloadResultsAptosKey) GetId() string { return v.Id } + +// GetAccount returns FetchKeysAptosKeysAptosKeysPayloadResultsAptosKey.Account, and is useful for accessing the field via an interface. +func (v *FetchKeysAptosKeysAptosKeysPayloadResultsAptosKey) GetAccount() string { return v.Account } + +// FetchKeysResponse is returned by FetchKeys on success. +type FetchKeysResponse struct { + SolanaKeys FetchKeysSolanaKeysSolanaKeysPayload `json:"solanaKeys"` + AptosKeys FetchKeysAptosKeysAptosKeysPayload `json:"aptosKeys"` +} + +// GetSolanaKeys returns FetchKeysResponse.SolanaKeys, and is useful for accessing the field via an interface. +func (v *FetchKeysResponse) GetSolanaKeys() FetchKeysSolanaKeysSolanaKeysPayload { return v.SolanaKeys } + +// GetAptosKeys returns FetchKeysResponse.AptosKeys, and is useful for accessing the field via an interface. +func (v *FetchKeysResponse) GetAptosKeys() FetchKeysAptosKeysAptosKeysPayload { return v.AptosKeys } + +// FetchKeysSolanaKeysSolanaKeysPayload includes the requested fields of the GraphQL type SolanaKeysPayload. +type FetchKeysSolanaKeysSolanaKeysPayload struct { + Results []FetchKeysSolanaKeysSolanaKeysPayloadResultsSolanaKey `json:"results"` +} + +// GetResults returns FetchKeysSolanaKeysSolanaKeysPayload.Results, and is useful for accessing the field via an interface. +func (v *FetchKeysSolanaKeysSolanaKeysPayload) GetResults() []FetchKeysSolanaKeysSolanaKeysPayloadResultsSolanaKey { + return v.Results +} + +// FetchKeysSolanaKeysSolanaKeysPayloadResultsSolanaKey includes the requested fields of the GraphQL type SolanaKey. +type FetchKeysSolanaKeysSolanaKeysPayloadResultsSolanaKey struct { + Id string `json:"id"` +} + +// GetId returns FetchKeysSolanaKeysSolanaKeysPayloadResultsSolanaKey.Id, and is useful for accessing the field via an interface. +func (v *FetchKeysSolanaKeysSolanaKeysPayloadResultsSolanaKey) GetId() string { return v.Id } + // FetchOCR2KeyBundlesOcr2KeyBundlesOCR2KeyBundlesPayload includes the requested fields of the GraphQL type OCR2KeyBundlesPayload. type FetchOCR2KeyBundlesOcr2KeyBundlesOCR2KeyBundlesPayload struct { Results []FetchOCR2KeyBundlesOcr2KeyBundlesOCR2KeyBundlesPayloadResultsOCR2KeyBundle `json:"results"` @@ -5660,6 +5712,45 @@ func FetchCSAKeys( return &data_, err_ } +// The query or mutation executed by FetchKeys. 
+const FetchKeys_Operation = ` +query FetchKeys { + solanaKeys { + results { + id + } + } + aptosKeys { + results { + id + account + } + } +} +` + +func FetchKeys( + ctx_ context.Context, + client_ graphql.Client, +) (*FetchKeysResponse, error) { + req_ := &graphql.Request{ + OpName: "FetchKeys", + Query: FetchKeys_Operation, + } + var err_ error + + var data_ FetchKeysResponse + resp_ := &graphql.Response{Data: &data_} + + err_ = client_.MakeRequest( + ctx_, + req_, + resp_, + ) + + return &data_, err_ +} + // The query or mutation executed by FetchOCR2KeyBundles. const FetchOCR2KeyBundles_Operation = ` query FetchOCR2KeyBundles { diff --git a/deployment/environment/web/sdk/internal/genqlient.graphql b/deployment/environment/web/sdk/internal/genqlient.graphql index 06baf4f7913..4c998a4f6a6 100644 --- a/deployment/environment/web/sdk/internal/genqlient.graphql +++ b/deployment/environment/web/sdk/internal/genqlient.graphql @@ -45,6 +45,20 @@ query FetchAccounts { } } +query FetchKeys { + solanaKeys { + results { + id + } + } + aptosKeys { + results { + id + account + } + } +} + ##################### # ocr2KeyBundles ##################### @@ -456,4 +470,4 @@ mutation UpdateJobProposalSpecDefinition( code } } -} \ No newline at end of file +} diff --git a/deployment/go.mod b/deployment/go.mod index c9da65fd6c6..19720794189 100644 --- a/deployment/go.mod +++ b/deployment/go.mod @@ -6,7 +6,6 @@ go 1.22.8 replace github.com/smartcontractkit/chainlink/v2 => ../ require ( - github.com/AlekSi/pointer v1.1.0 github.com/Khan/genqlient v0.7.0 github.com/Masterminds/semver/v3 v3.3.0 github.com/avast/retry-go/v4 v4.6.0 @@ -22,9 +21,9 @@ require ( github.com/rs/zerolog v1.33.0 github.com/sethvargo/go-retry v0.2.4 github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86 - github.com/smartcontractkit/chain-selectors v1.0.27 + github.com/smartcontractkit/chain-selectors v1.0.29 github.com/smartcontractkit/chainlink-ccip v0.0.0-20241112095015-3e85d9f1898b - github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6 + github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371 github.com/smartcontractkit/chainlink-protos/job-distributor v0.4.0 github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.13 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 @@ -35,6 +34,7 @@ require ( go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c + golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.8.0 google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.35.1 @@ -405,7 +405,7 @@ require ( github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e // indirect github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 // indirect - github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930 // indirect + github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669 // indirect github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8 // indirect github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.0 // indirect github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.5 // indirect @@ -490,7 +490,6 @@ require ( golang.org/x/crypto v0.28.0 // indirect golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect 
golang.org/x/sys v0.26.0 // indirect golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect diff --git a/deployment/go.sum b/deployment/go.sum index 38bcb4b96a1..ce9bf9e0b7f 100644 --- a/deployment/go.sum +++ b/deployment/go.sum @@ -1378,14 +1378,14 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86 h1:qQH6fZZe31nBAG6INHph3z5ysDTPptyu0TR9uoJ1+ok= github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86/go.mod h1:WtWOoVQQEHxRHL2hNmuRrvDfYfQG/CioFNoa9Rr2mBE= -github.com/smartcontractkit/chain-selectors v1.0.27 h1:VE/ftX9Aae4gnw67yR1raKi+30iWKL/sWq8uyiLHM8k= -github.com/smartcontractkit/chain-selectors v1.0.27/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= +github.com/smartcontractkit/chain-selectors v1.0.29 h1:aZ9+OoUSMn4nqnissHtDvDoKR7JONfDqTHX3MHYIUIE= +github.com/smartcontractkit/chain-selectors v1.0.29/go.mod h1:xsKM0aN3YGcQKTPRPDDtPx2l4mlTN1Djmg0VVXV40b8= github.com/smartcontractkit/chainlink-automation v0.8.1 h1:sTc9LKpBvcKPc1JDYAmgBc2xpDKBco/Q4h4ydl6+UUU= github.com/smartcontractkit/chainlink-automation v0.8.1/go.mod h1:Iij36PvWZ6blrdC5A/nrQUBuf3MH3JvsBB9sSyc9W08= github.com/smartcontractkit/chainlink-ccip v0.0.0-20241112095015-3e85d9f1898b h1:4kmZtaQ4fXwduHnw9xk5VmiIOW4nHg/Mx6iidlZJt5o= github.com/smartcontractkit/chainlink-ccip v0.0.0-20241112095015-3e85d9f1898b/go.mod h1:4adKaHNaxFsRvV/lYfqtbsWyyvIPUMLR0FdOJN/ljis= -github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6 h1:yJNBWCdNL/X8+wEs3TGTBe9gssMmw5FTFxxrlo+0mVo= -github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371 h1:vnNqMaAvheZgR8IDMGw0QIV1Qen3XTh7IChwW40SNfU= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f h1:BwrIaQIx5Iy6eT+DfLhFfK2XqjxRm74mVdlX8gbu4dw= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f/go.mod h1:wHtwSR3F1CQSJJZDQKuqaqFYnvkT+kMyget7dl8Clvo= github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e h1:JiETqdNM0bktAUGMc62COwXIaw3rR3M77Me6bBLG0Fg= @@ -1396,8 +1396,8 @@ github.com/smartcontractkit/chainlink-protos/job-distributor v0.4.0 h1:1xTm8UGeD github.com/smartcontractkit/chainlink-protos/job-distributor v0.4.0/go.mod h1:/dVVLXrsp+V0AbcYGJo3XMzKg3CkELsweA/TTopCsKE= github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 h1:PBUaFfPLm+Efq7H9kdfGBivH+QhJ6vB5EZTR/sCZsxI= github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= -github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930 h1:blu++xbH/NSb+ii5hI4jczwojZ7Hc1ERXjpt/krYy9c= -github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930/go.mod h1:mGmRvlk54ufCufV4EBWizOGtXoXfePoFAuYEVC8EwdY= +github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669 h1:CBQ9ORUtGUvCr3dAm/qjpdHlYuB1SRIwtYw5LV8SLys= +github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669/go.mod h1:mGmRvlk54ufCufV4EBWizOGtXoXfePoFAuYEVC8EwdY= 
github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8 h1:B4DFdk6MGcQnoCjjMBCx7Z+GWQpxRWJ4O8W/dVJyWGA= github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8/go.mod h1:WkBqgBo+g34Gm5vWkDDl8Fh3Mzd7bF5hXp7rryg0t5o= github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.13 h1:T0kbw07Vb6xUyA9MIJZfErMgWseWi1zf7cYvRpoq7ug= diff --git a/deployment/keystone/changeset/append_node_capbilities.go b/deployment/keystone/changeset/append_node_capbilities.go index ae654b7017d..974c4970c51 100644 --- a/deployment/keystone/changeset/append_node_capbilities.go +++ b/deployment/keystone/changeset/append_node_capbilities.go @@ -8,19 +8,14 @@ import ( "github.com/smartcontractkit/chainlink/deployment/keystone/changeset/internal" ) -var _ deployment.ChangeSet = AppendNodeCapabilities +var _ deployment.ChangeSet[*AppendNodeCapabilitiesRequest] = AppendNodeCapabilities // AppendNodeCapabilitiesRequest is a request to add capabilities to the existing capabilities of nodes in the registry type AppendNodeCapabilitiesRequest = MutateNodeCapabilitiesRequest // AppendNodeCapabilities adds any new capabilities to the registry, merges the new capabilities with the existing capabilities // of the node, and updates the nodes in the registry host the union of the new and existing capabilities. -func AppendNodeCapabilities(env deployment.Environment, config any) (deployment.ChangesetOutput, error) { - req, ok := config.(*AppendNodeCapabilitiesRequest) - if !ok { - return deployment.ChangesetOutput{}, fmt.Errorf("invalid config type") - } - +func AppendNodeCapabilities(env deployment.Environment, req *AppendNodeCapabilitiesRequest) (deployment.ChangesetOutput, error) { cfg, err := req.convert(env) if err != nil { return deployment.ChangesetOutput{}, err diff --git a/deployment/keystone/changeset/deploy_forwarder.go b/deployment/keystone/changeset/deploy_forwarder.go index d6adbee0252..55ab0dcd86d 100644 --- a/deployment/keystone/changeset/deploy_forwarder.go +++ b/deployment/keystone/changeset/deploy_forwarder.go @@ -7,13 +7,9 @@ import ( kslib "github.com/smartcontractkit/chainlink/deployment/keystone" ) -var _ deployment.ChangeSet = DeployForwarder +var _ deployment.ChangeSet[uint64] = DeployForwarder -func DeployForwarder(env deployment.Environment, config interface{}) (deployment.ChangesetOutput, error) { - registryChainSel, ok := config.(uint64) - if !ok { - return deployment.ChangesetOutput{}, deployment.ErrInvalidConfig - } +func DeployForwarder(env deployment.Environment, registryChainSel uint64) (deployment.ChangesetOutput, error) { lggr := env.Logger // expect OCR3 to be deployed & capabilities registry regAddrs, err := env.ExistingAddresses.AddressesForChain(registryChainSel) diff --git a/deployment/keystone/changeset/deploy_ocr3.go b/deployment/keystone/changeset/deploy_ocr3.go index 016eaa97d1f..e0edf4a4440 100644 --- a/deployment/keystone/changeset/deploy_ocr3.go +++ b/deployment/keystone/changeset/deploy_ocr3.go @@ -5,7 +5,6 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink/deployment" - "github.com/smartcontractkit/chainlink/deployment/environment/clo/models" kslib "github.com/smartcontractkit/chainlink/deployment/keystone" ) @@ -27,9 +26,8 @@ func DeployOCR3(env deployment.Environment, config interface{}) (deployment.Chan return deployment.ChangesetOutput{AddressBook: ab}, nil } -func ConfigureOCR3Contract(lggr logger.Logger, env deployment.Environment, ab 
deployment.AddressBook, registryChainSel uint64, nodes []*models.Node, cfg kslib.OracleConfigWithSecrets) (deployment.ChangesetOutput, error) { - - err := kslib.ConfigureOCR3ContractFromCLO(&env, registryChainSel, nodes, ab, &cfg) +func ConfigureOCR3Contract(lggr logger.Logger, env deployment.Environment, ab deployment.AddressBook, registryChainSel uint64, nodes []string, cfg kslib.OracleConfigWithSecrets) (deployment.ChangesetOutput, error) { + err := kslib.ConfigureOCR3ContractFromJD(&env, registryChainSel, nodes, ab, &cfg) if err != nil { return deployment.ChangesetOutput{}, fmt.Errorf("failed to configure OCR3Capability: %w", err) } diff --git a/deployment/keystone/changeset/internal/test/utils.go b/deployment/keystone/changeset/internal/test/utils.go index 9f332e8e28d..cea20fd327d 100644 --- a/deployment/keystone/changeset/internal/test/utils.go +++ b/deployment/keystone/changeset/internal/test/utils.go @@ -240,7 +240,7 @@ func (cc *CapabilityCache) AddCapabilities(lggr logger.Logger, chain deployment. } func testChain(t *testing.T) deployment.Chain { - chains, _ := memory.NewMemoryChains(t, 1) + chains := memory.NewMemoryChains(t, 1) var chain deployment.Chain for _, c := range chains { chain = c diff --git a/deployment/keystone/changeset/internal/update_don_test.go b/deployment/keystone/changeset/internal/update_don_test.go index baedda5e93d..12ccfe290b1 100644 --- a/deployment/keystone/changeset/internal/update_don_test.go +++ b/deployment/keystone/changeset/internal/update_don_test.go @@ -4,14 +4,14 @@ import ( "bytes" "math/big" "sort" - "strconv" "testing" "github.com/ethereum/go-ethereum/common" chainsel "github.com/smartcontractkit/chain-selectors" "github.com/smartcontractkit/chainlink-common/pkg/logger" + nodev1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node" "github.com/smartcontractkit/chainlink/deployment" - "github.com/smartcontractkit/chainlink/deployment/environment/clo/models" + "github.com/smartcontractkit/chainlink/deployment/keystone" kslib "github.com/smartcontractkit/chainlink/deployment/keystone" kscs "github.com/smartcontractkit/chainlink/deployment/keystone/changeset" "github.com/smartcontractkit/chainlink/deployment/keystone/changeset/internal" @@ -95,18 +95,19 @@ func TestUpdateDon(t *testing.T) { t.Run("empty", func(t *testing.T) { cfg := setupUpdateDonTestConfig{ - dons: []kslib.DonCapabilities{ + dons: []kslib.DonInfo{ { - Name: "don 1", - Nops: []*models.NodeOperator{ - { - Name: "nop 1", - Nodes: []*models.Node{node_1, node_2, node_3, node_4}, - }, - }, + Name: "don 1", + Nodes: []keystone.Node{node_1, node_2, node_3, node_4}, Capabilities: []kcr.CapabilitiesRegistryCapability{cap_A}, }, }, + nops: []keystone.NOP{ + { + Name: "nop 1", + Nodes: []string{node_1.ID, node_2.ID, node_3.ID, node_4.ID}, + }, + }, } testCfg := setupUpdateDonTest(t, lggr, cfg) @@ -169,26 +170,24 @@ type minimalNodeCfg struct { admin common.Address } -func newNode(t *testing.T, cfg minimalNodeCfg) *models.Node { +func newNode(t *testing.T, cfg minimalNodeCfg) keystone.Node { t.Helper() - return &models.Node{ + return keystone.Node{ ID: cfg.id, PublicKey: &cfg.pubKey, - ChainConfigs: []*models.NodeChainConfig{ + ChainConfigs: []*nodev1.ChainConfig{ { - ID: "test chain", - Network: &models.Network{ - ID: "test network 1", - ChainID: strconv.FormatUint(cfg.registryChain.EvmChainID, 10), - ChainType: models.ChainTypeEvm, + Chain: &nodev1.Chain{ + Id: "test chain", + Type: nodev1.ChainType_CHAIN_TYPE_EVM, }, AdminAddress: cfg.admin.String(), - Ocr2Config: 
&models.NodeOCR2Config{ - P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ - PeerID: cfg.p2p.PeerID().String(), + Ocr2Config: &nodev1.OCR2Config{ + P2PKeyBundle: &nodev1.OCR2Config_P2PKeyBundle{ + PeerId: cfg.p2p.PeerID().String(), }, - OcrKeyBundle: &models.NodeOCR2ConfigOCRKeyBundle{ + OcrKeyBundle: &nodev1.OCR2Config_OCRKeyBundle{ OnchainSigningAddress: cfg.signingAddr, }, }, @@ -198,7 +197,8 @@ func newNode(t *testing.T, cfg minimalNodeCfg) *models.Node { } type setupUpdateDonTestConfig struct { - dons []kslib.DonCapabilities + dons []kslib.DonInfo + nops []keystone.NOP } type setupUpdateDonTestResult struct { @@ -208,28 +208,19 @@ type setupUpdateDonTestResult struct { func setupUpdateDonTest(t *testing.T, lggr logger.Logger, cfg setupUpdateDonTestConfig) *kstest.SetupTestRegistryResponse { t.Helper() - req := newSetupTestRegistryRequest(t, cfg.dons) + req := newSetupTestRegistryRequest(t, cfg.dons, cfg.nops) return kstest.SetupTestRegistry(t, lggr, req) } -func newSetupTestRegistryRequest(t *testing.T, dons []kslib.DonCapabilities) *kstest.SetupTestRegistryRequest { +func newSetupTestRegistryRequest(t *testing.T, dons []kslib.DonInfo, nops []keystone.NOP) *kstest.SetupTestRegistryRequest { t.Helper() - allNops := make(map[string]*models.NodeOperator) + nodes := make(map[string]keystone.Node) for _, don := range dons { - for _, nop := range don.Nops { - nop := nop - n, exists := allNops[nop.ID] - if exists { - nop.Nodes = append(n.Nodes, nop.Nodes...) - } - allNops[nop.ID] = nop + for _, node := range don.Nodes { + nodes[node.ID] = node } } - var nops []*models.NodeOperator - for _, nop := range allNops { - nops = append(nops, nop) - } - nopsToNodes := makeNopToNodes(t, nops) + nopsToNodes := makeNopToNodes(t, nops, nodes) testDons := makeTestDon(t, dons) p2pToCapabilities := makeP2PToCapabilities(t, dons) req := &kstest.SetupTestRegistryRequest{ @@ -240,46 +231,45 @@ func newSetupTestRegistryRequest(t *testing.T, dons []kslib.DonCapabilities) *ks return req } -func makeNopToNodes(t *testing.T, cloNops []*models.NodeOperator) map[kcr.CapabilitiesRegistryNodeOperator][]*internal.P2PSignerEnc { +func makeNopToNodes(t *testing.T, nops []keystone.NOP, nodes map[string]keystone.Node) map[kcr.CapabilitiesRegistryNodeOperator][]*internal.P2PSignerEnc { nopToNodes := make(map[kcr.CapabilitiesRegistryNodeOperator][]*internal.P2PSignerEnc) - for _, nop := range cloNops { + for _, nop := range nops { // all chain configs are the same wrt admin address & node keys // so we can just use the first one crnop := kcr.CapabilitiesRegistryNodeOperator{ Name: nop.Name, - Admin: common.HexToAddress(nop.Nodes[0].ChainConfigs[0].AdminAddress), + Admin: common.HexToAddress(nodes[nop.Nodes[0]].ChainConfigs[0].AdminAddress), } - var nodes []*internal.P2PSignerEnc - for _, node := range nop.Nodes { + var signers []*internal.P2PSignerEnc + for _, nodeID := range nop.Nodes { + node := nodes[nodeID] require.NotNil(t, node.PublicKey, "public key is nil %s", node.ID) // all chain configs are the same wrt admin address & node keys - p, err := kscs.NewP2PSignerEncFromCLO(node.ChainConfigs[0], *node.PublicKey) + p, err := kscs.NewP2PSignerEncFromJD(node.ChainConfigs[0], *node.PublicKey) require.NoError(t, err, "failed to make p2p signer enc from clo nod %s", node.ID) - nodes = append(nodes, p) + signers = append(signers, p) } - nopToNodes[crnop] = nodes + nopToNodes[crnop] = signers } return nopToNodes } -func makeP2PToCapabilities(t *testing.T, dons []kslib.DonCapabilities) 
map[p2pkey.PeerID][]kcr.CapabilitiesRegistryCapability { +func makeP2PToCapabilities(t *testing.T, dons []kslib.DonInfo) map[p2pkey.PeerID][]kcr.CapabilitiesRegistryCapability { p2pToCapabilities := make(map[p2pkey.PeerID][]kcr.CapabilitiesRegistryCapability) for _, don := range dons { - for _, nop := range don.Nops { - for _, node := range nop.Nodes { - for _, cap := range don.Capabilities { - p, err := kscs.NewP2PSignerEncFromCLO(node.ChainConfigs[0], *node.PublicKey) - require.NoError(t, err, "failed to make p2p signer enc from clo nod %s", node.ID) - p2pToCapabilities[p.P2PKey] = append(p2pToCapabilities[p.P2PKey], cap) - } + for _, node := range don.Nodes { + for _, cap := range don.Capabilities { + p, err := kscs.NewP2PSignerEncFromJD(node.ChainConfigs[0], *node.PublicKey) + require.NoError(t, err, "failed to make p2p signer enc from clo nod %s", node.ID) + p2pToCapabilities[p.P2PKey] = append(p2pToCapabilities[p.P2PKey], cap) } } } return p2pToCapabilities } -func makeTestDon(t *testing.T, dons []kslib.DonCapabilities) []kstest.Don { +func makeTestDon(t *testing.T, dons []kslib.DonInfo) []kstest.Don { out := make([]kstest.Don, len(dons)) for i, don := range dons { out[i] = testDon(t, don) @@ -287,16 +277,14 @@ func makeTestDon(t *testing.T, dons []kslib.DonCapabilities) []kstest.Don { return out } -func testDon(t *testing.T, don kslib.DonCapabilities) kstest.Don { +func testDon(t *testing.T, don kslib.DonInfo) kstest.Don { var p2pids []p2pkey.PeerID - for _, nop := range don.Nops { - for _, node := range nop.Nodes { - // all chain configs are the same wrt admin address & node keys - // so we can just use the first one - p, err := kscs.NewP2PSignerEncFromCLO(node.ChainConfigs[0], *node.PublicKey) - require.NoError(t, err, "failed to make p2p signer enc from clo nod %s", node.ID) - p2pids = append(p2pids, p.P2PKey) - } + for _, node := range don.Nodes { + // all chain configs are the same wrt admin address & node keys + // so we can just use the first one + p, err := kscs.NewP2PSignerEncFromJD(node.ChainConfigs[0], *node.PublicKey) + require.NoError(t, err, "failed to make p2p signer enc from clo nod %s", node.ID) + p2pids = append(p2pids, p.P2PKey) } var capabilityConfigs []internal.CapabilityConfig diff --git a/deployment/keystone/changeset/internal/update_nodes_test.go b/deployment/keystone/changeset/internal/update_nodes_test.go index d764c4835c2..5488e5c761d 100644 --- a/deployment/keystone/changeset/internal/update_nodes_test.go +++ b/deployment/keystone/changeset/internal/update_nodes_test.go @@ -511,7 +511,7 @@ func testPeerID(t *testing.T, s string) p2pkey.PeerID { } func testChain(t *testing.T) deployment.Chain { - chains, _ := memory.NewMemoryChains(t, 1) + chains := memory.NewMemoryChains(t, 1) var chain deployment.Chain for _, c := range chains { chain = c diff --git a/deployment/keystone/changeset/types.go b/deployment/keystone/changeset/types.go index e8a86fa4272..fb609041792 100644 --- a/deployment/keystone/changeset/types.go +++ b/deployment/keystone/changeset/types.go @@ -6,24 +6,17 @@ import ( "fmt" v1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node" - "github.com/smartcontractkit/chainlink/deployment/environment/clo" - "github.com/smartcontractkit/chainlink/deployment/environment/clo/models" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey" ) -func NewP2PSignerEncFromCLO(cc *models.NodeChainConfig, pubkey string) (*P2PSignerEnc, error) { - ccfg := clo.NewChainConfig(cc) - var pubkeyB [32]byte - if _, err := 
hex.Decode(pubkeyB[:], []byte(pubkey)); err != nil { - return nil, fmt.Errorf("failed to decode pubkey %s: %w", pubkey, err) - } - return newP2PSignerEncFromJD(ccfg, pubkeyB) - } - -func newP2PSignerEncFromJD(ccfg *v1.ChainConfig, pubkey [32]byte) (*P2PSignerEnc, error) { +func NewP2PSignerEncFromJD(ccfg *v1.ChainConfig, pubkeyStr string) (*P2PSignerEnc, error) { if ccfg == nil { return nil, errors.New("nil ocr2config") } + var pubkey [32]byte + if _, err := hex.Decode(pubkey[:], []byte(pubkeyStr)); err != nil { + return nil, fmt.Errorf("failed to decode pubkey %s: %w", pubkeyStr, err) + } ocfg := ccfg.Ocr2Config p2p := p2pkey.PeerID{} if err := p2p.UnmarshalString(ocfg.P2PKeyBundle.PeerId); err != nil { diff --git a/deployment/keystone/changeset/update_don.go b/deployment/keystone/changeset/update_don.go index 1a535c5aa11..1ab40d5a935 100644 --- a/deployment/keystone/changeset/update_don.go +++ b/deployment/keystone/changeset/update_don.go @@ -8,7 +8,7 @@ import ( kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry" ) -var _ deployment.ChangeSet = UpdateDon +var _ deployment.ChangeSet[*UpdateDonRequest] = UpdateDon // CapabilityConfig is a struct that holds a capability and its configuration type CapabilityConfig = internal.CapabilityConfig @@ -22,8 +22,7 @@ type UpdateDonResponse struct { // UpdateDon updates the capabilities of a Don // This a complex action in practice that involves registering missing capabilities, adding the nodes, and updating // the capabilities of the DON -func UpdateDon(env deployment.Environment, cfg any) (deployment.ChangesetOutput, error) { - req := cfg.(*UpdateDonRequest) +func UpdateDon(env deployment.Environment, req *UpdateDonRequest) (deployment.ChangesetOutput, error) { _, err := internal.UpdateDon(env.Logger, req) if err != nil { return deployment.ChangesetOutput{}, fmt.Errorf("failed to update don: %w", err) diff --git a/deployment/keystone/changeset/update_node_capabilities.go b/deployment/keystone/changeset/update_node_capabilities.go index 09cf351cc85..0b6c4fb5462 100644 --- a/deployment/keystone/changeset/update_node_capabilities.go +++ b/deployment/keystone/changeset/update_node_capabilities.go @@ -5,8 +5,9 @@ import ( "fmt" chainsel "github.com/smartcontractkit/chain-selectors" + "github.com/smartcontractkit/chainlink/deployment" - "github.com/smartcontractkit/chainlink/deployment/environment/clo/models" + "github.com/smartcontractkit/chainlink/deployment/keystone" kslib "github.com/smartcontractkit/chainlink/deployment/keystone" "github.com/smartcontractkit/chainlink/deployment/keystone/changeset/internal" @@ -14,11 +15,11 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey" ) -var _ deployment.ChangeSet = UpdateNodeCapabilities +var _ deployment.ChangeSet[*MutateNodeCapabilitiesRequest] = UpdateNodeCapabilities type P2PSignerEnc = internal.P2PSignerEnc -func NewP2PSignerEnc(n *models.Node, registryChainSel uint64) (*P2PSignerEnc, error) { +func NewP2PSignerEnc(n *keystone.Node, registryChainSel uint64) (*P2PSignerEnc, error) { p2p, signer, enc, err := kslib.ExtractKeys(n, registryChainSel) if err != nil { return nil, fmt.Errorf("failed to extract keys: %w", err) } @@ -84,11 +85,7 @@ func (req *MutateNodeCapabilitiesRequest) updateNodeCapabilitiesImplRequest(e de } // UpdateNodeCapabilities updates the capabilities of nodes in the registry -func UpdateNodeCapabilities(env deployment.Environment, config any) (deployment.ChangesetOutput, error) { - req, ok :=
config.(*MutateNodeCapabilitiesRequest) - if !ok { - return deployment.ChangesetOutput{}, fmt.Errorf("invalid config type. want %T, got %T", &MutateNodeCapabilitiesRequest{}, config) - } +func UpdateNodeCapabilities(env deployment.Environment, req *MutateNodeCapabilitiesRequest) (deployment.ChangesetOutput, error) { c, err := req.updateNodeCapabilitiesImplRequest(env) if err != nil { return deployment.ChangesetOutput{}, fmt.Errorf("failed to convert request: %w", err) diff --git a/deployment/keystone/deploy.go b/deployment/keystone/deploy.go index f0231338ac3..a43f906178e 100644 --- a/deployment/keystone/deploy.go +++ b/deployment/keystone/deploy.go @@ -7,15 +7,18 @@ import ( "encoding/hex" "errors" "fmt" + "slices" "sort" "strings" "time" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/rpc" + "golang.org/x/exp/maps" + nodev1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node" + "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/shared/ptypes" "github.com/smartcontractkit/chainlink/deployment" - "github.com/smartcontractkit/chainlink/deployment/environment/clo/models" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/durationpb" @@ -49,8 +52,10 @@ func (r ConfigureContractsRequest) Validate() error { if r.Env == nil { return errors.New("environment is nil") } - if len(r.Dons) == 0 { - return errors.New("no DONS") + for _, don := range r.Dons { + if err := don.Validate(); err != nil { + return fmt.Errorf("don validation failed for '%s': %w", don.Name, err) + } } _, ok := chainsel.ChainBySelector(r.RegistryChainSel) if !ok { @@ -90,8 +95,13 @@ func ConfigureContracts(ctx context.Context, lggr logger.Logger, req ConfigureCo return nil, fmt.Errorf("failed to configure registry: %w", err) } + donInfos, err := DonInfos(req.Dons, req.Env.Offchain) + if err != nil { + return nil, fmt.Errorf("failed to get don infos: %w", err) + } + // now we have the capability registry set up we need to configure the forwarder contracts and the OCR3 contract - dons, err := joinInfoAndNodes(cfgRegistryResp.DonInfos, req.Dons, req.RegistryChainSel) + dons, err := joinInfoAndNodes(cfgRegistryResp.DonInfos, donInfos, req.RegistryChainSel) if err != nil { return nil, fmt.Errorf("failed to assimilate registry to Dons: %w", err) } @@ -137,6 +147,101 @@ func DeployContracts(lggr logger.Logger, e *deployment.Environment, chainSel uin }, nil } +// DonInfo is DonCapabilities, but expanded to contain node information +type DonInfo struct { + Name string + Nodes []Node + Capabilities []kcr.CapabilitiesRegistryCapability // every capability is hosted on each node +} + +// TODO: merge with deployment/environment.go Node +type Node struct { + ID string + P2PID string + Name string + PublicKey *string + ChainConfigs []*nodev1.ChainConfig +} + +// TODO: merge with deployment/environment.go NodeInfo, we currently lookup based on p2p_id, and chain-selectors needs non-EVM support +func NodesFromJD(name string, nodeIDs []string, jd deployment.OffchainClient) ([]Node, error) { + // lookup nodes based on p2p_ids + var nodes []Node + selector := strings.Join(nodeIDs, ",") + nodesFromJD, err := jd.ListNodes(context.Background(), &nodev1.ListNodesRequest{ + Filter: &nodev1.ListNodesRequest_Filter{ + Enabled: 1, + Selectors: []*ptypes.Selector{ + { + Key: "p2p_id", + Op: ptypes.SelectorOp_IN, + Value: &selector, + }, + }, + }, + }) + if err != nil { + return nil, fmt.Errorf("failed to list nodes '%s': %w", name, err) + } + + for _, id := range 
nodeIDs { + idx := slices.IndexFunc(nodesFromJD.GetNodes(), func(node *nodev1.Node) bool { + return slices.ContainsFunc(node.Labels, func(label *ptypes.Label) bool { + return label.Key == "p2p_id" && *label.Value == id + }) + }) + if idx < 0 { + var got []string + for _, node := range nodesFromJD.GetNodes() { + for _, label := range node.Labels { + if label.Key == "p2p_id" { + got = append(got, *label.Value) + } + } + } + return nil, fmt.Errorf("node id %s not found in list '%s'", id, strings.Join(got, ",")) + } + + jdNode := nodesFromJD.Nodes[idx] + // TODO: Filter should accept multiple nodes + nodeChainConfigs, err := jd.ListNodeChainConfigs(context.Background(), &nodev1.ListNodeChainConfigsRequest{Filter: &nodev1.ListNodeChainConfigsRequest_Filter{ + NodeIds: []string{jdNode.Id}, // must use the jd-specific internal node id + }}) + if err != nil { + return nil, err + } + + nodes = append(nodes, Node{ + ID: jdNode.Id, + P2PID: id, + Name: name, + PublicKey: &jdNode.PublicKey, + ChainConfigs: nodeChainConfigs.GetChainConfigs(), + }) + } + return nodes, nil +} + +func DonInfos(dons []DonCapabilities, jd deployment.OffchainClient) ([]DonInfo, error) { + var donInfos []DonInfo + for _, don := range dons { + var nodeIDs []string + for _, nop := range don.Nops { + nodeIDs = append(nodeIDs, nop.Nodes...) + } + nodes, err := NodesFromJD(don.Name, nodeIDs, jd) + if err != nil { + return nil, err + } + donInfos = append(donInfos, DonInfo{ + Name: don.Name, + Nodes: nodes, + Capabilities: don.Capabilities, + }) + } + return donInfos, nil +} + // ConfigureRegistry configures the registry contract with the given DONS and their capabilities // the address book is required to contain the addresses of the deployed registry contract func ConfigureRegistry(ctx context.Context, lggr logger.Logger, req ConfigureContractsRequest, addrBook deployment.AddressBook) (*ConfigureContractsResponse, error) { @@ -153,6 +258,11 @@ func ConfigureRegistry(ctx context.Context, lggr logger.Logger, req ConfigureCon return nil, fmt.Errorf("failed to get contract sets: %w", err) } + donInfos, err := DonInfos(req.Dons, req.Env.Offchain) + if err != nil { + return nil, fmt.Errorf("failed to get don infos: %w", err) + } + // ensure registry is deployed and get the registry contract and chain var registry *kcr.CapabilitiesRegistry registryChainContracts, ok := contractSetsResp.ContractSets[req.RegistryChainSel] @@ -167,17 +277,17 @@ func ConfigureRegistry(ctx context.Context, lggr logger.Logger, req ConfigureCon // all the subsequent calls to the registry are in terms of nodes // compute the mapping of dons to their nodes for reuse in various registry calls - donToOcr2Nodes, err := mapDonsToNodes(req.Dons, true, req.RegistryChainSel) + donToOcr2Nodes, err := mapDonsToNodes(donInfos, true, req.RegistryChainSel) if err != nil { return nil, fmt.Errorf("failed to map dons to nodes: %w", err) } - // TODO: we can remove this abstractions and refactor the functions that accept them to accept []DonCapabilities + // TODO: we can remove this abstractions and refactor the functions that accept them to accept []DonInfos/DonCapabilities // they are unnecessary indirection - donToCapabilities := mapDonsToCaps(req.Dons) - nodeIdToNop, err := nodesToNops(req.Dons, req.RegistryChainSel) + donToCapabilities := mapDonsToCaps(donInfos) + nopsToNodeIDs, err := nopsToNodes(donInfos, req.Dons, req.RegistryChainSel) if err != nil { - return nil, fmt.Errorf("failed to map nodes to nops: %w", err) + return nil, fmt.Errorf("failed to map nops to nodes: 
%w", err) } // register capabilities @@ -192,14 +302,7 @@ func ConfigureRegistry(ctx context.Context, lggr logger.Logger, req ConfigureCon lggr.Infow("registered capabilities", "capabilities", capabilitiesResp.donToCapabilities) // register node operators - dedupedNops := make(map[kcr.CapabilitiesRegistryNodeOperator]struct{}) - var nopsList []kcr.CapabilitiesRegistryNodeOperator - for _, nop := range nodeIdToNop { - dedupedNops[nop] = struct{}{} - } - for nop := range dedupedNops { - nopsList = append(nopsList, nop) - } + nopsList := maps.Keys(nopsToNodeIDs) nopsResp, err := RegisterNOPS(ctx, lggr, RegisterNOPSRequest{ Chain: registryChain, Registry: registry, @@ -214,7 +317,7 @@ func ConfigureRegistry(ctx context.Context, lggr logger.Logger, req ConfigureCon nodesResp, err := registerNodes(lggr, ®isterNodesRequest{ registry: registry, chain: registryChain, - nodeIdToNop: nodeIdToNop, + nopToNodeIDs: nopsToNodeIDs, donToOcr2Nodes: donToOcr2Nodes, donToCapabilities: capabilitiesResp.donToCapabilities, nops: nopsResp.Nops, @@ -224,6 +327,8 @@ func ConfigureRegistry(ctx context.Context, lggr logger.Logger, req ConfigureCon } lggr.Infow("registered nodes", "nodes", nodesResp.nodeIDToParams) + // TODO: annotate nodes with node_operator_id in JD? + // register DONS donsResp, err := registerDons(lggr, registerDonsRequest{ registry: registry, @@ -318,7 +423,7 @@ func ConfigureOCR3Contract(env *deployment.Environment, chainSel uint64, dons [] return nil } -func ConfigureOCR3ContractFromCLO(env *deployment.Environment, chainSel uint64, nodes []*models.Node, addrBook deployment.AddressBook, cfg *OracleConfigWithSecrets) error { +func ConfigureOCR3ContractFromJD(env *deployment.Environment, chainSel uint64, nodeIDs []string, addrBook deployment.AddressBook, cfg *OracleConfigWithSecrets) error { registryChain, ok := env.Chains[chainSel] if !ok { return fmt.Errorf("chain %d not found in environment", chainSel) @@ -338,9 +443,13 @@ func ConfigureOCR3ContractFromCLO(env *deployment.Environment, chainSel uint64, if contract == nil { return fmt.Errorf("no ocr3 contract found for chain %d", chainSel) } + nodes, err := NodesFromJD("nodes", nodeIDs, env.Offchain) + if err != nil { + return err + } var ocr2nodes []*ocr2Node for _, node := range nodes { - n, err := newOcr2NodeFromClo(node, chainSel) + n, err := newOcr2NodeFromJD(&node, chainSel) if err != nil { return fmt.Errorf("failed to create ocr2 node from clo node: %w", err) } @@ -549,7 +658,7 @@ func DecodeErr(encodedABI string, err error) error { type registerNodesRequest struct { registry *kcr.CapabilitiesRegistry chain deployment.Chain - nodeIdToNop map[string]kcr.CapabilitiesRegistryNodeOperator + nopToNodeIDs map[kcr.CapabilitiesRegistryNodeOperator][]string donToOcr2Nodes map[string][]*ocr2Node donToCapabilities map[string][]RegisteredCapability nops []*kcr.CapabilitiesRegistryNodeOperatorAdded @@ -562,21 +671,18 @@ type registerNodesResponse struct { // can sign the transactions update the contract state // TODO: 467 refactor to support MCMS. 
Specifically need to separate the call data generation from the actual contract call func registerNodes(lggr logger.Logger, req *registerNodesRequest) (*registerNodesResponse, error) { - lggr.Infow("registering nodes...", "len", len(req.nodeIdToNop)) - nopToNodeIDs := make(map[kcr.CapabilitiesRegistryNodeOperator][]string) - for nodeID, nop := range req.nodeIdToNop { - if _, ok := nopToNodeIDs[nop]; !ok { - nopToNodeIDs[nop] = make([]string, 0) - } - nopToNodeIDs[nop] = append(nopToNodeIDs[nop], nodeID) + var count int + for _, nodes := range req.nopToNodeIDs { + count += len(nodes) } + lggr.Infow("registering nodes...", "len", count) nodeToRegisterNop := make(map[string]*kcr.CapabilitiesRegistryNodeOperatorAdded) for _, nop := range req.nops { n := kcr.CapabilitiesRegistryNodeOperator{ Name: nop.Name, Admin: nop.Admin, } - nodeIDs := nopToNodeIDs[n] + nodeIDs := req.nopToNodeIDs[n] for _, nodeID := range nodeIDs { _, exists := nodeToRegisterNop[nodeID] if !exists { diff --git a/deployment/keystone/deploy_test.go b/deployment/keystone/deploy_test.go index 96350a91d6c..4e0d2a52dcc 100644 --- a/deployment/keystone/deploy_test.go +++ b/deployment/keystone/deploy_test.go @@ -4,11 +4,14 @@ import ( "encoding/json" "fmt" "os" + "strconv" "testing" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/stretchr/testify/assert" "github.com/test-go/testify/require" + "go.uber.org/zap/zapcore" + "golang.org/x/exp/maps" chainsel "github.com/smartcontractkit/chain-selectors" @@ -17,6 +20,7 @@ import ( "github.com/smartcontractkit/chainlink/deployment" "github.com/smartcontractkit/chainlink/deployment/environment/clo" "github.com/smartcontractkit/chainlink/deployment/environment/clo/models" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" "github.com/smartcontractkit/chainlink/deployment/keystone" kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry" "github.com/smartcontractkit/chainlink/v2/core/logger" @@ -25,6 +29,204 @@ import ( func TestDeploy(t *testing.T) { lggr := logger.TestLogger(t) + // sepolia; all nodes are on the this chain + sepoliaChainId := uint64(11155111) + sepoliaArbitrumChainId := uint64(421614) + + sepoliaChainSel, err := chainsel.SelectorFromChainId(sepoliaChainId) + require.NoError(t, err) + // sepoliaArbitrumChainSel, err := chainsel.SelectorFromChainId(sepoliaArbitrumChainId) + // require.NoError(t, err) + // aptosChainSel := uint64(999) // TODO: + + crConfig := deployment.CapabilityRegistryConfig{ + EVMChainID: sepoliaChainId, + Contract: [20]byte{}, + } + + evmChains := memory.NewMemoryChainsWithChainIDs(t, []uint64{sepoliaChainId, sepoliaArbitrumChainId}) + // aptosChain := memory.NewMemoryChain(t, aptosChainSel) + + wfChains := map[uint64]deployment.Chain{} + wfChains[sepoliaChainSel] = evmChains[sepoliaChainSel] + // wfChains[aptosChainSel] = aptosChain + wfNodes := memory.NewNodes(t, zapcore.InfoLevel, wfChains, 4, 0, crConfig) + require.Len(t, wfNodes, 4) + + cwNodes := memory.NewNodes(t, zapcore.InfoLevel, evmChains, 4, 0, crConfig) + + assetChains := map[uint64]deployment.Chain{} + assetChains[sepoliaChainSel] = evmChains[sepoliaChainSel] + assetNodes := memory.NewNodes(t, zapcore.InfoLevel, assetChains, 4, 0, crConfig) + require.Len(t, assetNodes, 4) + + // TODO: partition nodes into multiple nops + + wfDon := keystone.DonCapabilities{ + Name: keystone.WFDonName, + Nops: []keystone.NOP{ + { + Name: "nop 1", + Nodes: maps.Keys(wfNodes), + }, + }, + Capabilities: 
[]kcr.CapabilitiesRegistryCapability{keystone.OCR3Cap}, + } + cwDon := keystone.DonCapabilities{ + Name: keystone.TargetDonName, + Nops: []keystone.NOP{ + { + Name: "nop 2", + Nodes: maps.Keys(cwNodes), + }, + }, + Capabilities: []kcr.CapabilitiesRegistryCapability{keystone.WriteChainCap}, + } + assetDon := keystone.DonCapabilities{ + Name: keystone.StreamDonName, + Nops: []keystone.NOP{ + { + Name: "nop 3", + Nodes: maps.Keys(assetNodes), + }, + }, + Capabilities: []kcr.CapabilitiesRegistryCapability{keystone.StreamTriggerCap}, + } + + allChains := make(map[uint64]deployment.Chain) + maps.Copy(allChains, evmChains) + // allChains[aptosChainSel] = aptosChain + + allNodes := make(map[string]memory.Node) + maps.Copy(allNodes, wfNodes) + maps.Copy(allNodes, cwNodes) + maps.Copy(allNodes, assetNodes) + env := memory.NewMemoryEnvironmentFromChainsNodes(t, lggr, allChains, allNodes) + + var ocr3Config = keystone.OracleConfigWithSecrets{ + OracleConfig: keystone.OracleConfig{ + MaxFaultyOracles: len(wfNodes) / 3, + }, + OCRSecrets: deployment.XXXGenerateTestOCRSecrets(), + } + + ctx := tests.Context(t) + // explicitly deploy the contracts + cs, err := keystone.DeployContracts(lggr, &env, sepoliaChainSel) + require.NoError(t, err) + env.ExistingAddresses = cs.AddressBook + deployReq := keystone.ConfigureContractsRequest{ + RegistryChainSel: sepoliaChainSel, + Env: &env, + OCR3Config: &ocr3Config, + Dons: []keystone.DonCapabilities{wfDon, cwDon, assetDon}, + DoContractDeploy: false, + } + deployResp, err := keystone.ConfigureContracts(ctx, lggr, deployReq) + require.NoError(t, err) + ad := deployResp.Changeset.AddressBook + addrs, err := ad.Addresses() + require.NoError(t, err) + lggr.Infow("Deployed Keystone contracts", "address book", addrs) + + // all contracts on home chain + homeChainAddrs, err := ad.AddressesForChain(sepoliaChainSel) + require.NoError(t, err) + require.Len(t, homeChainAddrs, 3) + // only forwarder on non-home chain + for sel := range env.Chains { + chainAddrs, err := ad.AddressesForChain(sel) + require.NoError(t, err) + if sel != sepoliaChainSel { + require.Len(t, chainAddrs, 1) + } else { + require.Len(t, chainAddrs, 3) + } + containsForwarder := false + for _, tv := range chainAddrs { + if tv.Type == keystone.KeystoneForwarder { + containsForwarder = true + break + } + } + require.True(t, containsForwarder, "no forwarder found in %v on chain %d for target don", chainAddrs, sel) + } + req := &keystone.GetContractSetsRequest{ + Chains: env.Chains, + AddressBook: ad, + } + + contractSetsResp, err := keystone.GetContractSets(lggr, req) + require.NoError(t, err) + require.Len(t, contractSetsResp.ContractSets, len(env.Chains)) + // check the registry + regChainContracts, ok := contractSetsResp.ContractSets[sepoliaChainSel] + require.True(t, ok) + gotRegistry := regChainContracts.CapabilitiesRegistry + require.NotNil(t, gotRegistry) + // contract reads + gotDons, err := gotRegistry.GetDONs(&bind.CallOpts{}) + if err != nil { + err = keystone.DecodeErr(kcr.CapabilitiesRegistryABI, err) + require.Fail(t, fmt.Sprintf("failed to get Dons from registry at %s: %s", gotRegistry.Address().String(), err)) + } + require.NoError(t, err) + assert.Len(t, gotDons, len(deployReq.Dons)) + + for n, info := range deployResp.DonInfos { + found := false + for _, gdon := range gotDons { + if gdon.Id == info.Id { + found = true + assert.EqualValues(t, info, gdon) + break + } + } + require.True(t, found, "don %s not found in registry", n) + } + // check the forwarder + for _, cs := range 
contractSetsResp.ContractSets { + forwarder := cs.Forwarder + require.NotNil(t, forwarder) + // any read to ensure that the contract is deployed correctly + _, err := forwarder.Owner(&bind.CallOpts{}) + require.NoError(t, err) + // TODO expand this test; there is no get method on the forwarder so unclear how to test it + } + // check the ocr3 contract + for chainSel, cs := range contractSetsResp.ContractSets { + if chainSel != sepoliaChainSel { + require.Nil(t, cs.OCR3) + continue + } + require.NotNil(t, cs.OCR3) + // any read to ensure that the contract is deployed correctly + _, err := cs.OCR3.LatestConfigDetails(&bind.CallOpts{}) + require.NoError(t, err) + } +} + +// TODO: Deprecated, remove everything below that leverages CLO + +func nodeOperatorsToIDs(t *testing.T, nops []*models.NodeOperator) (nodeIDs []keystone.NOP) { + for _, nop := range nops { + nodeOperator := keystone.NOP{ + Name: nop.Name, + } + for _, node := range nop.Nodes { + p2pID, err := clo.NodeP2PId(node) + require.NoError(t, err) + + nodeOperator.Nodes = append(nodeOperator.Nodes, p2pID) + } + nodeIDs = append(nodeIDs, nodeOperator) + } + return nodeIDs +} + +func TestDeployCLO(t *testing.T) { + lggr := logger.TestLogger(t) + wfNops := loadTestNops(t, "testdata/workflow_nodes.json") cwNops := loadTestNops(t, "testdata/chain_writer_nodes.json") assetNops := loadTestNops(t, "testdata/asset_nodes.json") @@ -35,23 +237,65 @@ func TestDeploy(t *testing.T) { require.Len(t, assetNops, 16) requireChains(t, assetNops, []models.ChainType{models.ChainTypeEvm}) + wfNodes := nodeOperatorsToIDs(t, wfNops) + cwNodes := nodeOperatorsToIDs(t, cwNops) + assetNodes := nodeOperatorsToIDs(t, assetNops) + wfDon := keystone.DonCapabilities{ Name: keystone.WFDonName, - Nops: wfNops, + Nops: wfNodes, Capabilities: []kcr.CapabilitiesRegistryCapability{keystone.OCR3Cap}, } cwDon := keystone.DonCapabilities{ Name: keystone.TargetDonName, - Nops: cwNops, + Nops: cwNodes, Capabilities: []kcr.CapabilitiesRegistryCapability{keystone.WriteChainCap}, } assetDon := keystone.DonCapabilities{ Name: keystone.StreamDonName, - Nops: assetNops, + Nops: assetNodes, Capabilities: []kcr.CapabilitiesRegistryCapability{keystone.StreamTriggerCap}, } - env := makeMultiDonTestEnv(t, lggr, []keystone.DonCapabilities{wfDon, cwDon, assetDon}) + var allNops []*models.NodeOperator + allNops = append(allNops, wfNops...) + allNops = append(allNops, cwNops...) + allNops = append(allNops, assetNops...) 
+ + chains := make(map[uint64]struct{}) + for _, nop := range allNops { + for _, node := range nop.Nodes { + for _, chain := range node.ChainConfigs { + // chain selector lib doesn't support chain id 2 and we don't use it in tests + // because it's not an evm chain + if chain.Network.ChainID == "2" { // aptos chain + continue + } + id, err := strconv.ParseUint(chain.Network.ChainID, 10, 64) + require.NoError(t, err, "failed to parse chain id to uint64") + chains[id] = struct{}{} + } + } + } + var chainIDs []uint64 + for c := range chains { + chainIDs = append(chainIDs, c) + } + allChains := memory.NewMemoryChainsWithChainIDs(t, chainIDs) + + env := &deployment.Environment{ + Name: "CLO", + ExistingAddresses: deployment.NewMemoryAddressBook(), + Offchain: clo.NewJobClient(lggr, clo.JobClientConfig{Nops: allNops}), + Chains: allChains, + Logger: lggr, + } + // assume that all the nodes in the provided input nops are part of the don + for _, nop := range allNops { + for _, node := range nop.Nodes { + env.NodeIDs = append(env.NodeIDs, node.ID) + } + } // sepolia; all nodes are on the this chain registryChainSel, err := chainsel.SelectorFromChainId(11155111) @@ -186,25 +430,6 @@ func requireChains(t *testing.T, donNops []*models.NodeOperator, cs []models.Cha } } -func makeMultiDonTestEnv(t *testing.T, lggr logger.Logger, dons []keystone.DonCapabilities) *deployment.Environment { - var donToEnv = make(map[string]*deployment.Environment) - // chain selector lib doesn't support chain id 2 and we don't use it in tests - // because it's not an evm chain - ignoreAptos := func(c *models.NodeChainConfig) bool { - return c.Network.ChainID == "2" // aptos chain - } - for _, don := range dons { - env := clo.NewDonEnvWithMemoryChains(t, clo.DonEnvConfig{ - DonName: don.Name, - Nops: don.Nops, - Logger: lggr, - }, ignoreAptos) - donToEnv[don.Name] = env - } - menv := clo.NewTestEnv(t, lggr, donToEnv) - return menv.Flatten("testing-env") -} - func loadTestNops(t *testing.T, pth string) []*models.NodeOperator { f, err := os.ReadFile(pth) require.NoError(t, err) diff --git a/deployment/keystone/types.go b/deployment/keystone/types.go index e01ec6d0d55..e5657657ed9 100644 --- a/deployment/keystone/types.go +++ b/deployment/keystone/types.go @@ -4,6 +4,7 @@ import ( "encoding/hex" "errors" "fmt" + "slices" "sort" "strconv" "strings" @@ -13,7 +14,6 @@ import ( chainsel "github.com/smartcontractkit/chain-selectors" "github.com/smartcontractkit/chainlink/deployment" - "github.com/smartcontractkit/chainlink/deployment/environment/clo/models" v1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node" @@ -100,8 +100,7 @@ func (o *ocr2Node) toNodeKeys() NodeKeys { AptosOnchainPublicKey: aptosOnchainPublicKey, } } - -func newOcr2NodeFromClo(n *models.Node, registryChainSel uint64) (*ocr2Node, error) { +func newOcr2NodeFromJD(n *Node, registryChainSel uint64) (*ocr2Node, error) { if n.PublicKey == nil { return nil, errors.New("no public key") } @@ -110,22 +109,22 @@ func newOcr2NodeFromClo(n *models.Node, registryChainSel uint64) (*ocr2Node, err return nil, errors.New("no chain configs") } // all nodes should have an evm chain config, specifically the registry chain - evmCC, err := registryChainConfig(n.ChainConfigs, chaintype.EVM, registryChainSel) + evmCC, err := registryChainConfig(n.ChainConfigs, v1.ChainType_CHAIN_TYPE_EVM, registryChainSel) if err != nil { return nil, fmt.Errorf("failed to get registry chain config for sel %d: %w", registryChainSel, err) } cfgs := 
map[chaintype.ChainType]*v1.ChainConfig{ chaintype.EVM: evmCC, } - aptosCC, exists := firstChainConfigByType(n.ChainConfigs, chaintype.Aptos) + aptosCC, exists := firstChainConfigByType(n.ChainConfigs, v1.ChainType_CHAIN_TYPE_APTOS) if exists { cfgs[chaintype.Aptos] = aptosCC } return newOcr2Node(n.ID, cfgs, *n.PublicKey) } -func ExtractKeys(n *models.Node, registerChainSel uint64) (p2p p2pkey.PeerID, signer [32]byte, encPubKey [32]byte, err error) { - orc2n, err := newOcr2NodeFromClo(n, registerChainSel) +func ExtractKeys(n *Node, registerChainSel uint64) (p2p p2pkey.PeerID, signer [32]byte, encPubKey [32]byte, err error) { + orc2n, err := newOcr2NodeFromJD(n, registerChainSel) if err != nil { return p2p, signer, encPubKey, fmt.Errorf("failed to create ocr2 node for node %s: %w", n.ID, err) } @@ -201,28 +200,52 @@ func makeNodeKeysSlice(nodes []*ocr2Node) []NodeKeys { return out } +type NOP struct { + Name string + Nodes []string // peerID +} + +func (v NOP) Validate() error { + if v.Name == "" { + return errors.New("name is empty") + } + if len(v.Nodes) == 0 { + return errors.New("no nodes") + } + for i, n := range v.Nodes { + _, err := p2pkey.MakePeerID(n) + if err != nil { + return fmt.Errorf("failed to nop %s: node %d is not valid peer id %s: %w", v.Name, i, n, err) + } + } + + return nil +} + // DonCapabilities is a set of capabilities hosted by a set of node operators // in is in a convenient form to handle the CLO representation of the nop data type DonCapabilities struct { Name string - Nops []*models.NodeOperator // each nop is a node operator and may have multiple nodes + Nops []NOP Capabilities []kcr.CapabilitiesRegistryCapability // every capability is hosted on each nop } -// map the node id to the NOP -func (dc DonCapabilities) nopsByNodeID(chainSelector uint64) (map[string]capabilities_registry.CapabilitiesRegistryNodeOperator, error) { - out := make(map[string]capabilities_registry.CapabilitiesRegistryNodeOperator) - for _, nop := range dc.Nops { - for _, node := range nop.Nodes { - a, err := AdminAddress(node, chainSelector) - if err != nil { - return nil, fmt.Errorf("failed to get admin address for node %s: %w", node.ID, err) - } - out[node.ID] = NodeOperator(nop.Name, a) - +func (v DonCapabilities) Validate() error { + if v.Name == "" { + return errors.New("name is empty") + } + if len(v.Nops) == 0 { + return errors.New("no nops") + } + for i, n := range v.Nops { + if err := n.Validate(); err != nil { + return fmt.Errorf("failed to validate nop %d '%s': %w", i, n.Name, err) } } - return out, nil + if len(v.Capabilities) == 0 { + return errors.New("no capabilities") + } + return nil } func NodeOperator(name string, adminAddress string) capabilities_registry.CapabilitiesRegistryNodeOperator { @@ -232,42 +255,63 @@ func NodeOperator(name string, adminAddress string) capabilities_registry.Capabi } } -func AdminAddress(n *models.Node, chainSel uint64) (string, error) { +func AdminAddress(n *Node, chainSel uint64) (string, error) { cid, err := chainsel.ChainIdFromSelector(chainSel) if err != nil { return "", fmt.Errorf("failed to get chain id from selector %d: %w", chainSel, err) } cidStr := strconv.FormatUint(cid, 10) for _, chain := range n.ChainConfigs { - if chain.Network.ChainID == cidStr { + //TODO validate chainType field + if chain.Chain.Id == cidStr { return chain.AdminAddress, nil } } return "", fmt.Errorf("no chain config for chain %d", cid) } -// helpers to maintain compatibility with the existing registration functions -// nodesToNops converts a list of 
DonCapabilities to a map of node id to NOP -func nodesToNops(dons []DonCapabilities, chainSel uint64) (map[string]capabilities_registry.CapabilitiesRegistryNodeOperator, error) { - out := make(map[string]capabilities_registry.CapabilitiesRegistryNodeOperator) +func nopsToNodes(donInfos []DonInfo, dons []DonCapabilities, chainSelector uint64) (map[capabilities_registry.CapabilitiesRegistryNodeOperator][]string, error) { + out := make(map[capabilities_registry.CapabilitiesRegistryNodeOperator][]string) for _, don := range dons { - nops, err := don.nopsByNodeID(chainSel) - if err != nil { - return nil, fmt.Errorf("failed to get registry NOPs for don %s: %w", don.Name, err) - } - for donName, nop := range nops { - _, exists := out[donName] - if exists { - continue + for _, nop := range don.Nops { + idx := slices.IndexFunc(donInfos, func(donInfo DonInfo) bool { + return donInfo.Name == don.Name + }) + if idx < 0 { + return nil, fmt.Errorf("couldn't find donInfo for %v", don.Name) + } + donInfo := donInfos[idx] + idx = slices.IndexFunc(donInfo.Nodes, func(node Node) bool { + return node.P2PID == nop.Nodes[0] + }) + if idx < 0 { + return nil, fmt.Errorf("couldn't find node with p2p_id %v", nop.Nodes[0]) + } + node := donInfo.Nodes[idx] + a, err := AdminAddress(&node, chainSelector) + if err != nil { + return nil, fmt.Errorf("failed to get admin address for node %s: %w", node.ID, err) + } + nodeOperator := NodeOperator(nop.Name, a) + for _, node := range nop.Nodes { + + idx = slices.IndexFunc(donInfo.Nodes, func(n Node) bool { + return n.P2PID == node + }) + if idx < 0 { + return nil, fmt.Errorf("couldn't find node with p2p_id %v", node) + } + out[nodeOperator] = append(out[nodeOperator], donInfo.Nodes[idx].ID) + } - out[donName] = nop } } + return out, nil } // mapDonsToCaps converts a list of DonCapabilities to a map of don name to capabilities -func mapDonsToCaps(dons []DonCapabilities) map[string][]kcr.CapabilitiesRegistryCapability { +func mapDonsToCaps(dons []DonInfo) map[string][]kcr.CapabilitiesRegistryCapability { out := make(map[string][]kcr.CapabilitiesRegistryCapability) for _, don := range dons { out[don.Name] = don.Capabilities @@ -277,53 +321,48 @@ func mapDonsToCaps(dons []DonCapabilities) map[string][]kcr.CapabilitiesRegistry // mapDonsToNodes returns a map of don name to simplified representation of their nodes // all nodes must have evm config and ocr3 capability nodes are must also have an aptos chain config -func mapDonsToNodes(dons []DonCapabilities, excludeBootstraps bool, registryChainSel uint64) (map[string][]*ocr2Node, error) { +func mapDonsToNodes(dons []DonInfo, excludeBootstraps bool, registryChainSel uint64) (map[string][]*ocr2Node, error) { donToOcr2Nodes := make(map[string][]*ocr2Node) // get the nodes for each don from the offchain client, get ocr2 config from one of the chain configs for the node b/c // they are equivalent, and transform to ocr2node representation for _, don := range dons { - for _, nop := range don.Nops { - for _, node := range nop.Nodes { - ocr2n, err := newOcr2NodeFromClo(node, registryChainSel) - if err != nil { - return nil, fmt.Errorf("failed to create ocr2 node for node %s: %w", node.ID, err) - } - if excludeBootstraps && ocr2n.IsBoostrap { - continue - } - if _, ok := donToOcr2Nodes[don.Name]; !ok { - donToOcr2Nodes[don.Name] = make([]*ocr2Node, 0) - } - donToOcr2Nodes[don.Name] = append(donToOcr2Nodes[don.Name], ocr2n) - + for _, node := range don.Nodes { + ocr2n, err := newOcr2NodeFromJD(&node, registryChainSel) + if err != nil { + 
return nil, fmt.Errorf("failed to create ocr2 node for node %s: %w", node.ID, err) } + if excludeBootstraps && ocr2n.IsBoostrap { + continue + } + if _, ok := donToOcr2Nodes[don.Name]; !ok { + donToOcr2Nodes[don.Name] = make([]*ocr2Node, 0) + } + donToOcr2Nodes[don.Name] = append(donToOcr2Nodes[don.Name], ocr2n) } } return donToOcr2Nodes, nil } -func firstChainConfigByType(ccfgs []*models.NodeChainConfig, t chaintype.ChainType) (*v1.ChainConfig, bool) { +func firstChainConfigByType(ccfgs []*v1.ChainConfig, t v1.ChainType) (*v1.ChainConfig, bool) { for _, c := range ccfgs { - //nolint:staticcheck //ignore EqualFold it broke ci for some reason (go version skew btw local and ci?) - if strings.ToLower(c.Network.ChainType.String()) == strings.ToLower(string(t)) { - return chainConfigFromClo(c), true + if c.Chain.Type == t { + return c, true } } return nil, false } -func registryChainConfig(ccfgs []*models.NodeChainConfig, t chaintype.ChainType, sel uint64) (*v1.ChainConfig, error) { +func registryChainConfig(ccfgs []*v1.ChainConfig, t v1.ChainType, sel uint64) (*v1.ChainConfig, error) { chainId, err := chainsel.ChainIdFromSelector(sel) if err != nil { return nil, fmt.Errorf("failed to get chain id from selector %d: %w", sel, err) } chainIdStr := strconv.FormatUint(chainId, 10) for _, c := range ccfgs { - //nolint:staticcheck //ignore EqualFold it broke ci for some reason (go version skew btw local and ci?) - if strings.ToLower(c.Network.ChainType.String()) == strings.ToLower(string(t)) && c.Network.ChainID == chainIdStr { - return chainConfigFromClo(c), nil + if c.Chain.Type == t && c.Chain.Id == chainIdStr { + return c, nil } } return nil, fmt.Errorf("no chain config for chain %d", chainId) @@ -350,7 +389,7 @@ func (d RegisteredDon) signers() []common.Address { return out } -func joinInfoAndNodes(donInfos map[string]kcr.CapabilitiesRegistryDONInfo, dons []DonCapabilities, registryChainSel uint64) ([]RegisteredDon, error) { +func joinInfoAndNodes(donInfos map[string]kcr.CapabilitiesRegistryDONInfo, dons []DonInfo, registryChainSel uint64) ([]RegisteredDon, error) { // all maps should have the same keys nodes, err := mapDonsToNodes(dons, true, registryChainSel) if err != nil { @@ -376,31 +415,6 @@ func joinInfoAndNodes(donInfos map[string]kcr.CapabilitiesRegistryDONInfo, dons return out, nil } -func chainConfigFromClo(chain *models.NodeChainConfig) *v1.ChainConfig { - return &v1.ChainConfig{ - Chain: &v1.Chain{ - Id: chain.Network.ChainID, - Type: v1.ChainType_CHAIN_TYPE_EVM, // TODO: support other chain types - }, - - AccountAddress: chain.AccountAddress, - AdminAddress: chain.AdminAddress, - Ocr2Config: &v1.OCR2Config{ - Enabled: chain.Ocr2Config.Enabled, - P2PKeyBundle: &v1.OCR2Config_P2PKeyBundle{ - PeerId: chain.Ocr2Config.P2pKeyBundle.PeerID, - PublicKey: chain.Ocr2Config.P2pKeyBundle.PublicKey, - }, - OcrKeyBundle: &v1.OCR2Config_OCRKeyBundle{ - BundleId: chain.Ocr2Config.OcrKeyBundle.BundleID, - OnchainSigningAddress: chain.Ocr2Config.OcrKeyBundle.OnchainSigningAddress, - OffchainPublicKey: chain.Ocr2Config.OcrKeyBundle.OffchainPublicKey, - ConfigPublicKey: chain.Ocr2Config.OcrKeyBundle.ConfigPublicKey, - }, - }, - } -} - var emptyAddr = "0x0000000000000000000000000000000000000000" // compute the admin address from the string. 
If the address is empty, replaces the 0s with fs diff --git a/deployment/keystone/types_test.go b/deployment/keystone/types_test.go index 69b2e39a8f1..925649bba0d 100644 --- a/deployment/keystone/types_test.go +++ b/deployment/keystone/types_test.go @@ -1,19 +1,11 @@ package keystone import ( - "encoding/json" - "os" - "strconv" "testing" "github.com/stretchr/testify/assert" - "github.com/test-go/testify/require" - - chainsel "github.com/smartcontractkit/chain-selectors" v1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node" - "github.com/smartcontractkit/chainlink/deployment/environment/clo/models" - kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" ) @@ -140,271 +132,271 @@ func Test_newOcr2Node(t *testing.T) { } } -func Test_mapDonsToNodes(t *testing.T) { - var ( - pubKey = "03dacd15fc96c965c648e3623180de002b71a97cf6eeca9affb91f461dcd6ce1" - evmSig = "b35409a8d4f9a18da55c5b2bb08a3f5f68d44442" - aptosSig = "b35409a8d4f9a18da55c5b2bb08a3f5f68d44442b35409a8d4f9a18da55c5b2bb08a3f5f68d44442" - peerID = "p2p_12D3KooWMWUKdoAc2ruZf9f55p7NVFj7AFiPm67xjQ8BZBwkqyYv" - // todo: these should be defined in common - writerCap = 3 - ocr3Cap = 2 - registryChainSel = chainsel.ETHEREUM_TESTNET_SEPOLIA.Selector - registryChainID = strconv.FormatUint(chainsel.ETHEREUM_TESTNET_SEPOLIA.EvmChainID, 10) - ) - type args struct { - dons []DonCapabilities - excludeBootstraps bool - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - name: "writer evm only", - args: args{ - dons: []DonCapabilities{ - { - Name: "ok writer", - Nops: []*models.NodeOperator{ - { - Nodes: []*models.Node{ - { - PublicKey: &pubKey, - ChainConfigs: []*models.NodeChainConfig{ - { - ID: "1", - Network: &models.Network{ - ChainType: models.ChainTypeEvm, - ChainID: registryChainID, - }, - Ocr2Config: &models.NodeOCR2Config{ - P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ - PeerID: peerID, - }, - OcrKeyBundle: &models.NodeOCR2ConfigOCRKeyBundle{ - ConfigPublicKey: pubKey, - OffchainPublicKey: pubKey, - OnchainSigningAddress: evmSig, - }, - }, - }, - }, - }, - }, - }, - }, - Capabilities: []kcr.CapabilitiesRegistryCapability{ - { - LabelledName: "writer", - Version: "1", - CapabilityType: uint8(writerCap), - }, - }, - }, - }, - }, - wantErr: false, - }, - { - name: "err if no evm chain", - args: args{ - dons: []DonCapabilities{ - { - Name: "bad chain", - Nops: []*models.NodeOperator{ - { - Nodes: []*models.Node{ - { - PublicKey: &pubKey, - ChainConfigs: []*models.NodeChainConfig{ - { - ID: "1", - Network: &models.Network{ - ChainType: models.ChainTypeSolana, - }, - Ocr2Config: &models.NodeOCR2Config{ - P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ - PeerID: peerID, - }, - OcrKeyBundle: &models.NodeOCR2ConfigOCRKeyBundle{ - ConfigPublicKey: pubKey, - OffchainPublicKey: pubKey, - OnchainSigningAddress: evmSig, - }, - }, - }, - }, - }, - }, - }, - }, - Capabilities: []kcr.CapabilitiesRegistryCapability{ - { - LabelledName: "writer", - Version: "1", - CapabilityType: uint8(writerCap), - }, - }, - }, - }, - }, - wantErr: true, - }, - { - name: "ocr3 cap evm only", - args: args{ - dons: []DonCapabilities{ - { - Name: "bad chain", - Nops: []*models.NodeOperator{ - { - Nodes: []*models.Node{ - { - PublicKey: &pubKey, - ChainConfigs: []*models.NodeChainConfig{ - { - ID: "1", - Network: &models.Network{ - ChainType: models.ChainTypeEvm, - ChainID: registryChainID, - }, - 
Ocr2Config: &models.NodeOCR2Config{ - P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ - PeerID: peerID, - }, - OcrKeyBundle: &models.NodeOCR2ConfigOCRKeyBundle{ - ConfigPublicKey: pubKey, - OffchainPublicKey: pubKey, - OnchainSigningAddress: evmSig, - }, - }, - }, - }, - }, - }, - }, - }, - Capabilities: []kcr.CapabilitiesRegistryCapability{ - { - LabelledName: "ocr3", - Version: "1", - CapabilityType: uint8(ocr3Cap), - }, - }, - }, - }, - }, - wantErr: false, - }, - { - name: "ocr3 cap evm & aptos", - args: args{ - dons: []DonCapabilities{ - { - Name: "ok chain", - Nops: []*models.NodeOperator{ - { - Nodes: []*models.Node{ - { - PublicKey: &pubKey, - ChainConfigs: []*models.NodeChainConfig{ - { - ID: "1", - Network: &models.Network{ - ChainType: models.ChainTypeEvm, - ChainID: registryChainID, - }, - Ocr2Config: &models.NodeOCR2Config{ - P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ - PeerID: peerID, - }, - OcrKeyBundle: &models.NodeOCR2ConfigOCRKeyBundle{ - ConfigPublicKey: pubKey, - OffchainPublicKey: pubKey, - OnchainSigningAddress: evmSig, - }, - }, - }, - { - ID: "2", - Network: &models.Network{ - ChainType: models.ChainTypeAptos, - }, - Ocr2Config: &models.NodeOCR2Config{ - P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ - PeerID: peerID, - }, - OcrKeyBundle: &models.NodeOCR2ConfigOCRKeyBundle{ - ConfigPublicKey: pubKey, - OffchainPublicKey: pubKey, - OnchainSigningAddress: aptosSig, - }, - }, - }, - }, - }, - }, - }, - }, - Capabilities: []kcr.CapabilitiesRegistryCapability{ - { - LabelledName: "ocr3", - Version: "1", - CapabilityType: uint8(ocr3Cap), - }, - }, - }, - }, - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := mapDonsToNodes(tt.args.dons, tt.args.excludeBootstraps, registryChainSel) - if (err != nil) != tt.wantErr { - t.Errorf("mapDonsToNodes() error = %v, wantErr %v", err, tt.wantErr) - return - } - }) - } - // make sure the clo test data is correct - wfNops := loadTestNops(t, "testdata/workflow_nodes.json") - cwNops := loadTestNops(t, "testdata/chain_writer_nodes.json") - assetNops := loadTestNops(t, "testdata/asset_nodes.json") - require.Len(t, wfNops, 10) - require.Len(t, cwNops, 10) - require.Len(t, assetNops, 16) +// func Test_mapDonsToNodes(t *testing.T) { +// var ( +// pubKey = "03dacd15fc96c965c648e3623180de002b71a97cf6eeca9affb91f461dcd6ce1" +// evmSig = "b35409a8d4f9a18da55c5b2bb08a3f5f68d44442" +// aptosSig = "b35409a8d4f9a18da55c5b2bb08a3f5f68d44442b35409a8d4f9a18da55c5b2bb08a3f5f68d44442" +// peerID = "p2p_12D3KooWMWUKdoAc2ruZf9f55p7NVFj7AFiPm67xjQ8BZBwkqyYv" +// // todo: these should be defined in common +// writerCap = 3 +// ocr3Cap = 2 +// registryChainSel = chainsel.ETHEREUM_TESTNET_SEPOLIA.Selector +// registryChainID = strconv.FormatUint(chainsel.ETHEREUM_TESTNET_SEPOLIA.EvmChainID, 10) +// ) +// type args struct { +// dons []DonCapabilities +// excludeBootstraps bool +// } +// tests := []struct { +// name string +// args args +// wantErr bool +// }{ +// { +// name: "writer evm only", +// args: args{ +// dons: []DonCapabilities{ +// { +// Name: "ok writer", +// Nops: []*models.NodeOperator{ +// { +// Nodes: []*models.Node{ +// { +// PublicKey: &pubKey, +// ChainConfigs: []*models.NodeChainConfig{ +// { +// ID: "1", +// Network: &models.Network{ +// ChainType: models.ChainTypeEvm, +// ChainID: registryChainID, +// }, +// Ocr2Config: &models.NodeOCR2Config{ +// P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ +// PeerID: peerID, +// }, +// OcrKeyBundle: 
&models.NodeOCR2ConfigOCRKeyBundle{ +// ConfigPublicKey: pubKey, +// OffchainPublicKey: pubKey, +// OnchainSigningAddress: evmSig, +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// Capabilities: []kcr.CapabilitiesRegistryCapability{ +// { +// LabelledName: "writer", +// Version: "1", +// CapabilityType: uint8(writerCap), +// }, +// }, +// }, +// }, +// }, +// wantErr: false, +// }, +// { +// name: "err if no evm chain", +// args: args{ +// dons: []DonCapabilities{ +// { +// Name: "bad chain", +// Nops: []*models.NodeOperator{ +// { +// Nodes: []*models.Node{ +// { +// PublicKey: &pubKey, +// ChainConfigs: []*models.NodeChainConfig{ +// { +// ID: "1", +// Network: &models.Network{ +// ChainType: models.ChainTypeSolana, +// }, +// Ocr2Config: &models.NodeOCR2Config{ +// P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ +// PeerID: peerID, +// }, +// OcrKeyBundle: &models.NodeOCR2ConfigOCRKeyBundle{ +// ConfigPublicKey: pubKey, +// OffchainPublicKey: pubKey, +// OnchainSigningAddress: evmSig, +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// Capabilities: []kcr.CapabilitiesRegistryCapability{ +// { +// LabelledName: "writer", +// Version: "1", +// CapabilityType: uint8(writerCap), +// }, +// }, +// }, +// }, +// }, +// wantErr: true, +// }, +// { +// name: "ocr3 cap evm only", +// args: args{ +// dons: []DonCapabilities{ +// { +// Name: "bad chain", +// Nops: []*models.NodeOperator{ +// { +// Nodes: []*models.Node{ +// { +// PublicKey: &pubKey, +// ChainConfigs: []*models.NodeChainConfig{ +// { +// ID: "1", +// Network: &models.Network{ +// ChainType: models.ChainTypeEvm, +// ChainID: registryChainID, +// }, +// Ocr2Config: &models.NodeOCR2Config{ +// P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ +// PeerID: peerID, +// }, +// OcrKeyBundle: &models.NodeOCR2ConfigOCRKeyBundle{ +// ConfigPublicKey: pubKey, +// OffchainPublicKey: pubKey, +// OnchainSigningAddress: evmSig, +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// Capabilities: []kcr.CapabilitiesRegistryCapability{ +// { +// LabelledName: "ocr3", +// Version: "1", +// CapabilityType: uint8(ocr3Cap), +// }, +// }, +// }, +// }, +// }, +// wantErr: false, +// }, +// { +// name: "ocr3 cap evm & aptos", +// args: args{ +// dons: []DonCapabilities{ +// { +// Name: "ok chain", +// Nops: []*models.NodeOperator{ +// { +// Nodes: []*models.Node{ +// { +// PublicKey: &pubKey, +// ChainConfigs: []*models.NodeChainConfig{ +// { +// ID: "1", +// Network: &models.Network{ +// ChainType: models.ChainTypeEvm, +// ChainID: registryChainID, +// }, +// Ocr2Config: &models.NodeOCR2Config{ +// P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ +// PeerID: peerID, +// }, +// OcrKeyBundle: &models.NodeOCR2ConfigOCRKeyBundle{ +// ConfigPublicKey: pubKey, +// OffchainPublicKey: pubKey, +// OnchainSigningAddress: evmSig, +// }, +// }, +// }, +// { +// ID: "2", +// Network: &models.Network{ +// ChainType: models.ChainTypeAptos, +// }, +// Ocr2Config: &models.NodeOCR2Config{ +// P2pKeyBundle: &models.NodeOCR2ConfigP2PKeyBundle{ +// PeerID: peerID, +// }, +// OcrKeyBundle: &models.NodeOCR2ConfigOCRKeyBundle{ +// ConfigPublicKey: pubKey, +// OffchainPublicKey: pubKey, +// OnchainSigningAddress: aptosSig, +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// Capabilities: []kcr.CapabilitiesRegistryCapability{ +// { +// LabelledName: "ocr3", +// Version: "1", +// CapabilityType: uint8(ocr3Cap), +// }, +// }, +// }, +// }, +// }, +// wantErr: false, +// }, +// } +// for _, tt := range tests { +// t.Run(tt.name, func(t *testing.T) { 
+// _, err := mapDonsToNodes(tt.args.dons, tt.args.excludeBootstraps, registryChainSel) +// if (err != nil) != tt.wantErr { +// t.Errorf("mapDonsToNodes() error = %v, wantErr %v", err, tt.wantErr) +// return +// } +// }) +// } +// // make sure the clo test data is correct +// wfNops := loadTestNops(t, "testdata/workflow_nodes.json") +// cwNops := loadTestNops(t, "testdata/chain_writer_nodes.json") +// assetNops := loadTestNops(t, "testdata/asset_nodes.json") +// require.Len(t, wfNops, 10) +// require.Len(t, cwNops, 10) +// require.Len(t, assetNops, 16) - wfDon := DonCapabilities{ - Name: WFDonName, - Nops: wfNops, - Capabilities: []kcr.CapabilitiesRegistryCapability{OCR3Cap}, - } - cwDon := DonCapabilities{ - Name: TargetDonName, - Nops: cwNops, - Capabilities: []kcr.CapabilitiesRegistryCapability{WriteChainCap}, - } - assetDon := DonCapabilities{ - Name: StreamDonName, - Nops: assetNops, - Capabilities: []kcr.CapabilitiesRegistryCapability{StreamTriggerCap}, - } - _, err := mapDonsToNodes([]DonCapabilities{wfDon}, false, registryChainSel) - require.NoError(t, err, "failed to map wf don") - _, err = mapDonsToNodes([]DonCapabilities{cwDon}, false, registryChainSel) - require.NoError(t, err, "failed to map cw don") - _, err = mapDonsToNodes([]DonCapabilities{assetDon}, false, registryChainSel) - require.NoError(t, err, "failed to map asset don") -} +// wfDon := DonCapabilities{ +// Name: WFDonName, +// Nops: wfNops, +// Capabilities: []kcr.CapabilitiesRegistryCapability{OCR3Cap}, +// } +// cwDon := DonCapabilities{ +// Name: TargetDonName, +// Nops: cwNops, +// Capabilities: []kcr.CapabilitiesRegistryCapability{WriteChainCap}, +// } +// assetDon := DonCapabilities{ +// Name: StreamDonName, +// Nops: assetNops, +// Capabilities: []kcr.CapabilitiesRegistryCapability{StreamTriggerCap}, +// } +// _, err := mapDonsToNodes([]DonCapabilities{wfDon}, false, registryChainSel) +// require.NoError(t, err, "failed to map wf don") +// _, err = mapDonsToNodes([]DonCapabilities{cwDon}, false, registryChainSel) +// require.NoError(t, err, "failed to map cw don") +// _, err = mapDonsToNodes([]DonCapabilities{assetDon}, false, registryChainSel) +// require.NoError(t, err, "failed to map asset don") +// } -func loadTestNops(t *testing.T, pth string) []*models.NodeOperator { - f, err := os.ReadFile(pth) - require.NoError(t, err) - var nops []*models.NodeOperator - require.NoError(t, json.Unmarshal(f, &nops)) - return nops -} +// func loadTestNops(t *testing.T, pth string) []*models.NodeOperator { +// f, err := os.ReadFile(pth) +// require.NoError(t, err) +// var nops []*models.NodeOperator +// require.NoError(t, json.Unmarshal(f, &nops)) +// return nops +// } diff --git a/docs/CONFIG.md b/docs/CONFIG.md index 7d4c34e7531..47ba5b574cd 100644 --- a/docs/CONFIG.md +++ b/docs/CONFIG.md @@ -10079,6 +10079,7 @@ OCR2CacheTTL = '1m' # Default TxTimeout = '1m' # Default TxRetryTimeout = '10s' # Default TxConfirmTimeout = '30s' # Default +TxRetentionTimeout = '0s' # Default SkipPreflight = true # Default Commitment = 'confirmed' # Default MaxRetries = 0 # Default @@ -10148,6 +10149,12 @@ TxConfirmTimeout = '30s' # Default ``` TxConfirmTimeout is the duration to wait when confirming a tx signature, before discarding as unconfirmed. +### TxRetentionTimeout +```toml +TxRetentionTimeout = '0s' # Default +``` +TxRetentionTimeout is the duration to retain transactions in storage after being marked as finalized or errored. Set to 0 to immediately drop transactions. 
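As a usage illustration for the key documented above, here is a minimal TOML sketch with a non-default retention window. The `10m` value is an assumption for illustration only (it is not part of this change), and `TxConfirmTimeout` is shown alongside it purely for context:

```toml
TxConfirmTimeout = '30s'    # default, shown for context
TxRetentionTimeout = '10m'  # assumed example: retain finalized/errored transactions for 10 minutes
```

Any positive duration keeps finalized or errored transactions in storage for that long before they are dropped, whereas the default `'0s'` drops them immediately.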
+ ### SkipPreflight ```toml SkipPreflight = true # Default diff --git a/go.mod b/go.mod index c96444a4260..2b6f03333c0 100644 --- a/go.mod +++ b/go.mod @@ -74,15 +74,15 @@ require ( github.com/scylladb/go-reflectx v1.0.1 github.com/shirou/gopsutil/v3 v3.24.3 github.com/shopspring/decimal v1.4.0 - github.com/smartcontractkit/chain-selectors v1.0.27 + github.com/smartcontractkit/chain-selectors v1.0.29 github.com/smartcontractkit/chainlink-automation v0.8.1 github.com/smartcontractkit/chainlink-ccip v0.0.0-20241112095015-3e85d9f1898b - github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6 + github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371 github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e github.com/smartcontractkit/chainlink-feeds v0.1.1 github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 - github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930 + github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669 github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8 github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12 github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20241009055228-33d0c0bf38de diff --git a/go.sum b/go.sum index c927d19fcfb..13217384ff6 100644 --- a/go.sum +++ b/go.sum @@ -1072,14 +1072,14 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartcontractkit/chain-selectors v1.0.27 h1:VE/ftX9Aae4gnw67yR1raKi+30iWKL/sWq8uyiLHM8k= -github.com/smartcontractkit/chain-selectors v1.0.27/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= +github.com/smartcontractkit/chain-selectors v1.0.29 h1:aZ9+OoUSMn4nqnissHtDvDoKR7JONfDqTHX3MHYIUIE= +github.com/smartcontractkit/chain-selectors v1.0.29/go.mod h1:xsKM0aN3YGcQKTPRPDDtPx2l4mlTN1Djmg0VVXV40b8= github.com/smartcontractkit/chainlink-automation v0.8.1 h1:sTc9LKpBvcKPc1JDYAmgBc2xpDKBco/Q4h4ydl6+UUU= github.com/smartcontractkit/chainlink-automation v0.8.1/go.mod h1:Iij36PvWZ6blrdC5A/nrQUBuf3MH3JvsBB9sSyc9W08= github.com/smartcontractkit/chainlink-ccip v0.0.0-20241112095015-3e85d9f1898b h1:4kmZtaQ4fXwduHnw9xk5VmiIOW4nHg/Mx6iidlZJt5o= github.com/smartcontractkit/chainlink-ccip v0.0.0-20241112095015-3e85d9f1898b/go.mod h1:4adKaHNaxFsRvV/lYfqtbsWyyvIPUMLR0FdOJN/ljis= -github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6 h1:yJNBWCdNL/X8+wEs3TGTBe9gssMmw5FTFxxrlo+0mVo= -github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371 h1:vnNqMaAvheZgR8IDMGw0QIV1Qen3XTh7IChwW40SNfU= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f h1:BwrIaQIx5Iy6eT+DfLhFfK2XqjxRm74mVdlX8gbu4dw= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f/go.mod 
h1:wHtwSR3F1CQSJJZDQKuqaqFYnvkT+kMyget7dl8Clvo= github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e h1:JiETqdNM0bktAUGMc62COwXIaw3rR3M77Me6bBLG0Fg= @@ -1088,8 +1088,8 @@ github.com/smartcontractkit/chainlink-feeds v0.1.1 h1:JzvUOM/OgGQA1sOqTXXl52R6An github.com/smartcontractkit/chainlink-feeds v0.1.1/go.mod h1:55EZ94HlKCfAsUiKUTNI7QlE/3d3IwTlsU3YNa/nBb4= github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 h1:PBUaFfPLm+Efq7H9kdfGBivH+QhJ6vB5EZTR/sCZsxI= github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= -github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930 h1:blu++xbH/NSb+ii5hI4jczwojZ7Hc1ERXjpt/krYy9c= -github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930/go.mod h1:mGmRvlk54ufCufV4EBWizOGtXoXfePoFAuYEVC8EwdY= +github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669 h1:CBQ9ORUtGUvCr3dAm/qjpdHlYuB1SRIwtYw5LV8SLys= +github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669/go.mod h1:mGmRvlk54ufCufV4EBWizOGtXoXfePoFAuYEVC8EwdY= github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8 h1:B4DFdk6MGcQnoCjjMBCx7Z+GWQpxRWJ4O8W/dVJyWGA= github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8/go.mod h1:WkBqgBo+g34Gm5vWkDDl8Fh3Mzd7bF5hXp7rryg0t5o= github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 h1:12ijqMM9tvYVEm+nR826WsrNi6zCKpwBhuApq127wHs= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index 1d5211adfe4..aba17e10397 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -34,10 +34,10 @@ require ( github.com/segmentio/ksuid v1.0.4 github.com/shopspring/decimal v1.4.0 github.com/slack-go/slack v0.15.0 - github.com/smartcontractkit/chain-selectors v1.0.27 + github.com/smartcontractkit/chain-selectors v1.0.29 github.com/smartcontractkit/chainlink-automation v0.8.1 github.com/smartcontractkit/chainlink-ccip v0.0.0-20241112095015-3e85d9f1898b - github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6 + github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371 github.com/smartcontractkit/chainlink-protos/job-distributor v0.4.0 github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.2 github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.13 @@ -418,7 +418,7 @@ require ( github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e // indirect github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 // indirect - github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930 // indirect + github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669 // indirect github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8 // indirect github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 // indirect github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20241009055228-33d0c0bf38de // indirect diff --git a/integration-tests/go.sum b/integration-tests/go.sum index 682f4bd70f8..5e6793bbb0f 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1399,14 +1399,14 @@ github.com/slack-go/slack v0.15.0 h1:LE2lj2y9vqqiOf+qIIy0GvEoxgF1N5yLGZffmEZykt0 github.com/slack-go/slack 
v0.15.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86 h1:qQH6fZZe31nBAG6INHph3z5ysDTPptyu0TR9uoJ1+ok= github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86/go.mod h1:WtWOoVQQEHxRHL2hNmuRrvDfYfQG/CioFNoa9Rr2mBE= -github.com/smartcontractkit/chain-selectors v1.0.27 h1:VE/ftX9Aae4gnw67yR1raKi+30iWKL/sWq8uyiLHM8k= -github.com/smartcontractkit/chain-selectors v1.0.27/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= +github.com/smartcontractkit/chain-selectors v1.0.29 h1:aZ9+OoUSMn4nqnissHtDvDoKR7JONfDqTHX3MHYIUIE= +github.com/smartcontractkit/chain-selectors v1.0.29/go.mod h1:xsKM0aN3YGcQKTPRPDDtPx2l4mlTN1Djmg0VVXV40b8= github.com/smartcontractkit/chainlink-automation v0.8.1 h1:sTc9LKpBvcKPc1JDYAmgBc2xpDKBco/Q4h4ydl6+UUU= github.com/smartcontractkit/chainlink-automation v0.8.1/go.mod h1:Iij36PvWZ6blrdC5A/nrQUBuf3MH3JvsBB9sSyc9W08= github.com/smartcontractkit/chainlink-ccip v0.0.0-20241112095015-3e85d9f1898b h1:4kmZtaQ4fXwduHnw9xk5VmiIOW4nHg/Mx6iidlZJt5o= github.com/smartcontractkit/chainlink-ccip v0.0.0-20241112095015-3e85d9f1898b/go.mod h1:4adKaHNaxFsRvV/lYfqtbsWyyvIPUMLR0FdOJN/ljis= -github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6 h1:yJNBWCdNL/X8+wEs3TGTBe9gssMmw5FTFxxrlo+0mVo= -github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371 h1:vnNqMaAvheZgR8IDMGw0QIV1Qen3XTh7IChwW40SNfU= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f h1:BwrIaQIx5Iy6eT+DfLhFfK2XqjxRm74mVdlX8gbu4dw= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f/go.mod h1:wHtwSR3F1CQSJJZDQKuqaqFYnvkT+kMyget7dl8Clvo= github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e h1:JiETqdNM0bktAUGMc62COwXIaw3rR3M77Me6bBLG0Fg= @@ -1417,8 +1417,8 @@ github.com/smartcontractkit/chainlink-protos/job-distributor v0.4.0 h1:1xTm8UGeD github.com/smartcontractkit/chainlink-protos/job-distributor v0.4.0/go.mod h1:/dVVLXrsp+V0AbcYGJo3XMzKg3CkELsweA/TTopCsKE= github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 h1:PBUaFfPLm+Efq7H9kdfGBivH+QhJ6vB5EZTR/sCZsxI= github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= -github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930 h1:blu++xbH/NSb+ii5hI4jczwojZ7Hc1ERXjpt/krYy9c= -github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930/go.mod h1:mGmRvlk54ufCufV4EBWizOGtXoXfePoFAuYEVC8EwdY= +github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669 h1:CBQ9ORUtGUvCr3dAm/qjpdHlYuB1SRIwtYw5LV8SLys= +github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669/go.mod h1:mGmRvlk54ufCufV4EBWizOGtXoXfePoFAuYEVC8EwdY= github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8 h1:B4DFdk6MGcQnoCjjMBCx7Z+GWQpxRWJ4O8W/dVJyWGA= github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8/go.mod h1:WkBqgBo+g34Gm5vWkDDl8Fh3Mzd7bF5hXp7rryg0t5o= github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.2 
h1:GDGrC5OGiV0RyM1znYWehSQXyZQWTOzrEeJRYmysPCE= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index 780b2daf4a6..c89baf21bd9 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -17,7 +17,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/rs/zerolog v1.33.0 github.com/slack-go/slack v0.15.0 - github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6 + github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371 github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.13 github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.5 github.com/smartcontractkit/chainlink-testing-framework/wasp v1.50.2 @@ -420,12 +420,12 @@ require ( github.com/shoenig/test v0.6.6 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/smartcontractkit/chain-selectors v1.0.27 // indirect + github.com/smartcontractkit/chain-selectors v1.0.29 // indirect github.com/smartcontractkit/chainlink-automation v0.8.1 // indirect github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e // indirect github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 // indirect - github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930 // indirect + github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669 // indirect github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8 // indirect github.com/smartcontractkit/chainlink-testing-framework/lib/grafana v1.50.0 // indirect github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12 // indirect diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index 3d849414c2c..f2c309ea33a 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1388,14 +1388,14 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/slack-go/slack v0.15.0 h1:LE2lj2y9vqqiOf+qIIy0GvEoxgF1N5yLGZffmEZykt0= github.com/slack-go/slack v0.15.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= -github.com/smartcontractkit/chain-selectors v1.0.27 h1:VE/ftX9Aae4gnw67yR1raKi+30iWKL/sWq8uyiLHM8k= -github.com/smartcontractkit/chain-selectors v1.0.27/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= +github.com/smartcontractkit/chain-selectors v1.0.29 h1:aZ9+OoUSMn4nqnissHtDvDoKR7JONfDqTHX3MHYIUIE= +github.com/smartcontractkit/chain-selectors v1.0.29/go.mod h1:xsKM0aN3YGcQKTPRPDDtPx2l4mlTN1Djmg0VVXV40b8= github.com/smartcontractkit/chainlink-automation v0.8.1 h1:sTc9LKpBvcKPc1JDYAmgBc2xpDKBco/Q4h4ydl6+UUU= github.com/smartcontractkit/chainlink-automation v0.8.1/go.mod h1:Iij36PvWZ6blrdC5A/nrQUBuf3MH3JvsBB9sSyc9W08= github.com/smartcontractkit/chainlink-ccip v0.0.0-20241112095015-3e85d9f1898b h1:4kmZtaQ4fXwduHnw9xk5VmiIOW4nHg/Mx6iidlZJt5o= github.com/smartcontractkit/chainlink-ccip v0.0.0-20241112095015-3e85d9f1898b/go.mod h1:4adKaHNaxFsRvV/lYfqtbsWyyvIPUMLR0FdOJN/ljis= -github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6 h1:yJNBWCdNL/X8+wEs3TGTBe9gssMmw5FTFxxrlo+0mVo= -github.com/smartcontractkit/chainlink-common v0.3.1-0.20241112140826-0e2daed34ef6/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= 
+github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371 h1:vnNqMaAvheZgR8IDMGw0QIV1Qen3XTh7IChwW40SNfU= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241113142256-8a7a997a0371/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f h1:BwrIaQIx5Iy6eT+DfLhFfK2XqjxRm74mVdlX8gbu4dw= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f/go.mod h1:wHtwSR3F1CQSJJZDQKuqaqFYnvkT+kMyget7dl8Clvo= github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e h1:JiETqdNM0bktAUGMc62COwXIaw3rR3M77Me6bBLG0Fg= @@ -1404,8 +1404,8 @@ github.com/smartcontractkit/chainlink-feeds v0.1.1 h1:JzvUOM/OgGQA1sOqTXXl52R6An github.com/smartcontractkit/chainlink-feeds v0.1.1/go.mod h1:55EZ94HlKCfAsUiKUTNI7QlE/3d3IwTlsU3YNa/nBb4= github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 h1:PBUaFfPLm+Efq7H9kdfGBivH+QhJ6vB5EZTR/sCZsxI= github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0/go.mod h1:m/A3lqD7ms/RsQ9BT5P2uceYY0QX5mIt4KQxT2G6qEo= -github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930 h1:blu++xbH/NSb+ii5hI4jczwojZ7Hc1ERXjpt/krYy9c= -github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112145241-efd6780f6930/go.mod h1:mGmRvlk54ufCufV4EBWizOGtXoXfePoFAuYEVC8EwdY= +github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669 h1:CBQ9ORUtGUvCr3dAm/qjpdHlYuB1SRIwtYw5LV8SLys= +github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241112213949-65ae13752669/go.mod h1:mGmRvlk54ufCufV4EBWizOGtXoXfePoFAuYEVC8EwdY= github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8 h1:B4DFdk6MGcQnoCjjMBCx7Z+GWQpxRWJ4O8W/dVJyWGA= github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241017135645-176a23722fd8/go.mod h1:WkBqgBo+g34Gm5vWkDDl8Fh3Mzd7bF5hXp7rryg0t5o= github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.2 h1:GDGrC5OGiV0RyM1znYWehSQXyZQWTOzrEeJRYmysPCE= diff --git a/deployment/ccip/changeset/messaging_test.go b/integration-tests/smoke/ccip_messaging_test.go similarity index 71% rename from deployment/ccip/changeset/messaging_test.go rename to integration-tests/smoke/ccip_messaging_test.go index a5fde58742b..55309598c8c 100644 --- a/deployment/ccip/changeset/messaging_test.go +++ b/integration-tests/smoke/ccip_messaging_test.go @@ -1,4 +1,4 @@ -package changeset +package smoke import ( "testing" @@ -6,13 +6,18 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + jobv1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/job" "github.com/smartcontractkit/chainlink/deployment" + ccdeploy "github.com/smartcontractkit/chainlink/deployment/ccip" ccipdeployment "github.com/smartcontractkit/chainlink/deployment/ccip" + "github.com/smartcontractkit/chainlink/deployment/ccip/changeset" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testsetups" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" "github.com/smartcontractkit/chainlink/v2/core/logger" - "github.com/test-go/testify/require" - "golang.org/x/exp/maps" ) type testCaseSetup struct { @@ -34,12 +39,13 @@ type messagingTestCaseOutput struct { nonce uint64 } -func Test_Messaging(t *testing.T) { - t.Parallel() - +func 
Test_CCIPMessaging(t *testing.T) { // Setup 2 chains and a single lane. - e := ccipdeployment.NewMemoryEnvironmentWithJobs(t, logger.TestLogger(t), 2, 4) - state, err := ccipdeployment.LoadOnchainState(e.Env) + lggr := logger.TestLogger(t) + ctx := ccdeploy.Context(t) + e, _, _ := testsetups.NewLocalDevEnvironment(t, lggr) + + state, err := ccdeploy.LoadOnchainState(e.Env) require.NoError(t, err) allChainSelectors := maps.Keys(e.Env.Chains) @@ -54,20 +60,37 @@ func Test_Messaging(t *testing.T) { ) tokenConfig := ccipdeployment.NewTestTokenConfig(state.Chains[e.FeedChainSel].USDFeeds) - newAddresses := deployment.NewMemoryAddressBook() - err = ccipdeployment.DeployCCIPContracts(e.Env, newAddresses, ccipdeployment.DeployCCIPContractConfig{ + // Apply migration + output, err := changeset.InitialDeploy(e.Env, ccdeploy.DeployCCIPContractConfig{ HomeChainSel: e.HomeChainSel, FeedChainSel: e.FeedChainSel, ChainsToDeploy: allChainSelectors, TokenConfig: tokenConfig, - MCMSConfig: ccipdeployment.NewTestMCMSConfig(t, e.Env), + MCMSConfig: ccdeploy.NewTestMCMSConfig(t, e.Env), OCRSecrets: deployment.XXXGenerateTestOCRSecrets(), }) require.NoError(t, err) - require.NoError(t, e.Env.ExistingAddresses.Merge(newAddresses)) - state, err = ccipdeployment.LoadOnchainState(e.Env) + require.NoError(t, e.Env.ExistingAddresses.Merge(output.AddressBook)) + // Get new state after migration. + state, err = ccdeploy.LoadOnchainState(e.Env) require.NoError(t, err) + // Ensure capreg logs are up to date. + ccdeploy.ReplayLogs(t, e.Env.Offchain, e.ReplayBlocks) + + // Apply the jobs. + for nodeID, jobs := range output.JobSpecs { + for _, job := range jobs { + // Note these auto-accept + _, err := e.Env.Offchain.ProposeJob(ctx, + &jobv1.ProposeJobRequest{ + NodeId: nodeID, + Spec: job, + }) + require.NoError(t, err) + } + } + // connect a single lane, source to dest require.NoError(t, ccipdeployment.AddLane(e.Env, state, sourceChain, destChain)) @@ -94,6 +117,8 @@ func Test_Messaging(t *testing.T) { }, common.HexToAddress("0xdead"), []byte("hello eoa"), + nil, // default extraArgs + ccipdeployment.EXECUTION_STATE_SUCCESS, // success because offRamp won't call an EOA ) }) @@ -106,6 +131,8 @@ func Test_Messaging(t *testing.T) { }, state.Chains[destChain].FeeQuoter.Address(), []byte("hello FeeQuoter"), + nil, // default extraArgs + ccipdeployment.EXECUTION_STATE_SUCCESS, // success because offRamp won't call a contract not implementing CCIPReceiver ) }) @@ -118,6 +145,8 @@ func Test_Messaging(t *testing.T) { }, state.Chains[destChain].Receiver.Address(), []byte("hello CCIPReceiver"), + nil, // default extraArgs + ccipdeployment.EXECUTION_STATE_SUCCESS, func(t *testing.T) { iter, err := state.Chains[destChain].Receiver.FilterMessageReceived(nil) require.NoError(t, err) @@ -136,17 +165,8 @@ func Test_Messaging(t *testing.T) { }, state.Chains[destChain].Receiver.Address(), []byte("hello CCIPReceiver with low exec gas"), - func(t *testing.T) { - // Message should not be emitted, not enough gas to emit log. - // TODO: this is still returning a log, probably the older one since FAILURE is the execution state. - // Not enough ctx in the message received log to confirm that it's from another test. - // Maybe check the log block number and assert that its < the header before block number from above? 
- // iter, err := ccipReceiver.FilterMessageReceived(&bind.FilterOpts{ - // Start: headerBefore.Number.Uint64(), - // }) - // require.NoError(t, err) - // require.False(t, iter.Next(), "MessageReceived should not be emitted in this test case since gas is too low") - }, + ccipdeployment.MakeEVMExtraArgsV2(1, false), // 1 gas is too low. + ccipdeployment.EXECUTION_STATE_FAILURE, // state would be failed onchain due to low gas ) }) } @@ -163,6 +183,8 @@ func runMessagingTestCase( tc messagingTestCase, receiver common.Address, msgData []byte, + extraArgs []byte, + expectedExecutionState int, extraAssertions ...func(t *testing.T), ) (out messagingTestCaseOutput) { // check latest nonce @@ -178,7 +200,7 @@ func runMessagingTestCase( Data: msgData, TokenAmounts: nil, FeeToken: common.HexToAddress("0x0"), - ExtraArgs: nil, + ExtraArgs: extraArgs, }) expectedSeqNum := make(map[uint64]uint64) expectedSeqNum[tc.destChain] = seqNum @@ -190,7 +212,17 @@ func runMessagingTestCase( } ccipdeployment.ConfirmCommitForAllWithExpectedSeqNums(tc.t, tc.deployedEnv.Env, tc.onchainState, expectedSeqNum, startBlocks) - ccipdeployment.ConfirmExecWithSeqNrForAll(tc.t, tc.deployedEnv.Env, tc.onchainState, expectedSeqNum, startBlocks) + execStates := ccipdeployment.ConfirmExecWithSeqNrForAll(tc.t, tc.deployedEnv.Env, tc.onchainState, expectedSeqNum, startBlocks) + + require.Equalf( + tc.t, + expectedExecutionState, + execStates[seqNum], + "wrong execution state for seq nr %d, expected %d, got %d", + seqNum, + expectedExecutionState, + execStates[seqNum], + ) // check the sender latestNonce on the dest, should be incremented latestNonce, err = tc.onchainState.Chains[tc.destChain].NonceManager.GetInboundNonce(&bind.CallOpts{ diff --git a/shell.nix b/shell.nix index e3b187dcd96..8d5b4351b25 100644 --- a/shell.nix +++ b/shell.nix @@ -1,7 +1,7 @@ {pkgs, isCrib}: with pkgs; let go = go_1_21; - postgresql = postgresql_14; + postgresql = postgresql_15; nodejs = nodejs-18_x; nodePackages = pkgs.nodePackages.override {inherit nodejs;}; pnpm = pnpm_9;
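A usage note on the messaging test changes above: `runMessagingTestCase` now takes an `extraArgs` payload and an `expectedExecutionState`, so each sub-test declares the execution outcome it expects. The Go fragment below sketches how a further case could pass explicit extra args while still expecting success. It assumes the surrounding test context from the existing sub-tests (the `messagingTestCase` value, `state`, `destChain`, and `out`), and the `300_000` gas limit is an illustrative assumption; the change itself uses `MakeEVMExtraArgsV2(1, false)` to force a failure.

```go
// Hypothetical additional sub-test body for ccip_messaging_test.go.
// `tc` stands in for the messagingTestCase value the existing sub-tests build;
// the helper names (MakeEVMExtraArgsV2, EXECUTION_STATE_SUCCESS) come from the diff above.
out = runMessagingTestCase(
	tc,
	state.Chains[destChain].Receiver.Address(),
	[]byte("hello CCIPReceiver, explicit extraArgs"),  // illustrative payload
	ccipdeployment.MakeEVMExtraArgsV2(300_000, false), // assumed gas limit high enough to execute
	ccipdeployment.EXECUTION_STATE_SUCCESS,            // expect successful execution on the dest chain
)
```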