diff --git a/.changelog/unreleased/features/ibc-integration-test/3455-async-icq-test.md b/.changelog/unreleased/features/ibc-integration-test/3455-async-icq-test.md new file mode 100644 index 0000000000..8be8d1f42b --- /dev/null +++ b/.changelog/unreleased/features/ibc-integration-test/3455-async-icq-test.md @@ -0,0 +1,2 @@ +- Add a test for asynchronous Interchain Query relaying + ([\#3455](https://github.com/informalsystems/hermes/issues/3455)) \ No newline at end of file diff --git a/.changelog/unreleased/features/ibc-integration-test/3778-ordered-channel-timeout.md b/.changelog/unreleased/features/ibc-integration-test/3778-ordered-channel-timeout.md new file mode 100644 index 0000000000..7a46b56ba9 --- /dev/null +++ b/.changelog/unreleased/features/ibc-integration-test/3778-ordered-channel-timeout.md @@ -0,0 +1,2 @@ +- Add an ICA test to assert a channel correctly closes after a packet times out + ([\#3778](https://github.com/informalsystems/hermes/issues/3778)) \ No newline at end of file diff --git a/.changelog/unreleased/features/ibc-relayer-cli/3402-lc-refresh.md b/.changelog/unreleased/features/ibc-relayer-cli/3402-lc-refresh.md new file mode 100644 index 0000000000..2e6c95edae --- /dev/null +++ b/.changelog/unreleased/features/ibc-relayer-cli/3402-lc-refresh.md @@ -0,0 +1,3 @@ +- Add a `client_refresh_rate` setting to specify the rate at which to + refresh clients referencing this chain, relative to its trusting period. + ([\#3402](https://github.com/informalsystems/hermes/issues/3402)) \ No newline at end of file diff --git a/.changelog/unreleased/features/ibc-relayer-cli/3672-clears-packet-seq.md b/.changelog/unreleased/features/ibc-relayer-cli/3672-clears-packet-seq.md new file mode 100644 index 0000000000..c721d17bc7 --- /dev/null +++ b/.changelog/unreleased/features/ibc-relayer-cli/3672-clears-packet-seq.md @@ -0,0 +1,18 @@ +- Add a `--packet-sequences` flag to the `clear packets`, `tx packet-recv`, and `tx packet-ack` commands. 
+ When this flag is specified, these commands will only clear the packets with the specified sequence numbers + on the given chain. If not provided, all pending packets will be cleared on both chains, as before. + + This flag takes either a single sequence number or a range of sequence numbers. + Each element of the comma-separated list must be either a single sequence number or + a range of sequence numbers. + + Examples: + - `10` will clear a single packet with sequence number `10` + - `1,2,3` will clear packets with sequence numbers `1, 2, 3` + - `1..5` will clear packets with sequence numbers `1, 2, 3, 4, 5` + - `..5` will clear packets with sequence numbers `1, 2, 3, 4, 5` + - `5..` will clear packets with sequence numbers greater than or equal to `5` + - `..5,10..20,25,30..` will clear packets with sequence numbers `1, 2, 3, 4, 5, 10, 11, ..., 20, 25, 30, 31, ...` + - `..5,10..20,25,30..` will clear packets with sequence numbers `1, 2, 3, 4, 5, 10, 11, ..., 20, 25, 30, 31, ...` + + ([\#3672](https://github.com/informalsystems/hermes/issues/3672)) diff --git a/.changelog/unreleased/features/ibc-relayer-cli/3743-query_packets_chunk_size.md b/.changelog/unreleased/features/ibc-relayer-cli/3743-query_packets_chunk_size.md new file mode 100644 index 0000000000..7c8044bbc3 --- /dev/null +++ b/.changelog/unreleased/features/ibc-relayer-cli/3743-query_packets_chunk_size.md @@ -0,0 +1,6 @@ +- Add a `query_packets_chunk_size` config option and a `--query- + packets-chunk-size` flag to the `clear packets` CLI to configure how + many packets to query at once from the chain when clearing pending + packets. Lower this setting if one or more of the packets you are + trying to clear are huge and make the packet query time out or fail. 
+ ([\#3743](https://github.com/informalsystems/hermes/issues/3743)) diff --git a/.changelog/unreleased/features/ibc-relayer/3766-max-memo-receiver-config.md b/.changelog/unreleased/features/ibc-relayer/3766-max-memo-receiver-config.md new file mode 100644 index 0000000000..aca27309b9 --- /dev/null +++ b/.changelog/unreleased/features/ibc-relayer/3766-max-memo-receiver-config.md @@ -0,0 +1,6 @@ +- Add two new packet configurations: + * `ics20_max_memo_size` which filters ICS20 packets with memo + field bigger than the configured value + * `ics20_max_receiver_size` which filters ICS20 packets with receiver + field bigger than the configured value + ([\#3766](https://github.com/informalsystems/hermes/issues/3766)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/ibc-relayer-cli/3745-compat-0.50.md b/.changelog/unreleased/improvements/ibc-relayer-cli/3745-compat-0.50.md new file mode 100644 index 0000000000..bc891fd71b --- /dev/null +++ b/.changelog/unreleased/improvements/ibc-relayer-cli/3745-compat-0.50.md @@ -0,0 +1,2 @@ +- Update compatibility check to allow IBC-Go 4.1.1 to 8.x and SDK 0.45.x to 0.50.x. + ([\#3745](https://github.com/informalsystems/hermes/issues/3745)) diff --git a/.changelog/v1.7.4/bug-fixes/ibc-relayer-cli/3697-fix-evidence-report.md b/.changelog/v1.7.4/bug-fixes/ibc-relayer-cli/3697-fix-evidence-report.md new file mode 100644 index 0000000000..232aa6c15a --- /dev/null +++ b/.changelog/v1.7.4/bug-fixes/ibc-relayer-cli/3697-fix-evidence-report.md @@ -0,0 +1,4 @@ +- Fix a bug in the `evidence` command which would sometimes + prevent the detected misbehaviour evidence from being submitted, + instead erroring out with a validator set hash mismatch. 
+ ([\#3697](https://github.com/informalsystems/hermes/pull/3697)) diff --git a/.changelog/v1.7.4/bug-fixes/ibc-relayer/3703-avoid-returning-stopped-worker.md b/.changelog/v1.7.4/bug-fixes/ibc-relayer/3703-avoid-returning-stopped-worker.md new file mode 100644 index 0000000000..c2ab504b5d --- /dev/null +++ b/.changelog/v1.7.4/bug-fixes/ibc-relayer/3703-avoid-returning-stopped-worker.md @@ -0,0 +1,3 @@ +- Avoid retrieving a worker which is being removed by the idle worker clean-up + process. + ([\#3703](https://github.com/informalsystems/hermes/issues/3703)) \ No newline at end of file diff --git a/.changelog/v1.7.4/bug-fixes/ibc-telemetry/3720-fix-broadcasting-errors-metric.md b/.changelog/v1.7.4/bug-fixes/ibc-telemetry/3720-fix-broadcasting-errors-metric.md new file mode 100644 index 0000000000..3da82edaf4 --- /dev/null +++ b/.changelog/v1.7.4/bug-fixes/ibc-telemetry/3720-fix-broadcasting-errors-metric.md @@ -0,0 +1,3 @@ +- Fix the issue where `broadcast_errors` metric would not correctly batch + the same errors together. + ([\#3720](https://github.com/informalsystems/hermes/issues/3720)) \ No newline at end of file diff --git a/.changelog/v1.7.4/bug-fixes/ibc-telemetry/3723-fix-backlog-metrics.md b/.changelog/v1.7.4/bug-fixes/ibc-telemetry/3723-fix-backlog-metrics.md new file mode 100644 index 0000000000..fa95766987 --- /dev/null +++ b/.changelog/v1.7.4/bug-fixes/ibc-telemetry/3723-fix-backlog-metrics.md @@ -0,0 +1,4 @@ +- Update the values of `backlog` metrics when clearing packets. + Change the `backlog_oldest_timestamp` to `backlog_latest_update_timestamp` + which shows the last time the `backlog` metrics have been updated. 
+ ([\#3723](https://github.com/informalsystems/hermes/issues/3723)) \ No newline at end of file diff --git a/.changelog/v1.7.4/summary.md b/.changelog/v1.7.4/summary.md new file mode 100644 index 0000000000..00b15e27f1 --- /dev/null +++ b/.changelog/v1.7.4/summary.md @@ -0,0 +1,9 @@ +*December 15th, 2023* + +This release improves the monitoring of Hermes instances by fixing the `broadcast_errors` metric so +that it correctly batches the same errors together. It also improves the metrics `backlog_*` by +updating them whenever Hermes queries pending packets. + +This release also improves the reliability of the idle worker clean-up and +fixes a bug within the `evidence` command which would sometimes prevent +the misbehaviour evidence from being reported. diff --git a/.github/codespell/codespell.ini b/.github/codespell/codespell.ini new file mode 100644 index 0000000000..681ebbd0f1 --- /dev/null +++ b/.github/codespell/codespell.ini @@ -0,0 +1,3 @@ +[codespell] +skip = *.js,*.ts,*.css,*.svg,*.html,*.json,./target,./tools/integration-test/data,./tools/check-guide/target,./ci/misbehaviour/data +ignore-words = .github/codespell/words.txt diff --git a/.github/codespell/words.txt b/.github/codespell/words.txt new file mode 100644 index 0000000000..d15ff15549 --- /dev/null +++ b/.github/codespell/words.txt @@ -0,0 +1,4 @@ +crate +shs +ser +numer diff --git a/.github/workflows/cargo-doc.yaml b/.github/workflows/cargo-doc.yaml index cfe2f2468f..ebc03618b6 100644 --- a/.github/workflows/cargo-doc.yaml +++ b/.github/workflows/cargo-doc.yaml @@ -4,7 +4,17 @@ on: push: branches: - master - pull_request: {} + paths: + - .github/workflows/cargo-doc.yaml + - Cargo.toml + - Cargo.lock + - crates/** + pull_request: + paths: + - .github/workflows/cargo-doc.yaml + - Cargo.toml + - Cargo.lock + - crates/** # Cancel previous runs of this workflow when a new commit is added to the PR, branch or tag concurrency: diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml new 
file mode 100644 index 0000000000..6ad483e50d --- /dev/null +++ b/.github/workflows/codespell.yml @@ -0,0 +1,22 @@ +name: Codespell +on: + pull_request: + push: + branches: master + +# Cancel previous runs of this workflow when a new commit is added to the PR, branch or tag +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + codespell: + name: Check spelling + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: codespell-project/actions-codespell@v2 + with: + skip: '*.js,*.ts,*.css,*.svg,*.html,*.json,./target,./tools/integration-test/data,./tools/check-guide/target,./ci/misbehaviour/data' + ignore_words_file: .github/codespell/words.txt + diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index d46c030263..5b3ac9891e 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -18,8 +18,10 @@ jobs: fail-fast: false matrix: platform: - - linux/amd64 - - linux/arm64 + - id: linux/amd64 + name: amd64 + - id: linux/arm64 + name: arm64 steps: - name: Checkout uses: actions/checkout@v4 @@ -49,7 +51,7 @@ jobs: with: context: . 
file: ./ci/release/hermes.Dockerfile - platforms: ${{ matrix.platform }} + platforms: ${{ matrix.platform.id }} labels: ${{ steps.meta.outputs.labels }} outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true cache-from: type=gha @@ -62,9 +64,9 @@ jobs: touch "/tmp/digests/${digest#sha256:}" - name: Upload digest - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: digests + name: digests-${{ matrix.platform.name }} path: /tmp/digests/* if-no-files-found: error retention-days: 1 @@ -75,9 +77,10 @@ jobs: - docker-build steps: - name: Download digests - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: - name: digests + pattern: digests-* + merge-multiple: true path: /tmp/digests - name: Set up Docker Buildx diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml index 5f4237329e..78ef604845 100644 --- a/.github/workflows/integration.yaml +++ b/.github/workflows/integration.yaml @@ -48,49 +48,63 @@ jobs: - package: gaia13 command: gaiad account_prefix: cosmos + native_token: stake + features: forward-packet,clean-workers - package: gaia14 command: gaiad account_prefix: cosmos - - package: ibc-go-v4-simapp - command: simd - account_prefix: cosmos - - package: ibc-go-v5-simapp - command: simd - account_prefix: cosmos + native_token: stake + features: forward-packet,clean-workers - package: ibc-go-v6-simapp command: simd account_prefix: cosmos + native_token: stake + features: ica,ics29-fee - package: ibc-go-v7-simapp command: simd account_prefix: cosmos + native_token: stake + features: ica,ics29-fee - package: ibc-go-v8-simapp command: simd account_prefix: cosmos + native_token: stake + features: ica,ics29-fee - package: wasmd command: wasmd account_prefix: wasm - - package: evmos - command: evmosd - account_prefix: evmos + native_token: stake + features: '' - package: osmosis command: osmosisd account_prefix: osmo - - package: stride - 
command: strided - account_prefix: stride + native_token: stake + features: '' - package: juno command: junod account_prefix: juno + native_token: stake + features: juno,forward-packet + - package: provenance + command: provenanced + account_prefix: pb + native_token: nhash + features: fee-grant,async-icq + - package: migaloo + command: migalood + account_prefix: migaloo + native_token: stake + features: '' steps: - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 + - uses: cachix/install-nix-action@v25 with: install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' extra_nix_config: | experimental-features = nix-command flakes - - uses: cachix/cachix-action@v13 + - uses: cachix/cachix-action@v14 with: name: cosmos - uses: actions-rs/toolchain@v1 @@ -101,7 +115,7 @@ jobs: - uses: actions-rs/cargo@v1 with: command: test - args: -p ibc-integration-test --no-fail-fast --no-run + args: -p ibc-integration-test --features=${{ matrix.chain.features }} --no-fail-fast --no-run - name: Install cargo-nextest run: curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin - name: Run integration test @@ -112,22 +126,24 @@ jobs: NEXTEST_RETRIES: 2 CHAIN_COMMAND_PATHS: ${{ matrix.chain.command }} ACCOUNT_PREFIXES: ${{ matrix.chain.account_prefix }} + NATIVE_TOKENS: ${{ matrix.chain.native_token }} run: | nix shell .#python .#${{ matrix.chain.package }} -c \ - cargo nextest run -p ibc-integration-test --no-fail-fast --failure-output final --test-threads=2 + cargo nextest run -p ibc-integration-test --no-fail-fast --failure-output final --test-threads=2 \ + --features=${{ matrix.chain.features }} ordered-channel-test: runs-on: ubuntu-20.04 timeout-minutes: 60 steps: - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 + - uses: cachix/install-nix-action@v25 with: install_url: 
https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' extra_nix_config: | experimental-features = nix-command flakes - - uses: cachix/cachix-action@v13 + - uses: cachix/cachix-action@v14 with: name: cosmos - uses: actions-rs/toolchain@v1 @@ -151,296 +167,6 @@ jobs: cargo nextest run -p ibc-integration-test --no-fail-fast --failure-output final --test-threads=2 \ --features ordered test_ordered_channel - ica-filter-test: - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - chain: - - package: ibc-go-v6-simapp - command: simd - account_prefix: cosmos - - package: ibc-go-v7-simapp - command: simd - account_prefix: cosmos - - package: ibc-go-v8-simapp - command: simd - account_prefix: cosmos - steps: - - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v13 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v2 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --no-fail-fast --no-run - - name: Install cargo-nextest - run: curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin - - env: - RUST_LOG: info - RUST_BACKTRACE: 1 - NO_COLOR_LOG: 1 - NEXTEST_RETRIES: 2 - CHAIN_COMMAND_PATHS: ${{ matrix.chain.command }} - ACCOUNT_PREFIXES: ${{ matrix.chain.account_prefix }} - run: | - nix shell .#${{ matrix.chain.package }} -c \ - cargo nextest run -p ibc-integration-test --no-fail-fast --failure-output final --test-threads=2 \ - --features ica test_ica_filter - - ics29-fee-test: - 
runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - chain: - - package: ibc-go-v5-simapp - command: simd - account_prefix: cosmos - - package: ibc-go-v6-simapp - command: simd - account_prefix: cosmos - - package: ibc-go-v7-simapp - command: simd - account_prefix: cosmos - - package: ibc-go-v8-simapp - command: simd - account_prefix: cosmos - - package: migaloo - command: migalood - account_prefix: migaloo - steps: - - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v13 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v2 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --features ics29-fee --no-fail-fast --no-run - - name: Install cargo-nextest - run: curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin - - env: - RUST_LOG: info - RUST_BACKTRACE: 1 - NO_COLOR_LOG: 1 - NEXTEST_RETRIES: 2 - CHAIN_COMMAND_PATHS: ${{ matrix.chain.command }} - ACCOUNT_PREFIXES: ${{ matrix.chain.account_prefix }} - run: | - nix shell .#${{ matrix.chain.package }} -c \ - cargo nextest run -p ibc-integration-test --no-fail-fast --failure-output final --test-threads=2 \ - --features ics29-fee fee:: - - forward-packet: - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - chain: - - package: gaia13 - command: gaiad - account_prefix: cosmos - - package: gaia14 - command: gaiad - account_prefix: cosmos - - package: juno - command: junod - account_prefix: juno - steps: - - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 - with: - install_url: 
https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v13 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v2 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --features forward-packet --no-fail-fast --no-run - - name: Install cargo-nextest - run: curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin - - env: - RUST_LOG: info - RUST_BACKTRACE: 1 - NO_COLOR_LOG: 1 - NEXTEST_RETRIES: 2 - CHAIN_COMMAND_PATHS: ${{ matrix.chain.command }} - ACCOUNT_PREFIXES: ${{ matrix.chain.account_prefix }} - run: | - nix shell .#${{ matrix.chain.package }} -c \ - cargo nextest run -p ibc-integration-test --no-fail-fast --failure-output final --test-threads=2 \ - --features forward-packet forward:: - - ics31: - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - chain: - - package: .#gaia13 .#stride-no-admin - command: gaiad,strided - account_prefix: cosmos,stride - - package: .#gaia14 .#stride-no-admin - command: gaiad,strided - account_prefix: cosmos,stride - steps: - - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v13 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v2 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --features ics31 --no-fail-fast --no-run - - 
name: Install cargo-nextest - run: curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin - - env: - RUST_LOG: info - RUST_BACKTRACE: 1 - NO_COLOR_LOG: 1 - NEXTEST_RETRIES: 2 - CHAIN_COMMAND_PATHS: ${{ matrix.chain.command }} - ACCOUNT_PREFIXES: ${{ matrix.chain.account_prefix }} - run: | - nix shell ${{ matrix.chain.package }} -c \ - cargo nextest run -p ibc-integration-test --no-fail-fast --failure-output final --test-threads=2 \ - --features ics31 ics31:: - - fee-grant: - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - chain: - - package: stride - command: strided - account_prefix: stride - - package: evmos - command: evmosd - account_prefix: evmos - steps: - - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v13 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v2 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --features fee-grant --no-fail-fast --no-run - - name: Install cargo-nextest - run: curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin - - env: - RUST_LOG: info - RUST_BACKTRACE: 1 - NO_COLOR_LOG: 1 - CHAIN_COMMAND_PATHS: ${{ matrix.chain.command }} - ACCOUNT_PREFIXES: ${{ matrix.chain.account_prefix }} - run: | - nix shell .#${{ matrix.chain.package }} -c \ - cargo nextest run -p ibc-integration-test --no-fail-fast --failure-output final --test-threads=2 \ - --features fee-grant fee_grant:: - - clean-workers: - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - chain: - - package: gaia13 - command: gaiad - account_prefix: 
cosmos - - package: gaia14 - command: gaiad - account_prefix: cosmos - steps: - - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v13 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v2 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --features ics31 --no-fail-fast --no-run - - name: Install cargo-nextest - run: curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin - - env: - RUST_LOG: info - RUST_BACKTRACE: 1 - NO_COLOR_LOG: 1 - NEXTEST_RETRIES: 2 - CHAIN_COMMAND_PATHS: ${{ matrix.chain.command }} - ACCOUNT_PREFIXES: ${{ matrix.chain.account_prefix }} - run: | - nix shell .#${{ matrix.chain.package }} -c \ - cargo nextest run -p ibc-integration-test --no-fail-fast --failure-output final --test-threads=2 \ - --features clean-workers clean_workers:: - interchain-security-no-ica: runs-on: ubuntu-20.04 strategy: @@ -455,13 +181,13 @@ jobs: account_prefix: cosmos,neutron steps: - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 + - uses: cachix/install-nix-action@v25 with: install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' extra_nix_config: | experimental-features = nix-command flakes - - uses: cachix/cachix-action@v13 + - uses: cachix/cachix-action@v14 with: name: cosmos - uses: actions-rs/toolchain@v1 @@ -501,13 +227,13 @@ jobs: account_prefix: cosmos,stride steps: - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 + - uses: 
cachix/install-nix-action@v25 with: install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' extra_nix_config: | experimental-features = nix-command flakes - - uses: cachix/cachix-action@v13 + - uses: cachix/cachix-action@v14 with: name: cosmos - uses: actions-rs/toolchain@v1 @@ -547,13 +273,13 @@ jobs: account_prefix: cosmos,stride steps: - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 + - uses: cachix/install-nix-action@v25 with: install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' extra_nix_config: | experimental-features = nix-command flakes - - uses: cachix/cachix-action@v13 + - uses: cachix/cachix-action@v14 with: name: cosmos - uses: actions-rs/toolchain@v1 @@ -594,13 +320,13 @@ jobs: native_token: utia,stake steps: - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 + - uses: cachix/install-nix-action@v25 with: install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' extra_nix_config: | experimental-features = nix-command flakes - - uses: cachix/cachix-action@v13 + - uses: cachix/cachix-action@v14 with: name: cosmos - uses: actions-rs/toolchain@v1 @@ -611,7 +337,7 @@ jobs: - uses: actions-rs/cargo@v1 with: command: test - args: -p ibc-integration-test --features interchain-security --no-fail-fast --no-run + args: -p ibc-integration-test --features celestia --no-fail-fast --no-run - name: Install cargo-nextest run: curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin - env: @@ -636,13 +362,13 @@ jobs: - gaia6 steps: - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 + - 
uses: cachix/install-nix-action@v25 with: install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' extra_nix_config: | experimental-features = nix-command flakes - - uses: cachix/cachix-action@v13 + - uses: cachix/cachix-action@v14 with: name: cosmos - uses: actions-rs/toolchain@v1 diff --git a/.github/workflows/markdown-link-check.yml b/.github/workflows/markdown-link-check.yml index dcbb7298c7..9dbaba343b 100644 --- a/.github/workflows/markdown-link-check.yml +++ b/.github/workflows/markdown-link-check.yml @@ -13,7 +13,7 @@ jobs: - uses: actions/checkout@v4 - name: Link Checker id: lychee - uses: lycheeverse/lychee-action@v1.8.0 + uses: lycheeverse/lychee-action@v1.9.1 with: args: --verbose --no-progress --max-concurrency 16 './**/*.md' token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/misbehaviour.yml b/.github/workflows/misbehaviour.yml index 62869c4e2f..629ce08faf 100644 --- a/.github/workflows/misbehaviour.yml +++ b/.github/workflows/misbehaviour.yml @@ -49,12 +49,12 @@ jobs: steps: - uses: actions/checkout@v4 - name: Install Nix - uses: cachix/install-nix-action@v24 + uses: cachix/install-nix-action@v25 with: extra_nix_config: | experimental-features = nix-command flakes - name: Use cachix cache - uses: cachix/cachix-action@v13 + uses: cachix/cachix-action@v14 with: name: cosmos - name: Install sconfig @@ -103,12 +103,12 @@ jobs: steps: - uses: actions/checkout@v4 - name: Install Nix - uses: cachix/install-nix-action@v24 + uses: cachix/install-nix-action@v25 with: extra_nix_config: | experimental-features = nix-command flakes - name: Use cachix cache - uses: cachix/cachix-action@v13 + uses: cachix/cachix-action@v14 with: name: cosmos - name: Install sconfig @@ -157,12 +157,12 @@ jobs: steps: - uses: actions/checkout@v4 - name: Install Nix - uses: cachix/install-nix-action@v24 + uses: 
cachix/install-nix-action@v25 with: extra_nix_config: | experimental-features = nix-command flakes - name: Use cachix cache - uses: cachix/cachix-action@v13 + uses: cachix/cachix-action@v14 with: name: cosmos - name: Install sconfig @@ -212,12 +212,12 @@ jobs: steps: - uses: actions/checkout@v4 - name: Install Nix - uses: cachix/install-nix-action@v24 + uses: cachix/install-nix-action@v25 with: extra_nix_config: | experimental-features = nix-command flakes - name: Use cachix cache - uses: cachix/cachix-action@v13 + uses: cachix/cachix-action@v14 with: name: cosmos - name: Install sconfig diff --git a/.github/workflows/multi-chains.yaml b/.github/workflows/multi-chains.yaml index b1c8138708..63edbede02 100644 --- a/.github/workflows/multi-chains.yaml +++ b/.github/workflows/multi-chains.yaml @@ -70,32 +70,26 @@ jobs: - package: ibc-go-v8-simapp command: simd account_prefix: cosmos - - package: wasmd - command: wasmd - account_prefix: wasm second-package: - - package: evmos - command: evmosd - account_prefix: evmos - package: osmosis command: osmosisd account_prefix: osmo - - package: stride - command: strided - account_prefix: stride - - package: juno - command: junod - account_prefix: juno + - package: migaloo + command: migalood + account_prefix: migaloo + - package: wasmd + command: wasmd + account_prefix: wasm steps: - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 + - uses: cachix/install-nix-action@v25 with: install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' extra_nix_config: | experimental-features = nix-command flakes - - uses: cachix/cachix-action@v13 + - uses: cachix/cachix-action@v14 with: name: cosmos - uses: actions-rs/toolchain@v1 diff --git a/.github/workflows/specs.yml b/.github/workflows/specs.yml deleted file mode 100644 index 1e06c174a5..0000000000 --- a/.github/workflows/specs.yml +++ 
/dev/null @@ -1,35 +0,0 @@ -name: TLA+ Specs -on: - pull_request: - paths: - - docs/spec/tla/** - push: - branches: master - paths: - - docs/specs/tla/** - -# Cancel previous runs of this workflow when a new commit is added to the PR, branch or tag -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -jobs: - typecheck-specs: - runs-on: ubuntu-latest - container: apalache/mc:0.15.2 - env: - working-directory: docs/spec/tla - steps: - - uses: actions/checkout@v4 - - name: IBC Core - run: apalache-mc typecheck IBCCore.tla | grep -F 'Type checker [OK]' - working-directory: ${{env.working-directory}}/ibc-core - - name: Fungible Token Transfer - run: apalache-mc typecheck IBCTokenTransfer.tla | grep -F 'Type checker [OK]' - working-directory: ${{env.working-directory}}/fungible-token-transfer - - name: ICS 02 Client / Single Chain - run: apalache-mc typecheck ICS02SingleChainEnvironment.tla | grep -F 'Type checker [OK]' - working-directory: ${{env.working-directory}}/client - - name: ICS 02 Client / Two Chains - run: apalache-mc typecheck ICS02TwoChainsEnvironment.tla | grep -F 'Type checker [OK]' - working-directory: ${{env.working-directory}}/client diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a4c21d3bc..08dad0afd3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,40 @@ # CHANGELOG +## v1.7.4 + +*December 15th, 2023* + +Special thanks to Yun Yeo (@beer-1) for his contributions ([#3697] and [#3703]). + +This release improves the monitoring of Hermes instances by fixing the `broadcast_errors` metric so +that it correctly batches the same errors together. It also improves the metrics `backlog_*` by +updating them whenever Hermes queries pending packets. + +This release also improves the reliability of the idle worker clean-up and +fixes a bug within the `evidence` command which would sometimes prevent +the misbehaviour evidence from being reported. 
+ +### BUG FIXES + +- [Relayer CLI](relayer-cli) + - Fix a bug in the `evidence` command which would sometimes + prevent the detected misbehaviour evidence from being submitted, + instead erroring out with a validator set hash mismatch. + ([\#3697](https://github.com/informalsystems/hermes/pull/3697)) +- [Relayer Library](relayer) + - Avoid retrieving a worker which is being removed by the idle worker clean-up + process. ([\#3703](https://github.com/informalsystems/hermes/issues/3703)) +- [Telemetry & Metrics](telemetry) + - Fix the issue where `broadcast_errors` metric would not correctly batch + the same errors together. ([\#3720](https://github.com/informalsystems/hermes/issues/3720)) + - Update the values of `backlog` metrics when clearing packets. + Change the `backlog_oldest_timestamp` to `backlog_latest_update_timestamp` + which shows the last time the `backlog` metrics have been updated. + ([\#3723](https://github.com/informalsystems/hermes/issues/3723)) + +[#3697]: https://github.com/informalsystems/hermes/issues/3697 +[#3703]: https://github.com/informalsystems/hermes/issues/3703 + ## v1.7.3 *November 29th, 2023* @@ -44,7 +79,7 @@ at a different value for each chain, using the new per-chain `clear_interval` se The global `clear_interval` setting is used as a default value if the per-chain setting is not defined. -Additionnaly, operators can now override the CometBFT compatibility mode to be used +Additionally, operators can now override the CometBFT compatibility mode to be used for a chain by using the new `compat_mode` per-chain setting. The main use case for this is to override the automatically detected compatibility mode in case Hermes gets it wrong or encounters a non-standard version number and falls back on the wrong CometBFT version. @@ -430,21 +465,21 @@ This patch release adds support for CometBFT in version checks. 
*March 27th, 2023* Hermes v1.4.0 brings compatibility with chains based on Tendermint/CometBFT 0.37, -while retaining compatiblity with Tendermint/CometBFT 0.34. This is transparent +while retaining compatibility with Tendermint/CometBFT 0.34. This is transparent and does not require any additional configuration. The relayer now supports ICS consumer chains, which only requires operators to specify the `unbonding_period` parameter in the chain settings. This is only -a temporary requirement, in the future Hermes will seamlessy support consumer +a temporary requirement, in the future Hermes will seamlessly support consumer chains with minimal changes to the configuration. This release also deprecates support for chains based on Cosmos SDK 0.43.x and lower, -and bumps the compatiblity to Cosmos SDK 0.47.x. +and bumps the compatibility to Cosmos SDK 0.47.x. The relayer now also allows operators to filter out packets to relay based on whether or not they contain a fee, and the minimal amount of such fee. Please check the relevant [documentation in the Hermes guide](fee-guide) for more information. -Additionnaly, Hermes now also tracks [metrics for ICS29 fees](fee-metrics). +Additionally, Hermes now also tracks [metrics for ICS29 fees](fee-metrics). This release includes a new `query client status` CLI to quickly check whether a client is active, expired or frozen. @@ -1028,7 +1063,7 @@ This is the third release candidate for Hermes v1.0.0 🎉 - Release version 0.18.0 of `ibc-telemetry` -#### IMROVEMENTS +#### IMPROVEMENTS - Improve the metrics - Renamed `oldest_sequence` metric to `backlog_oldest_sequence` @@ -1733,7 +1768,7 @@ Before running Hermes v0.11.0, make sure you remove the `mode.packets.filter` op and consensus states ([#1481](https://github.com/informalsystems/hermes/issues/1481)) - More structural logging in relayer, using tracing spans and key-value pairs. ([#1491](https://github.com/informalsystems/hermes/pull/1491)) - - Improved documention w.r.t. 
keys for Ethermint-based chains + - Improved documentation w.r.t. keys for Ethermint-based chains ([#1785](https://github.com/informalsystems/hermes/issues/1785)) - [Relayer CLI](crates/relayer-cli) - Add custom options to the `create client` command. @@ -2126,7 +2161,7 @@ This release also fixes a bug where the chain runtime within the relayer would c This release of Hermes is the first to be compatible with the development version of Cosmos SDK 0.43. Hermes 0.7.0 also improves the performance and reliability of the relayer, notably by waiting asynchronously for transactions to be confirmed. -Additionnally, Hermes now includes a REST server which exposes the relayer's internal state over HTTP. +Additionally, Hermes now includes a REST server which exposes the relayer's internal state over HTTP. ### BUG FIXES @@ -2725,7 +2760,7 @@ This release also finalizes the initial implementation of all the ICS 004 handle - Fix for chains that don't have `cosmos` account prefix ([#416]) - Fix for building the `trusted_validator_set` for the header used in client updates ([#770]) - Don't send `MsgAcknowledgment` if channel is closed ([#675]) - - Fix a bug where the keys addresses had their account prefix overriden by the prefix in the configuration ([#751]) + - Fix a bug where the keys addresses had their account prefix overridden by the prefix in the configuration ([#751]) - [ibc-relayer-cli] - Hermes guide: improved installation guideline ([#672]) @@ -2863,7 +2898,7 @@ Noteworthy changes in this release include: ### FEATURES -- Continous Integration (CI) end-to-end (e2e) testing with gaia v4 ([#32], [#582], [#602]) +- Continuous Integration (CI) end-to-end (e2e) testing with gaia v4 ([#32], [#582], [#602]) - Add support for streamlining releases ([#507]) - [ibc-relayer-cli] @@ -3020,7 +3055,7 @@ Special thanks to external contributors for this release: @CharlyCst ([#102], [# - CLI for client update message ([#277]) - Implement the relayer CLI for connection handshake messages 
([#358], [#359], [#360]) - Implement the relayer CLI for channel handshake messages ([#371], [#372], [#373], [#374]) - - Added basic client, connection, and channel lifecyle in relayer v0 ([#376], [#377], [#378]) + - Added basic client, connection, and channel lifecycle in relayer v0 ([#376], [#377], [#378]) - Implement commands to add and list keys for a chain ([#363]) - Allow overriding of peer_id, height and hash in light add command ([#428]) - [proto-compiler] diff --git a/Cargo.lock b/Cargo.lock index fbf19aa589..04d229a5dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -35,7 +35,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a3473aa652e90865a06b723102aaa4a54a7d9f2092dbf4582497a61d0537d3f" dependencies = [ "ident_case", - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", "synstructure", @@ -241,7 +241,7 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", ] @@ -321,7 +321,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", ] @@ -438,7 +438,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25f9db3b38af870bf7e5cc649167533b493928e50744e2c30ae350230b414670" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", ] @@ -449,9 +449,9 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -460,9 +460,9 @@ version = "0.1.74" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -604,6 +604,12 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" +[[package]] +name = "bech32" +version = "0.10.0-beta" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98f7eed2b2781a6f0b5c903471d48e15f56fb4e1165df8a9a2337fd1a59d45ea" + [[package]] name = "bincode" version = "1.3.3" @@ -633,18 +639,28 @@ dependencies = [ [[package]] name = "bitcoin" -version = "0.30.2" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1945a5048598e4189e239d3f809b19bdad4845c4b2ba400d304d2dcf26d2c462" +checksum = "fd00f3c09b5f21fb357abe32d29946eb8bb7a0862bae62c0b5e4a692acbbe73c" dependencies = [ - "bech32 0.9.1", - "bitcoin-private", - "bitcoin_hashes", + "bech32 0.10.0-beta", + "bitcoin-internals", + "bitcoin_hashes 0.13.0", + "hex-conservative", "hex_lit", "secp256k1", "serde", ] +[[package]] +name = "bitcoin-internals" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" +dependencies = [ + "serde", +] + [[package]] name = "bitcoin-private" version = "0.1.0" @@ -658,6 +674,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d7066118b13d4b20b23645932dfb3a81ce7e29f95726c2036fa33cd7b092501" dependencies = [ "bitcoin-private", +] + +[[package]] +name = "bitcoin_hashes" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" +dependencies = [ + "bitcoin-internals", + "hex-conservative", "serde", ] 
@@ -762,7 +788,7 @@ dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", "proc-macro-crate 0.1.5", - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "syn 1.0.109", ] @@ -772,7 +798,7 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", ] @@ -783,7 +809,7 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", ] @@ -989,7 +1015,7 @@ checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ "heck", "proc-macro-error", - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", ] @@ -1067,7 +1093,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1d1429e3bd78171c65aa010eabcdf8f863ba3254728dbfb0ad4b1545beac15c" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", ] @@ -1109,12 +1135,11 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c3242926edf34aec4ac3a77108ad4854bffaa2e4ddc1824124ce59231302d5" +checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.17", + "crossbeam-utils 0.8.19", ] [[package]] @@ -1125,7 +1150,7 @@ checksum = "2d2fe95351b870527a5d09bf563ed3c97c0cffb87cf1c78a591bf48bb218d9aa" dependencies = [ "autocfg", "cfg-if 1.0.0", - "crossbeam-utils 0.8.17", + "crossbeam-utils 0.8.19", "memoffset", ] @@ -1142,12 +1167,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.17" +version = 
"0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crunchy" @@ -1200,9 +1222,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1357,7 +1379,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", ] @@ -1368,7 +1390,7 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", ] @@ -1433,9 +1455,9 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1757,9 +1779,9 @@ version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1854,7 +1876,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "729f9bd3449d77e7831a18abfb7ba2f99ee813dfd15b8c2167c9a54ba20aa99d" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 
1.0.76", "quote", "syn 1.0.109", ] @@ -1947,6 +1969,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-conservative" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ed443af458ccb6d81c1e7e661545f94d3176752fb1df2f543b902a1e0f51e2" + [[package]] name = "hex_lit" version = "0.1.1" @@ -2096,7 +2124,7 @@ dependencies = [ [[package]] name = "ibc-chain-registry" -version = "0.26.3" +version = "0.26.4" dependencies = [ "async-trait", "flex-error", @@ -2115,8 +2143,9 @@ dependencies = [ [[package]] name = "ibc-integration-test" -version = "0.26.3" +version = "0.26.4" dependencies = [ + "byte-unit", "http", "ibc-relayer", "ibc-relayer-types", @@ -2125,8 +2154,10 @@ dependencies = [ "serde", "serde_json", "tempfile", + "tendermint", + "tendermint-rpc", "time", - "toml 0.7.8", + "toml 0.8.8", "tonic", ] @@ -2149,7 +2180,7 @@ dependencies = [ [[package]] name = "ibc-relayer" -version = "0.26.3" +version = "0.26.4" dependencies = [ "anyhow", "astria-core", @@ -2160,7 +2191,7 @@ dependencies = [ "bs58", "byte-unit", "bytes", - "crossbeam-channel 0.5.9", + "crossbeam-channel 0.5.11", "digest 0.10.7", "dirs-next", "ed25519", @@ -2216,7 +2247,7 @@ dependencies = [ "tiny-keccak", "tokio", "tokio-stream", - "toml 0.7.8", + "toml 0.8.8", "tonic", "tracing", "tracing-subscriber 0.3.18", @@ -2225,14 +2256,14 @@ dependencies = [ [[package]] name = "ibc-relayer-cli" -version = "1.7.3" +version = "1.7.4" dependencies = [ "abscissa_core", "clap", "clap_complete", "color-eyre", "console", - "crossbeam-channel 0.5.9", + "crossbeam-channel 0.5.11", "dialoguer", "dirs-next", "eyre", @@ -2266,22 +2297,22 @@ dependencies = [ [[package]] name = "ibc-relayer-rest" -version = "0.26.3" +version = "0.26.4" dependencies = [ "axum", - "crossbeam-channel 0.5.9", + "crossbeam-channel 0.5.11", "ibc-relayer", 
"ibc-relayer-types", "reqwest", "serde", "tokio", - "toml 0.7.8", + "toml 0.8.8", "tracing", ] [[package]] name = "ibc-relayer-types" -version = "0.26.3" +version = "0.26.4" dependencies = [ "bytes", "derive_more", @@ -2312,7 +2343,7 @@ dependencies = [ [[package]] name = "ibc-telemetry" -version = "0.26.3" +version = "0.26.4" dependencies = [ "axum", "dashmap", @@ -2331,10 +2362,10 @@ dependencies = [ [[package]] name = "ibc-test-framework" -version = "0.26.3" +version = "0.26.4" dependencies = [ "color-eyre", - "crossbeam-channel 0.5.9", + "crossbeam-channel 0.5.11", "eyre", "flex-error", "hdpath", @@ -2356,7 +2387,7 @@ dependencies = [ "subtle-encoding", "tendermint-rpc", "tokio", - "toml 0.7.8", + "toml 0.8.8", "tonic", "tracing", "tracing-subscriber 0.3.18", @@ -2709,7 +2740,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", ] @@ -2978,7 +3009,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49e30813093f757be5cf21e50389a24dc7dbb22c49f23b7e8f51d69b508a5ffa" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", ] @@ -3021,9 +3052,9 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8017ec3548ffe7d4cef7ac0e12b044c01164a74c0f3119420faeaf13490ad8b" dependencies = [ - "crossbeam-channel 0.5.9", + "crossbeam-channel 0.5.11", "crossbeam-epoch", - "crossbeam-utils 0.8.17", + "crossbeam-utils 0.8.19", "once_cell", "parking_lot", "quanta", @@ -3093,7 +3124,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", ] @@ -3231,7 +3262,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8b3a2a91fdbfdd4d212c0dcc2ab540de2c2bcbbd90be17de7a7daf8822d010c1" dependencies = [ "async-trait", - "crossbeam-channel 0.5.9", + "crossbeam-channel 0.5.11", "dashmap", "fnv", "futures-channel", @@ -3283,7 +3314,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ "proc-macro-crate 2.0.0", - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", ] @@ -3390,7 +3421,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5aa52829b8decbef693af90202711348ab001456803ba2a98eb4ec8fb70844c" dependencies = [ "peg-runtime", - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", ] @@ -3681,9 +3712,9 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -3802,8 +3833,8 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ - "proc-macro2 1.0.70", - "syn 2.0.41", + "proc-macro2 1.0.76", + "syn 2.0.48", ] [[package]] @@ -3843,7 +3874,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", "version_check", @@ -3855,7 +3886,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "version_check", ] @@ -3871,9 +3902,9 @@ 
dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.70" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" dependencies = [ "unicode-ident", ] @@ -3930,7 +3961,7 @@ dependencies = [ "prost 0.12.3", "prost-types", "regex", - "syn 2.0.41", + "syn 2.0.48", "tempfile", "which", ] @@ -3943,7 +3974,7 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", ] @@ -3956,9 +3987,9 @@ checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" dependencies = [ "anyhow", "itertools 0.11.0", - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -3993,7 +4024,7 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" dependencies = [ - "crossbeam-utils 0.8.17", + "crossbeam-utils 0.8.19", "libc", "mach2", "once_cell", @@ -4005,11 +4036,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", ] [[package]] @@ -4402,11 +4433,11 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.27.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" +checksum = "3f622567e3b4b38154fb8190bcf6b160d7a4301d70595a49195b48c116007a27" dependencies 
= [ - "bitcoin_hashes", + "bitcoin_hashes 0.12.0", "rand", "secp256k1-sys", "serde", @@ -4414,9 +4445,9 @@ dependencies = [ [[package]] name = "secp256k1-sys" -version = "0.8.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e" +checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" dependencies = [ "cc", ] @@ -4465,9 +4496,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.193" +version = "1.0.195" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" dependencies = [ "serde_derive", ] @@ -4493,13 +4524,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.193" +version = "1.0.195" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -4529,9 +4560,9 @@ version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3081f5ffbb02284dda55132aa26daecedd7372a42417bbbab6f14ab7d6bb9145" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -4588,9 +4619,9 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -4792,10 +4823,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck", - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "rustversion", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -4825,18 +4856,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.41" +version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c8b28c477cc3bf0e7966561e3460130e1255f7a1cf71931075f1c5e7a7e269" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "unicode-ident", ] @@ -4853,7 +4884,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", "syn 1.0.109", "unicode-xid 0.2.4", @@ -5105,9 +5136,9 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ba277e77219e9eea169e8508942db1bf5d8a41ff2db9b20aab5a5aadc9fa25d" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -5131,9 +5162,9 @@ version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -5250,9 +5281,9 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ - "proc-macro2 1.0.70", + 
"proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -5301,14 +5332,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.8" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" +checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.19.15", + "toml_edit 0.21.0", ] [[package]] @@ -5322,24 +5353,24 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.15" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ "indexmap 2.1.0", - "serde", - "serde_spanned", "toml_datetime", "winnow", ] [[package]] name = "toml_edit" -version = "0.20.7" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" dependencies = [ "indexmap 2.1.0", + "serde", + "serde_spanned", "toml_datetime", "winnow", ] @@ -5425,9 +5456,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -5754,9 +5785,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", "wasm-bindgen-shared", ] @@ -5788,9 +5819,9 @@ version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6110,9 +6141,9 @@ version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -6130,7 +6161,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.70", + "proc-macro2 1.0.76", "quote", - "syn 2.0.41", + "syn 2.0.48", ] diff --git a/ci/misbehaviour-ics/double_sign_test.sh b/ci/misbehaviour-ics/double_sign_test.sh index 6c4bbf50b6..3685868433 100644 --- a/ci/misbehaviour-ics/double_sign_test.sh +++ b/ci/misbehaviour-ics/double_sign_test.sh @@ -330,7 +330,7 @@ do interchain-security-cd genesis add-genesis-account $CONS_ACCOUNT_ADDR2 $USER_COINS --home ${CONS_NODE_DIR} sleep 10 - ### this probably doesnt have to be done for each node + ### this probably does not have to be done for each node # Add consumer genesis states to genesis file RPC_LADDR_PORT=$(($RPC_LADDR_BASEPORT + $index)) RPC_LADDR=tcp://${NODE_IP}:${RPC_LADDR_PORT} diff --git a/ci/misbehaviour/config.toml b/ci/misbehaviour/config.toml index 4db30c36be..92845f89ed 100644 --- a/ci/misbehaviour/config.toml +++ b/ci/misbehaviour/config.toml @@ -136,7 +136,7 @@ account_prefix = 'cosmos' # Specify the name of the private key to use for signing transactions. 
Required # See the Adding Keys chapter for more information about managing signing keys: -# https://hermes.informal.systems/commands/keys/index.html#adding-keys +# https://hermes.informal.systems/documentation/commands/keys/index.html#adding-keys key_name = 'testkey' # Specify the address type which determines: @@ -294,7 +294,7 @@ memo_prefix = '' # [chains.packet_filter.min_fees.'channel-0'] # recv = [ { amount = 20, denom = 'stake' }, { amount = 10, denom = 'uatom' } ] -# Specify that the transaction fees should be payed from this fee granter's account. +# Specify that the transaction fees should be paid from this fee granter's account. # Optional. If unspecified (the default behavior), then no fee granter is used, and # the account specified in `key_name` will pay the tx fees for all transactions # submitted to this chain. diff --git a/ci/misbehaviour/config_fork.toml b/ci/misbehaviour/config_fork.toml index a8ff8ee836..060dda1566 100644 --- a/ci/misbehaviour/config_fork.toml +++ b/ci/misbehaviour/config_fork.toml @@ -135,7 +135,7 @@ account_prefix = 'cosmos' # Specify the name of the private key to use for signing transactions. Required # See the Adding Keys chapter for more information about managing signing keys: -# https://hermes.informal.systems/commands/keys/index.html#adding-keys +# https://hermes.informal.systems/documentation/commands/keys/index.html#adding-keys key_name = 'testkey' # Specify the address type which determines: @@ -293,7 +293,7 @@ memo_prefix = '' # [chains.packet_filter.min_fees.'channel-0'] # recv = [ { amount = 20, denom = 'stake' }, { amount = 10, denom = 'uatom' } ] -# Specify that the transaction fees should be payed from this fee granter's account. +# Specify that the transaction fees should be paid from this fee granter's account. # Optional. If unspecified (the default behavior), then no fee granter is used, and # the account specified in `key_name` will pay the tx fees for all transactions # submitted to this chain. 
diff --git a/config.toml b/config.toml index 002ed1b75c..9ff0aa5ed3 100644 --- a/config.toml +++ b/config.toml @@ -79,6 +79,20 @@ tx_confirmation = false # [Default: false] auto_register_counterparty_payee = false +# Set the maximum size for the memo field in ICS20 packets. +# If the size of the memo field is bigger than the configured +# one, the packet will not be relayed. +# The filter can be disabled by setting `enabled = false`. +# [Default: "32KiB"] +#ics20_max_memo_size = { enabled = true, size = "32KiB" } + +# Set the maximum size for the receiver field in ICS20 packets. +# If the size of the receiver field is bigger than the configured +# one, the packet will not be relayed. +# The filter can be disabled by setting `enabled = false`. +# [Default: "2KiB"] +#ics20_max_receiver_size = { enabled = true, size = "2KiB" } + # The REST section defines parameters for Hermes' built-in RESTful API. # https://hermes.informal.systems/rest.html [rest] @@ -144,7 +158,7 @@ id = 'ibc-0' type = "CosmosSdk" # Whether or not this is a CCV consumer chain. Default: false -# Only specifiy true for CCV consumer chain, but NOT for sovereign chains. +# Only specify true for CCV consumer chain, but NOT for sovereign chains. ccv_consumer_chain = false # Specify the RPC address and port where the chain RPC server listens on. Required @@ -209,7 +223,7 @@ account_prefix = 'cosmos' # Specify the name of the private key to use for signing transactions. Required # See the Adding Keys chapter for more information about managing signing keys: -# https://hermes.informal.systems/commands/keys/index.html#adding-keys +# https://hermes.informal.systems/documentation/commands/keys/index.html#adding-keys key_name = 'testkey' # Specify the folder used to store the keys. Optional @@ -261,7 +275,7 @@ default_gas = 100000 # Specify the maximum amount of gas to be used as the gas limit for a transaction. # If `default_gas` is unspecified, then `max_gas` will be used as `default_gas`. 
# Default: 400 000 -max_gas = 400000 +max_gas = 4000000 # Specify the price per gas used of the fee to submit a transaction and # the denomination of the fee. @@ -296,6 +310,10 @@ max_msg_num = 30 # Default: 2097152 (2 MiB) max_tx_size = 2097152 +# How many packets to fetch at once from the chain when clearing packets. +# Default: 50 +query_packets_chunk_size = 50 + # Specify the maximum amount of time to tolerate a clock drift. # The clock drift parameter defines how much new (untrusted) header's time # can drift into the future. Default: 5s @@ -314,14 +332,23 @@ max_block_time = '30s' # Specify the amount of time to be used as the light client trusting period. # It should be significantly less than the unbonding period # (e.g. unbonding period = 3 weeks, trusting period = 2 weeks). +# # Default: 2/3 of the `unbonding period` for Cosmos SDK chains trusting_period = '14days' +# The rate at which to refresh the client referencing this chain, +# expressed as a fraction of the trusting period. +# +# Default: 1/3 (ie. three times per trusting period) +client_refresh_rate = '1/3' + # Specify the trust threshold for the light client, ie. the minimum fraction of validators # which must overlap across two blocks during light client verification. -# Default: { numerator = '2', denominator = '3' }, ie. 2/3. +# # Warning: This is an advanced feature! Modify with caution. -trust_threshold = { numerator = '2', denominator = '3' } +# +# Default: 2/3 +trust_threshold = '2/3' # Specify a string that Hermes will use as a memo for each transaction it submits # to this chain. The string is limited to 50 characters. Default: '' (empty). @@ -374,7 +401,7 @@ memo_prefix = '' # [chains.packet_filter.min_fees.'channel-0'] # recv = [ { amount = 20, denom = 'stake' }, { amount = 10, denom = 'uatom' } ] -# Specify that the transaction fees should be payed from this fee granter's account. +# Specify that the transaction fees should be paid from this fee granter's account. # Optional. 
If unspecified (the default behavior), then no fee granter is used, and # the account specified in `key_name` will pay the tx fees for all transactions # submitted to this chain. @@ -405,7 +432,7 @@ account_prefix = 'cosmos' key_name = 'testkey' store_prefix = 'ibc' default_gas = 100000 -max_gas = 400000 +max_gas = 4000000 gas_price = { price = 0.025, denom = 'stake' } gas_multiplier = 1.1 max_msg_num = 30 @@ -413,5 +440,5 @@ max_tx_size = 2097152 clock_drift = '5s' max_block_time = '30s' trusting_period = '14days' -trust_threshold = { numerator = '2', denominator = '3' } +trust_threshold = '2/3' address_type = { derivation = 'cosmos' } diff --git a/crates/chain-registry/Cargo.toml b/crates/chain-registry/Cargo.toml index 1e7c8ebc2e..5724404d1f 100644 --- a/crates/chain-registry/Cargo.toml +++ b/crates/chain-registry/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-chain-registry" -version = "0.26.3" +version = "0.26.4" edition = "2021" license = "Apache-2.0" keywords = ["cosmos", "ibc", "relayer", "chain", "registry"] @@ -12,7 +12,7 @@ description = """ """ [dependencies] -ibc-relayer-types = { version = "0.26.3", path = "../relayer-types" } +ibc-relayer-types = { version = "0.26.4", path = "../relayer-types" } ibc-proto = { version = "0.39.0", features = ["serde"] } tendermint-rpc = { version = "0.34.0", features = ["http-client", "websocket-client"] } @@ -22,7 +22,7 @@ futures = { version = "0.3.27", features = ["executor"] } http = "0.2" itertools = "0.10.5" reqwest = { version = "0.11.13", features = ["rustls-tls", "json"], default-features = false } -serde = "1.0.166" +serde = "1.0.195" serde_json = "1" tokio = "1.17.0" tracing = "0.1.36" diff --git a/crates/chain-registry/src/chain.rs b/crates/chain-registry/src/chain.rs index bb6d96d2a8..5649d597a5 100644 --- a/crates/chain-registry/src/chain.rs +++ b/crates/chain-registry/src/chain.rs @@ -157,8 +157,8 @@ pub struct Grpc { } impl Fetchable for ChainData { - fn path(ressource: &str) -> PathBuf { - 
[ressource, "chain.json"].iter().collect() + fn path(resource: &str) -> PathBuf { + [resource, "chain.json"].iter().collect() } } diff --git a/crates/chain-registry/src/querier.rs b/crates/chain-registry/src/querier.rs index 1b1d1d3798..4864fac5ac 100644 --- a/crates/chain-registry/src/querier.rs +++ b/crates/chain-registry/src/querier.rs @@ -50,7 +50,7 @@ pub trait QueryTypes { #[async_trait] /// `QueryContext` represents the basic expectations for a query pub trait QueryContext: QueryTypes { - /// Return an error specific to the query which is retured when `query_healthy` fails + /// Return an error specific to the query which is returned when `query_healthy` fails /// /// # Arguments /// diff --git a/crates/relayer-cli/Cargo.toml b/crates/relayer-cli/Cargo.toml index ce1e1eab0c..f7532782af 100644 --- a/crates/relayer-cli/Cargo.toml +++ b/crates/relayer-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-relayer-cli" -version = "1.7.3" +version = "1.7.4" edition = "2021" license = "Apache-2.0" readme = "README.md" @@ -25,17 +25,17 @@ telemetry = ["ibc-relayer/telemetry", "ibc-telemetry"] rest-server = ["ibc-relayer-rest"] [dependencies] -ibc-relayer-types = { version = "0.26.3", path = "../relayer-types" } -ibc-relayer = { version = "0.26.3", path = "../relayer" } -ibc-telemetry = { version = "0.26.3", path = "../telemetry", optional = true } -ibc-relayer-rest = { version = "0.26.3", path = "../relayer-rest", optional = true } -ibc-chain-registry = { version = "0.26.3" , path = "../chain-registry" } +ibc-relayer-types = { version = "0.26.4", path = "../relayer-types" } +ibc-relayer = { version = "0.26.4", path = "../relayer" } +ibc-telemetry = { version = "0.26.4", path = "../telemetry", optional = true } +ibc-relayer-rest = { version = "0.26.4", path = "../relayer-rest", optional = true } +ibc-chain-registry = { version = "0.26.4" , path = "../chain-registry" } clap = { version = "3.2", features = ["cargo"] } clap_complete = "3.2" color-eyre = "0.6" console = 
"0.15.5" -crossbeam-channel = "0.5.8" +crossbeam-channel = "0.5.11" dialoguer = "0.10.3" dirs-next = "2.0.0" eyre = "0.6.8" @@ -72,6 +72,6 @@ features = ["options"] [dev-dependencies] abscissa_core = { version = "=0.6.0", features = ["testing"] } -once_cell = "1.17" +once_cell = "1.19" regex = "1.9" serial_test = "2.0.0" diff --git a/crates/relayer-cli/src/chain_registry.rs b/crates/relayer-cli/src/chain_registry.rs index 76581df43e..94e09d8d8a 100644 --- a/crates/relayer-cli/src/chain_registry.rs +++ b/crates/relayer-cli/src/chain_registry.rs @@ -33,6 +33,7 @@ use ibc_relayer::{ MaxMsgNum, MaxTxSize, Memo, + TrustThreshold, }, AddressType, ChainConfig, @@ -41,7 +42,6 @@ use ibc_relayer::{ }, keyring::Store, }; -use tendermint_light_client_verifier::types::TrustThreshold; use tendermint_rpc::Url; use tokio::task::{ JoinError, @@ -168,9 +168,11 @@ where max_msg_num: MaxMsgNum::default(), max_tx_size: MaxTxSize::default(), max_grpc_decoding_size: default::max_grpc_decoding_size(), + query_packets_chunk_size: default::query_packets_chunk_size(), clock_drift: default::clock_drift(), max_block_time: default::max_block_time(), trusting_period: None, + client_refresh_rate: default::client_refresh_rate(), ccv_consumer_chain: false, memo_prefix: Memo::default(), proof_specs: Default::default(), diff --git a/crates/relayer-cli/src/cli_utils.rs b/crates/relayer-cli/src/cli_utils.rs index 50fc3693b8..6b3722ce3d 100644 --- a/crates/relayer-cli/src/cli_utils.rs +++ b/crates/relayer-cli/src/cli_utils.rs @@ -77,7 +77,7 @@ pub fn spawn_chain_runtime(config: &Config, chain_id: &ChainId) -> Result(config, chain_id) } -/// Spawns a chain runtime for the chain in the configuraiton identified by the given chain identifier. +/// Spawns a chain runtime for the chain in the configuration identified by the given chain identifier. /// /// The `Handle` type parameter allows choosing which kind of [`ChainHandle`] implementation to use. 
/// diff --git a/crates/relayer-cli/src/commands/clear.rs b/crates/relayer-cli/src/commands/clear.rs index cddac6952f..83ca60c562 100644 --- a/crates/relayer-cli/src/commands/clear.rs +++ b/crates/relayer-cli/src/commands/clear.rs @@ -1,3 +1,5 @@ +use std::ops::RangeInclusive; + use abscissa_core::{ clap::Parser, config::Override, @@ -16,12 +18,16 @@ use ibc_relayer::{ Link, LinkParameters, }, + util::seq_range::parse_seq_range, }; use ibc_relayer_types::{ - core::ics24_host::identifier::{ - ChainId, - ChannelId, - PortId, + core::{ + ics04_channel::packet::Sequence, + ics24_host::identifier::{ + ChainId, + ChannelId, + PortId, + }, }, events::IbcEvent, }; @@ -71,17 +77,36 @@ pub struct ClearPacketsCmd { )] channel_id: ChannelId, + #[clap( + long = "packet-sequences", + help = "Sequences of packets to be cleared on the specified chain. \ + Either a single sequence or a range of sequences can be specified. \ + If not provided, all pending packets will be cleared on both chains. \ + Each element of the comma-separated list must be either a single \ + sequence or a range of sequences. 
\ + Example: `1,10..20` will clear packets with sequences 1, 10, 11, ..., 20", + value_delimiter = ',', + value_parser = parse_seq_range + )] + packet_sequences: Vec>, + #[clap( long = "key-name", - help = "use the given signing key for the specified chain (default: `key_name` config)" + help = "Use the given signing key for the specified chain (default: `key_name` config)" )] key_name: Option, #[clap( long = "counterparty-key-name", - help = "use the given signing key for the counterparty chain (default: `counterparty_key_name` config)" + help = "Use the given signing key for the counterparty chain (default: `counterparty_key_name` config)" )] counterparty_key_name: Option, + + #[clap( + long = "query-packets-chunk-size", + help = "Number of packets to fetch at once from the chain (default: `query_packets_chunk_size` config)" + )] + query_packets_chunk_size: Option, } impl Override for ClearPacketsCmd { @@ -131,12 +156,25 @@ impl Runnable for ClearPacketsCmd { } } + // If `query_packets_chunk_size` is provided, overwrite the chain's + // `query_packets_chunk_size` parameter + if let Some(chunk_size) = self.query_packets_chunk_size { + match chains.src.config() { + Ok(mut src_chain_cfg) => { + src_chain_cfg.set_query_packets_chunk_size(chunk_size); + } + Err(e) => Output::error(e).exit(), + } + } + let mut ev_list = vec![]; // Construct links in both directions. let opts = LinkParameters { src_port_id: self.port_id.clone(), src_channel_id: self.channel_id.clone(), + max_memo_size: config.mode.packets.ics20_max_memo_size, + max_receiver_size: config.mode.packets.ics20_max_receiver_size, }; let fwd_link = match Link::new_from_opts(chains.src.clone(), chains.dst, opts, false, false) @@ -150,22 +188,28 @@ impl Runnable for ClearPacketsCmd { Err(e) => Output::error(e).exit(), }; - // Schedule RecvPacket messages for pending packets in both directions. 
+ // Schedule RecvPacket messages for pending packets in both directions or, + // if packet sequences are provided, only on the specified chain. // This may produce pending acks which will be processed in the next phase. run_and_collect_events("forward recv and timeout", &mut ev_list, || { - fwd_link.relay_recv_packet_and_timeout_messages() - }); - run_and_collect_events("reverse recv and timeout", &mut ev_list, || { - rev_link.relay_recv_packet_and_timeout_messages() + fwd_link.relay_recv_packet_and_timeout_messages(self.packet_sequences.clone()) }); + if self.packet_sequences.is_empty() { + run_and_collect_events("reverse recv and timeout", &mut ev_list, || { + rev_link.relay_recv_packet_and_timeout_messages(vec![]) + }); + } - // Schedule AckPacket messages in both directions. - run_and_collect_events("forward ack", &mut ev_list, || { - fwd_link.relay_ack_packet_messages() - }); + // Schedule AckPacket messages in both directions or, if packet sequences are provided, + // only on the specified chain. 
run_and_collect_events("reverse ack", &mut ev_list, || { - rev_link.relay_ack_packet_messages() + rev_link.relay_ack_packet_messages(self.packet_sequences.clone()) }); + if self.packet_sequences.is_empty() { + run_and_collect_events("forward ack", &mut ev_list, || { + fwd_link.relay_ack_packet_messages(vec![]) + }); + } Output::success(ev_list).exit() } @@ -186,14 +230,15 @@ mod tests { use std::str::FromStr; use abscissa_core::clap::Parser; - use ibc_relayer_types::core::ics24_host::identifier::{ - ChainId, - ChannelId, - PortId, + use ibc_relayer_types::core::{ + ics04_channel::packet::Sequence, + ics24_host::identifier::{ + ChainId, + ChannelId, + PortId, + }, }; - use super::ClearPacketsCmd; - #[test] fn test_clear_packets_required_only() { assert_eq!( @@ -201,8 +246,10 @@ mod tests { chain_id: ChainId::from_string("chain_id"), port_id: PortId::from_str("port_id").unwrap(), channel_id: ChannelId::from_str("channel-07").unwrap(), + packet_sequences: vec![], key_name: None, counterparty_key_name: None, + query_packets_chunk_size: None }, ClearPacketsCmd::parse_from([ "test", @@ -223,8 +270,10 @@ mod tests { chain_id: ChainId::from_string("chain_id"), port_id: PortId::from_str("port_id").unwrap(), channel_id: ChannelId::from_str("channel-07").unwrap(), + packet_sequences: vec![], key_name: None, - counterparty_key_name: None + counterparty_key_name: None, + query_packets_chunk_size: None }, ClearPacketsCmd::parse_from([ "test", @@ -238,6 +287,37 @@ mod tests { ) } + #[test] + fn test_clear_packets_sequences() { + assert_eq!( + ClearPacketsCmd { + chain_id: ChainId::from_string("chain_id"), + port_id: PortId::from_str("port_id").unwrap(), + channel_id: ChannelId::from_str("channel-07").unwrap(), + packet_sequences: vec![ + Sequence::from(1)..=Sequence::from(1), + Sequence::from(10)..=Sequence::from(20) + ], + key_name: Some("key_name".to_owned()), + counterparty_key_name: None, + query_packets_chunk_size: None + }, + ClearPacketsCmd::parse_from([ + "test", + 
"--chain", + "chain_id", + "--port", + "port_id", + "--channel", + "channel-07", + "--packet-sequences", + "1,10..20", + "--key-name", + "key_name" + ]) + ) + } + #[test] fn test_clear_packets_key_name() { assert_eq!( @@ -245,8 +325,10 @@ mod tests { chain_id: ChainId::from_string("chain_id"), port_id: PortId::from_str("port_id").unwrap(), channel_id: ChannelId::from_str("channel-07").unwrap(), + packet_sequences: vec![], key_name: Some("key_name".to_owned()), counterparty_key_name: None, + query_packets_chunk_size: None }, ClearPacketsCmd::parse_from([ "test", @@ -269,8 +351,10 @@ mod tests { chain_id: ChainId::from_string("chain_id"), port_id: PortId::from_str("port_id").unwrap(), channel_id: ChannelId::from_str("channel-07").unwrap(), + packet_sequences: vec![], key_name: None, counterparty_key_name: Some("counterparty_key_name".to_owned()), + query_packets_chunk_size: None }, ClearPacketsCmd::parse_from([ "test", @@ -286,6 +370,34 @@ mod tests { ) } + #[test] + fn test_clear_packets_query_packets_chunk_size() { + assert_eq!( + ClearPacketsCmd { + chain_id: ChainId::from_string("chain_id"), + port_id: PortId::from_str("port_id").unwrap(), + channel_id: ChannelId::from_str("channel-07").unwrap(), + packet_sequences: vec![], + key_name: None, + counterparty_key_name: Some("counterparty_key_name".to_owned()), + query_packets_chunk_size: Some(100), + }, + ClearPacketsCmd::parse_from([ + "test", + "--chain", + "chain_id", + "--port", + "port_id", + "--channel", + "channel-07", + "--counterparty-key-name", + "counterparty_key_name", + "--query-packets-chunk-size", + "100" + ]) + ) + } + #[test] fn test_clear_packets_no_chan() { assert!(ClearPacketsCmd::try_parse_from([ diff --git a/crates/relayer-cli/src/commands/config/validate.rs b/crates/relayer-cli/src/commands/config/validate.rs index 64b1d29cfe..3970e33c63 100644 --- a/crates/relayer-cli/src/commands/config/validate.rs +++ b/crates/relayer-cli/src/commands/config/validate.rs @@ -46,7 +46,7 @@ impl Runnable for 
ValidateCmd { // No need to output the underlying error, this is done already when the application boots. // See `application::CliApp::after_config`. match config.validate_config() { - Ok(_) => Output::success("configuration is valid").exit(), + Ok(_) => Output::success_msg("configuration is valid").exit(), Err(_) => Output::error("configuration is invalid").exit(), } } diff --git a/crates/relayer-cli/src/commands/create/channel.rs b/crates/relayer-cli/src/commands/create/channel.rs index e8c0c80d7c..01b26dd79e 100644 --- a/crates/relayer-cli/src/commands/create/channel.rs +++ b/crates/relayer-cli/src/commands/create/channel.rs @@ -46,7 +46,7 @@ use crate::{ }; static PROMPT: &str = "Are you sure you want a new connection & clients to be created? Hermes will use default security parameters."; -static HINT: &str = "Consider using the default invocation\n\nhermes create channel --a-port --b-port --a-chain --a-connection \n\nto re-use a pre-existing connection."; +static HINT: &str = "Consider using the default invocation\n\nhermes create channel --a-port --b-port --a-chain --a-connection \n\nto reuse a pre-existing connection."; /// The data structure that represents all the possible options when invoking /// the `create channel` CLI command. @@ -55,7 +55,7 @@ static HINT: &str = "Consider using the default invocation\n\nhermes create chan /// /// `create channel --a-port --b-port --a-chain --a-connection ` /// is the default way in which this command should be used, specifying a `Connection-ID` -/// associated with chain A for this new channel to re-use. +/// associated with chain A for this new channel to reuse. 
/// /// `create channel --a-port --b-port --a-chain --b-chain --new-client-connection` /// can alternatively be used to indicate that a new connection/client pair is being diff --git a/crates/relayer-cli/src/commands/query/packet/pending.rs b/crates/relayer-cli/src/commands/query/packet/pending.rs index 267990f790..c10898cd45 100644 --- a/crates/relayer-cli/src/commands/query/packet/pending.rs +++ b/crates/relayer-cli/src/commands/query/packet/pending.rs @@ -1,3 +1,5 @@ +use core::fmt; + use abscissa_core::{ clap::Parser, Command, @@ -9,7 +11,10 @@ use ibc_relayer::chain::{ pending_packet_summary, PendingPackets, }, - handle::BaseChainHandle, + handle::{ + BaseChainHandle, + ChainHandle, + }, }; use ibc_relayer_types::core::ics24_host::identifier::{ ChainId, @@ -30,8 +35,15 @@ use crate::{ /// at both ends of a channel. #[derive(Debug, Serialize)] struct Summary
<P>
{ + /// Source chain + src_chain: ChainId, + + /// Destination chain + dst_chain: ChainId, + /// The packets sent on the source chain as identified by the command. src: P, + /// The packets sent on the counterparty chain. dst: P, } @@ -39,12 +51,47 @@ struct Summary
<P>
{ impl Summary { fn collate(self) -> Summary { Summary { + src_chain: self.src_chain, + dst_chain: self.dst_chain, + src: CollatedPendingPackets::new(self.src), dst: CollatedPendingPackets::new(self.dst), } } } +impl fmt::Display for Summary { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "Summary of pending packets:")?; + + writeln!(f, "Packets pending on source chain ({}):", self.src_chain)?; + writeln!(f, " Unreceived packets:")?; + for seq in &self.src.unreceived_packets { + writeln!(f, " {}", seq)?; + } + writeln!(f, " Unreceived acks:")?; + for seq in &self.src.unreceived_acks { + writeln!(f, " {}", seq)?; + } + + writeln!( + f, + "Packets pending on destination chain ({}):", + self.dst_chain + )?; + writeln!(f, " Unreceived packets:")?; + for seq in &self.dst.unreceived_packets { + writeln!(f, " {}", seq)?; + } + writeln!(f, " Unreceived acks:")?; + for seq in &self.dst.unreceived_acks { + writeln!(f, " {}", seq)?; + } + + Ok(()) + } +} + /// This command does the following: /// /// 1. 
queries the chain to get its counterparty chain, channel and port identifiers (needed in 2) @@ -113,6 +160,8 @@ impl QueryPendingPacketsCmd { .map_err(Error::supervisor)?; Ok(Summary { + src_chain: chains.src.id(), + dst_chain: chains.dst.id(), src: src_summary, dst: dst_summary, }) @@ -125,7 +174,7 @@ impl Runnable for QueryPendingPacketsCmd { match self.execute() { Ok(summary) if json() => Output::success(summary).exit(), - Ok(summary) => Output::success(summary.collate()).exit(), + Ok(summary) => Output::success_msg(summary.collate().to_string()).exit(), Err(e) => Output::error(e).exit(), } } diff --git a/crates/relayer-cli/src/commands/start.rs b/crates/relayer-cli/src/commands/start.rs index 5832f64caa..a0a17cea19 100644 --- a/crates/relayer-cli/src/commands/start.rs +++ b/crates/relayer-cli/src/commands/start.rs @@ -180,7 +180,7 @@ fn spawn_rest_server(config: &Config) -> Option { ); if let Err(e) = handle.await { - error!("REST service crashed with errror: {e}"); + error!("REST service crashed with error: {e}"); } } Err(e) => { @@ -235,7 +235,7 @@ fn spawn_telemetry_server(config: &Config) { info!("telemetry service running, exposing metrics at http://{addr}/metrics"); if let Err(e) = handle.await { - error!("telemetry service crashed with errror: {e}"); + error!("telemetry service crashed with error: {e}"); } } Err(e) => error!("telemetry service failed to start: {e}"), diff --git a/crates/relayer-cli/src/commands/tx/packet.rs b/crates/relayer-cli/src/commands/tx/packet.rs index fcfb0cff40..302a609e1d 100644 --- a/crates/relayer-cli/src/commands/tx/packet.rs +++ b/crates/relayer-cli/src/commands/tx/packet.rs @@ -1,3 +1,5 @@ +use std::ops::RangeInclusive; + use abscissa_core::{ clap::Parser, Command, @@ -9,10 +11,12 @@ use ibc_relayer::{ Link, LinkParameters, }, + util::seq_range::parse_seq_range, }; use ibc_relayer_types::{ core::{ ics02_client::height::Height, + ics04_channel::packet::Sequence, ics24_host::identifier::{ ChainId, ChannelId, @@ -68,6 +72,19 
@@ pub struct TxPacketRecvCmd { )] src_channel_id: ChannelId, + #[clap( + long = "packet-sequences", + help = "Sequences of packets to be cleared on `dst-chain`. \ + Either a single sequence or a range of sequences can be specified. \ + If not provided, all pending recv or timeout packets will be cleared. \ + Each element of the comma-separated list must be either a single \ + sequence or a range of sequences. \ + Example: `1,10..20` will clear packets with sequences 1, 10, 11, ..., 20", + value_delimiter = ',', + value_parser = parse_seq_range + )] + packet_sequences: Vec>, + #[clap( long = "packet-data-query-height", help = "Exact height at which the packet data is queried via block_results RPC" @@ -87,6 +104,8 @@ impl Runnable for TxPacketRecvCmd { let opts = LinkParameters { src_port_id: self.src_port_id.clone(), src_channel_id: self.src_channel_id.clone(), + max_memo_size: config.mode.packets.ics20_max_memo_size, + max_receiver_size: config.mode.packets.ics20_max_receiver_size, }; let link = match Link::new_from_opts(chains.src, chains.dst, opts, false, false) { Ok(link) => link, @@ -99,6 +118,7 @@ impl Runnable for TxPacketRecvCmd { let res: Result, Error> = link .relay_recv_packet_and_timeout_messages_with_packet_data_query_height( + self.packet_sequences.clone(), packet_data_query_height, ) .map_err(Error::link); @@ -149,6 +169,19 @@ pub struct TxPacketAckCmd { )] src_channel_id: ChannelId, + #[clap( + long = "packet-sequences", + help = "Sequences of packets to be cleared on `dst-chain`. \ + Either a single sequence or a range of sequences can be specified. \ + If not provided, all pending ack packets will be cleared. \ + Each element of the comma-separated list must be either a single \ + sequence or a range of sequences. 
\ + Example: `1,10..20` will clear packets with sequences 1, 10, 11, ..., 20", + value_delimiter = ',', + value_parser = parse_seq_range + )] + packet_sequences: Vec>, + #[clap( long = "packet-data-query-height", help = "Exact height at which the packet data is queried via block_results RPC" @@ -168,6 +201,8 @@ impl Runnable for TxPacketAckCmd { let opts = LinkParameters { src_port_id: self.src_port_id.clone(), src_channel_id: self.src_channel_id.clone(), + max_memo_size: config.mode.packets.ics20_max_memo_size, + max_receiver_size: config.mode.packets.ics20_max_receiver_size, }; let link = match Link::new_from_opts(chains.src, chains.dst, opts, false, false) { Ok(link) => link, @@ -179,7 +214,10 @@ impl Runnable for TxPacketAckCmd { .map(|height| Height::new(link.a_to_b.src_chain().id().version(), height).unwrap()); let res: Result, Error> = link - .relay_ack_packet_messages_with_packet_data_query_height(packet_data_query_height) + .relay_ack_packet_messages_with_packet_data_query_height( + self.packet_sequences.clone(), + packet_data_query_height, + ) .map_err(Error::link); match res { @@ -213,6 +251,7 @@ mod tests { src_chain_id: ChainId::from_string("chain_sender"), src_port_id: PortId::from_str("port_sender").unwrap(), src_channel_id: ChannelId::from_str("channel_sender").unwrap(), + packet_sequences: vec![], packet_data_query_height: None }, TxPacketRecvCmd::parse_from([ @@ -237,6 +276,7 @@ mod tests { src_chain_id: ChainId::from_string("chain_sender"), src_port_id: PortId::from_str("port_sender").unwrap(), src_channel_id: ChannelId::from_str("channel_sender").unwrap(), + packet_sequences: vec![], packet_data_query_height: None }, TxPacketRecvCmd::parse_from([ @@ -260,6 +300,7 @@ mod tests { src_chain_id: ChainId::from_string("chain_sender"), src_port_id: PortId::from_str("port_sender").unwrap(), src_channel_id: ChannelId::from_str("channel_sender").unwrap(), + packet_sequences: vec![], packet_data_query_height: Some(5), }, TxPacketRecvCmd::parse_from([ @@ 
-342,6 +383,7 @@ mod tests { src_chain_id: ChainId::from_string("chain_sender"), src_port_id: PortId::from_str("port_sender").unwrap(), src_channel_id: ChannelId::from_str("channel_sender").unwrap(), + packet_sequences: vec![], packet_data_query_height: None }, TxPacketAckCmd::parse_from([ @@ -366,6 +408,7 @@ mod tests { src_chain_id: ChainId::from_string("chain_sender"), src_port_id: PortId::from_str("port_sender").unwrap(), src_channel_id: ChannelId::from_str("channel_sender").unwrap(), + packet_sequences: vec![], packet_data_query_height: None }, TxPacketAckCmd::parse_from([ diff --git a/crates/relayer-rest/Cargo.toml b/crates/relayer-rest/Cargo.toml index e7e2426cdb..a5e7066646 100644 --- a/crates/relayer-rest/Cargo.toml +++ b/crates/relayer-rest/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-relayer-rest" -version = "0.26.3" +version = "0.26.4" authors = ["Informal Systems "] edition = "2021" license = "Apache-2.0" @@ -14,8 +14,8 @@ description = """ """ [dependencies] -ibc-relayer-types = { version = "0.26.3", path = "../relayer-types" } -ibc-relayer = { version = "0.26.3", path = "../relayer" } +ibc-relayer-types = { version = "0.26.4", path = "../relayer-types" } +ibc-relayer = { version = "0.26.4", path = "../relayer" } crossbeam-channel = "0.5" serde = "1.0" @@ -25,4 +25,4 @@ tokio = "1.26" [dev-dependencies] reqwest = { version = "0.11.16", features = ["json"], default-features = false } -toml = "0.7.3" +toml = "0.8.8" diff --git a/crates/relayer-rest/tests/mock.rs b/crates/relayer-rest/tests/mock.rs index 39cb6fd96f..38ef39e7fe 100644 --- a/crates/relayer-rest/tests/mock.rs +++ b/crates/relayer-rest/tests/mock.rs @@ -73,7 +73,7 @@ async fn version() { let rest_api_version = VersionInfo { name: "ibc-relayer-rest".to_string(), - version: "0.26.3".to_string(), + version: "0.26.4".to_string(), }; let result: JsonResult<_, ()> = JsonResult::Success(vec![version.clone(), rest_api_version]); diff --git a/crates/relayer-types/Cargo.toml 
b/crates/relayer-types/Cargo.toml index 75119e8b08..8ca2f9f7f2 100644 --- a/crates/relayer-types/Cargo.toml +++ b/crates/relayer-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-relayer-types" -version = "0.26.3" +version = "0.26.4" edition = "2021" license = "Apache-2.0" readme = "README.md" @@ -60,6 +60,6 @@ optional = true env_logger = "0.10.0" tracing = { version = "0.1.36", default-features = false } tracing-subscriber = { version = "0.3.14", features = ["fmt", "env-filter", "json"] } -test-log = { version = "0.2.10", features = ["trace"] } +test-log = { version = "0.2.14", features = ["trace"] } tendermint-rpc = { version = "0.34.0", features = ["http-client", "websocket-client"] } tendermint-testgen = { version = "0.34.0" } # Needed for generating (synthetic) light blocks. diff --git a/crates/relayer-types/src/applications/ics31_icq/events.rs b/crates/relayer-types/src/applications/ics31_icq/events.rs index 43e2dcbcbc..b02b9fb272 100644 --- a/crates/relayer-types/src/applications/ics31_icq/events.rs +++ b/crates/relayer-types/src/applications/ics31_icq/events.rs @@ -113,7 +113,7 @@ fn fetch_first_element_from_events( let res = block_events .get(key) .ok_or_else(|| Error::event(format!("attribute not found for key: {key}")))? 
- .get(0) + .first() .ok_or_else(|| { Error::event(format!( "element at position 0, of attribute with key `{key}`, not found" diff --git a/crates/relayer-types/src/applications/transfer/acknowledgement.rs b/crates/relayer-types/src/applications/transfer/acknowledgement.rs index 3695e374f8..047b0404a4 100644 --- a/crates/relayer-types/src/applications/transfer/acknowledgement.rs +++ b/crates/relayer-types/src/applications/transfer/acknowledgement.rs @@ -71,8 +71,8 @@ mod test { #[test] fn test_ack_ser() { fn ser_json_assert_eq(ack: Acknowledgement, json_str: &str) { - let ser = serde_json::to_string(&ack).unwrap(); - assert_eq!(ser, json_str) + let set = serde_json::to_string(&ack).unwrap(); + assert_eq!(set, json_str) } ser_json_assert_eq(Acknowledgement::success(), r#"{"result":"AQ=="}"#); diff --git a/crates/relayer-types/src/applications/transfer/denom.rs b/crates/relayer-types/src/applications/transfer/denom.rs index 7d4650ef2e..4ceb009d4b 100644 --- a/crates/relayer-types/src/applications/transfer/denom.rs +++ b/crates/relayer-types/src/applications/transfer/denom.rs @@ -213,7 +213,7 @@ pub fn is_receiver_chain_source( // A: sender chain in this transfer, port "transfer" and channel "c2b" (to B) // B: receiver chain in this transfer, port "transfer" and channel "c2a" (to A) // - // If B had originally sent the token in a previous tranfer, then A would have stored the token as + // If B had originally sent the token in a previous transfer, then A would have stored the token as // "transfer/c2b/{token_denom}". Now, A is sending to B, so to check if B is the source of the token, // we need to check if the token starts with "transfer/c2b". 
let prefix = TracePrefix::new(source_port, source_channel); diff --git a/crates/relayer-types/src/clients/ics07_tendermint/client_state.rs b/crates/relayer-types/src/clients/ics07_tendermint/client_state.rs index 119e31dba5..bf544dd6b8 100644 --- a/crates/relayer-types/src/clients/ics07_tendermint/client_state.rs +++ b/crates/relayer-types/src/clients/ics07_tendermint/client_state.rs @@ -157,11 +157,6 @@ impl ClientState { }) } - /// Get the refresh time to ensure the state does not expire - pub fn refresh_time(&self) -> Option { - Some(2 * self.trusting_period / 3) - } - /// Helper method to produce a [`Options`] struct for use in /// Tendermint-specific light client verification. pub fn as_light_client_options(&self) -> Options { diff --git a/crates/relayer-types/src/core/ics02_client/consensus_state.rs b/crates/relayer-types/src/core/ics02_client/consensus_state.rs index f9a0d840e4..7d6ef73467 100644 --- a/crates/relayer-types/src/core/ics02_client/consensus_state.rs +++ b/crates/relayer-types/src/core/ics02_client/consensus_state.rs @@ -18,7 +18,7 @@ use crate::{ /// to verify new commits & state roots. /// /// Users are not expected to implement sealed::ErasedPartialEqConsensusState. -/// Effectively, that trait bound mandates implementors to derive PartialEq, +/// Effectively, that trait bound mandates implementers to derive PartialEq, /// after which our blanket implementation will implement /// `ErasedPartialEqConsensusState` for their type. pub trait ConsensusState: Clone + Debug + Send + Sync // Any: From, diff --git a/crates/relayer-types/src/core/ics02_client/error.rs b/crates/relayer-types/src/core/ics02_client/error.rs index a1ce13b4c7..16c76f80c1 100644 --- a/crates/relayer-types/src/core/ics02_client/error.rs +++ b/crates/relayer-types/src/core/ics02_client/error.rs @@ -172,7 +172,7 @@ define_error! 
{ InvalidStringAsHeight { value: String } [ HeightError ] - | e | { format_args!("String {0} cannnot be converted to height", e.value) }, + | e | { format_args!("String {0} cannot be converted to height", e.value) }, InvalidHeight | _ | { "revision height cannot be zero" }, @@ -257,7 +257,7 @@ define_error! { update_time: Timestamp, } | e | { - format_args!("header not withing trusting period: expires_at={0} now={1}", e.latest_time, e.update_time) + format_args!("header not within trusting period: expires_at={0} now={1}", e.latest_time, e.update_time) }, MissingLocalConsensusState diff --git a/crates/relayer-types/src/core/ics02_client/msgs/create_client.rs b/crates/relayer-types/src/core/ics02_client/msgs/create_client.rs index 41e607bc6c..1c3e4b8b21 100644 --- a/crates/relayer-types/src/core/ics02_client/msgs/create_client.rs +++ b/crates/relayer-types/src/core/ics02_client/msgs/create_client.rs @@ -102,7 +102,7 @@ mod tests { let msg = MsgCreateClient::new( tm_client_state, - TmConsensusState::try_from(tm_header).unwrap().into(), + TmConsensusState::from(tm_header).into(), signer, ) .unwrap(); diff --git a/crates/relayer-types/src/core/ics02_client/trust_threshold.rs b/crates/relayer-types/src/core/ics02_client/trust_threshold.rs index 9249f216c5..ceabf7cc3e 100644 --- a/crates/relayer-types/src/core/ics02_client/trust_threshold.rs +++ b/crates/relayer-types/src/core/ics02_client/trust_threshold.rs @@ -9,6 +9,7 @@ use std::{ Error as FmtError, Formatter, }, + str::FromStr, }; use ibc_proto::{ @@ -131,23 +132,37 @@ impl Display for TrustThreshold { } } +impl FromStr for TrustThreshold { + type Err = String; + + fn from_str(s: &str) -> Result { + let parts: Vec<&str> = s.split('/').collect(); + + if parts.len() != 2 { + return Err(format!("invalid trust threshold, must be a fraction: {s}")); + } + + let (num, denom) = (parts[0].parse(), parts[1].parse()); + + if let (Ok(num), Ok(denom)) = (num, denom) { + TrustThreshold::new(num, denom).map_err(|e| e.to_string()) 
+ } else { + Err(format!("invalid trust threshold, must be a fraction: {s}",)) + } + } +} + impl Serialize for TrustThreshold { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, { - #[derive(Serialize)] - struct TrustThreshold { - numerator: u64, - denominator: u64, - } - - let tt = TrustThreshold { - numerator: self.numerator(), - denominator: self.denominator(), - }; + use serde::ser::SerializeStruct; - tt.serialize(serializer) + let mut s = serializer.serialize_struct("TrustThreshold", 2)?; + s.serialize_field("numerator", &self.numerator())?; + s.serialize_field("denominator", &self.denominator())?; + s.end() } } @@ -156,13 +171,99 @@ impl<'de> Deserialize<'de> for TrustThreshold { where D: serde::Deserializer<'de>, { - #[derive(Deserialize)] - struct TrustThreshold { - numerator: u64, - denominator: u64, + use std::fmt; + + use serde::de::{ + self, + Visitor, + }; + + // This is a Visitor that forwards string types to T's `FromStr` impl and + // forwards map types to T's `Deserialize` impl. The `PhantomData` is to + // keep the compiler from complaining about T being an unused generic type + // parameter. We need T in order to know the Value type for the Visitor + // impl. + struct StringOrStruct; + + impl<'de> Visitor<'de> for StringOrStruct { + type Value = TrustThreshold; + + fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter.write_str( + "string (eg. 
'1/3') or map `{ numerator = , denominator = }`", + ) + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + Ok(FromStr::from_str(value).unwrap()) + } + + fn visit_map(self, map: M) -> Result + where + M: de::MapAccess<'de>, + { + #[derive(Deserialize)] + struct TT { + #[serde(deserialize_with = "string_or_int")] + numerator: u64, + #[serde(deserialize_with = "string_or_int")] + denominator: u64, + } + + let tt = TT::deserialize(de::value::MapAccessDeserializer::new(map))?; + + TrustThreshold::new(tt.numerator, tt.denominator).map_err(de::Error::custom) + } + } + + deserializer.deserialize_any(StringOrStruct) + } +} + +fn string_or_int<'de, D>(deserializer: D) -> Result +where + D: serde::Deserializer<'de>, +{ + use std::fmt; + + use serde::de::{ + self, + Visitor, + }; + + struct StringOrInt; + + impl<'de> Visitor<'de> for StringOrInt { + type Value = u64; + + fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter.write_str("string or int") + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + FromStr::from_str(value).map_err(de::Error::custom) } - let tt = TrustThreshold::deserialize(deserializer)?; - Self::new(tt.numerator, tt.denominator).map_err(serde::de::Error::custom) + fn visit_i64(self, value: i64) -> Result + where + E: de::Error, + { + Ok(value as u64) + } + + fn visit_u64(self, value: u64) -> Result + where + E: de::Error, + { + Ok(value) + } } + + deserializer.deserialize_any(StringOrInt) } diff --git a/crates/relayer-types/src/core/ics03_connection/error.rs b/crates/relayer-types/src/core/ics03_connection/error.rs index 761cb4f99d..c2106a7179 100644 --- a/crates/relayer-types/src/core/ics03_connection/error.rs +++ b/crates/relayer-types/src/core/ics03_connection/error.rs @@ -99,7 +99,7 @@ define_error! 
{ VerifyConnectionState [ client_error::Error ] - | _ | { "error verifying connnection state" }, + | _ | { "error verifying connection state" }, Signer [ SignerError ] diff --git a/crates/relayer-types/src/core/ics03_connection/version.rs b/crates/relayer-types/src/core/ics03_connection/version.rs index ecb048e22a..d6f33bb8ce 100644 --- a/crates/relayer-types/src/core/ics03_connection/version.rs +++ b/crates/relayer-types/src/core/ics03_connection/version.rs @@ -27,7 +27,7 @@ pub struct Version { } impl Version { - /// Checks whether or not the given feature is supported in this versin + /// Checks whether or not the given feature is supported in this version pub fn is_supported_feature(&self, feature: String) -> bool { self.features.contains(&feature) } diff --git a/crates/relayer-types/src/core/ics04_channel/error.rs b/crates/relayer-types/src/core/ics04_channel/error.rs index 293c2d2a50..3f80270d03 100644 --- a/crates/relayer-types/src/core/ics04_channel/error.rs +++ b/crates/relayer-types/src/core/ics04_channel/error.rs @@ -115,7 +115,7 @@ define_error! 
{ | _ | { "missing channel end" }, InvalidVersionLengthConnection - | _ | { "single version must be negociated on connection before opening channel" }, + | _ | { "single version must be negotiated on connection before opening channel" }, ChannelFeatureNotSuportedByConnection | _ | { "the channel ordering is not supported by connection" }, diff --git a/crates/relayer-types/src/core/ics04_channel/events.rs b/crates/relayer-types/src/core/ics04_channel/events.rs index bead9da7b5..2a4be81fd6 100644 --- a/crates/relayer-types/src/core/ics04_channel/events.rs +++ b/crates/relayer-types/src/core/ics04_channel/events.rs @@ -108,46 +108,6 @@ impl From for Vec { } } -/// Convert attributes to Tendermint ABCI tags -impl TryFrom for Vec { - type Error = Error; - fn try_from(p: Packet) -> Result { - let mut attributes = vec![]; - let src_port = (PKT_SRC_PORT_ATTRIBUTE_KEY, p.source_port.to_string()).into(); - attributes.push(src_port); - let src_channel = (PKT_SRC_CHANNEL_ATTRIBUTE_KEY, p.source_channel.to_string()).into(); - attributes.push(src_channel); - let dst_port = (PKT_DST_PORT_ATTRIBUTE_KEY, p.destination_port.to_string()).into(); - attributes.push(dst_port); - let dst_channel = ( - PKT_DST_CHANNEL_ATTRIBUTE_KEY, - p.destination_channel.to_string(), - ) - .into(); - attributes.push(dst_channel); - let sequence = (PKT_SEQ_ATTRIBUTE_KEY, p.sequence.to_string()).into(); - attributes.push(sequence); - let timeout_height = ( - PKT_TIMEOUT_HEIGHT_ATTRIBUTE_KEY, - p.timeout_height.to_event_attribute_value(), - ) - .into(); - attributes.push(timeout_height); - let timeout_timestamp = ( - PKT_TIMEOUT_TIMESTAMP_ATTRIBUTE_KEY, - p.timeout_timestamp.nanoseconds().to_string(), - ) - .into(); - attributes.push(timeout_timestamp); - let val = str::from_utf8(&p.data).expect("hex-encoded string should always be valid UTF-8"); - let packet_data = (PKT_DATA_ATTRIBUTE_KEY, val).into(); - attributes.push(packet_data); - let ack = (PKT_ACK_ATTRIBUTE_KEY, "").into(); - attributes.push(ack); 
- Ok(attributes) - } -} - pub trait EventType { fn event_type() -> IbcEventType; } @@ -569,17 +529,6 @@ impl From for IbcEvent { } } -impl TryFrom for abci::Event { - type Error = Error; - - fn try_from(v: SendPacket) -> Result { - Ok(Self { - kind: IbcEventType::SendPacket.as_str().to_owned(), - attributes: v.packet.try_into()?, - }) - } -} - #[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct ReceivePacket { pub packet: Packet, @@ -612,17 +561,6 @@ impl From for IbcEvent { } } -impl TryFrom for abci::Event { - type Error = Error; - - fn try_from(v: ReceivePacket) -> Result { - Ok(Self { - kind: IbcEventType::ReceivePacket.as_str().to_owned(), - attributes: v.packet.try_into()?, - }) - } -} - #[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct WriteAcknowledgement { pub packet: Packet, @@ -662,21 +600,6 @@ impl From for IbcEvent { } } -impl TryFrom for abci::Event { - type Error = Error; - - fn try_from(v: WriteAcknowledgement) -> Result { - let mut attributes: Vec<_> = v.packet.try_into()?; - let val = str::from_utf8(&v.ack).expect("hex-encoded string should always be valid UTF-8"); - let ack = (PKT_ACK_ATTRIBUTE_KEY, val).into(); - attributes.push(ack); - Ok(Self { - kind: IbcEventType::WriteAck.as_str().to_owned(), - attributes, - }) - } -} - #[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct AcknowledgePacket { pub packet: Packet, @@ -703,17 +626,6 @@ impl From for IbcEvent { } } -impl TryFrom for abci::Event { - type Error = Error; - - fn try_from(v: AcknowledgePacket) -> Result { - Ok(Self { - kind: IbcEventType::AckPacket.as_str().to_owned(), - attributes: v.packet.try_into()?, - }) - } -} - #[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct TimeoutPacket { pub packet: Packet, @@ -746,17 +658,6 @@ impl From for IbcEvent { } } -impl TryFrom for abci::Event { - type Error = Error; - - fn try_from(v: TimeoutPacket) -> Result { - Ok(Self { - kind: IbcEventType::Timeout.as_str().to_owned(), - attributes: 
v.packet.try_into()?, - }) - } -} - #[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct TimeoutOnClosePacket { pub packet: Packet, @@ -788,14 +689,3 @@ impl From for IbcEvent { IbcEvent::TimeoutOnClosePacket(v) } } - -impl TryFrom for abci::Event { - type Error = Error; - - fn try_from(v: TimeoutOnClosePacket) -> Result { - Ok(Self { - kind: IbcEventType::TimeoutOnClose.as_str().to_owned(), - attributes: v.packet.try_into()?, - }) - } -} diff --git a/crates/relayer-types/src/core/ics04_channel/packet.rs b/crates/relayer-types/src/core/ics04_channel/packet.rs index 0e8cc536e1..413f2ade2d 100644 --- a/crates/relayer-types/src/core/ics04_channel/packet.rs +++ b/crates/relayer-types/src/core/ics04_channel/packet.rs @@ -56,9 +56,7 @@ impl core::fmt::Display for PacketMsgType { } /// The sequence number of a packet enforces ordering among packets from the same source. -#[derive( - Copy, Clone, Debug, Default, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize, Serialize, -)] +#[derive(Copy, Clone, Default, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize, Serialize)] pub struct Sequence(u64); impl FromStr for Sequence { @@ -72,6 +70,9 @@ impl FromStr for Sequence { } impl Sequence { + pub const MIN: Self = Self(0); + pub const MAX: Self = Self(u64::MAX); + pub fn is_zero(&self) -> bool { self.0 == 0 } @@ -79,6 +80,10 @@ impl Sequence { pub fn increment(&self) -> Sequence { Sequence(self.0 + 1) } + + pub fn as_u64(&self) -> u64 { + self.0 + } } impl From for Sequence { @@ -93,9 +98,15 @@ impl From for u64 { } } +impl core::fmt::Debug for Sequence { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + self.0.fmt(f) + } +} + impl core::fmt::Display for Sequence { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "{}", self.0) + self.0.fmt(f) } } diff --git a/crates/relayer-types/src/core/ics04_channel/timeout.rs b/crates/relayer-types/src/core/ics04_channel/timeout.rs index 
5917fc3242..d2ed737b02 100644 --- a/crates/relayer-types/src/core/ics04_channel/timeout.rs +++ b/crates/relayer-types/src/core/ics04_channel/timeout.rs @@ -52,7 +52,7 @@ impl TimeoutHeight { } } - /// Check if a height is *stricly past* the timeout height, and thus is + /// Check if a height is *strictly past* the timeout height, and thus is /// deemed expired. pub fn has_expired(&self, height: Height) -> bool { match self { diff --git a/crates/relayer-types/src/core/ics23_commitment/merkle.rs b/crates/relayer-types/src/core/ics23_commitment/merkle.rs index 96bc2d14da..881ab121d8 100644 --- a/crates/relayer-types/src/core/ics23_commitment/merkle.rs +++ b/crates/relayer-types/src/core/ics23_commitment/merkle.rs @@ -147,8 +147,13 @@ impl MerkleProof { } // verify the absence of key in lowest subtree - let proof = self.proofs.get(0).ok_or_else(Error::invalid_merkle_proof)?; - let spec = ics23_specs.get(0).ok_or_else(Error::invalid_merkle_proof)?; + let proof = self + .proofs + .first() + .ok_or_else(Error::invalid_merkle_proof)?; + let spec = ics23_specs + .first() + .ok_or_else(Error::invalid_merkle_proof)?; // keys are represented from root-to-leaf let key = keys .key_path @@ -191,7 +196,7 @@ fn calculate_non_existence_root(proof: &NonExistenceProof) -> Result, Er // Merkle Proof serialization notes: // "Proof" id currently defined in a number of forms and included in a number of places // - TmProof: in tendermint-rs/src/merkle/proof.rs:Proof -// - RawProofOps: in tendermint-proto/tendermint.cyrpto.rs:ProofOps +// - RawProofOps: in tendermint-proto/tendermint.crypto.rs:ProofOps // - RawMerkleProof: in ibc-proto/ibc.core.commitment.v1.rs:MerkleProof // - structure that includes a RawProofOps in its only `proof` field. 
// #[derive(Clone, PartialEq, ::prost::Message)] diff --git a/crates/relayer-types/src/core/ics24_host/identifier.rs b/crates/relayer-types/src/core/ics24_host/identifier.rs index 0046838e38..66ccae442c 100644 --- a/crates/relayer-types/src/core/ics24_host/identifier.rs +++ b/crates/relayer-types/src/core/ics24_host/identifier.rs @@ -318,6 +318,14 @@ impl PortId { Self("transfer".to_string()) } + pub fn oracle() -> Self { + Self("oracle".to_string()) + } + + pub fn icqhost() -> Self { + Self("icqhost".to_string()) + } + /// Get this identifier as a borrowed `&str` pub fn as_str(&self) -> &str { &self.0 diff --git a/crates/relayer-types/src/events.rs b/crates/relayer-types/src/events.rs index 0f22c9629e..8bf42e8b65 100644 --- a/crates/relayer-types/src/events.rs +++ b/crates/relayer-types/src/events.rs @@ -1,9 +1,6 @@ use std::{ borrow::Cow, - convert::{ - TryFrom, - TryInto, - }, + convert::TryFrom, fmt::{ Display, Error as FmtError, @@ -359,42 +356,6 @@ impl Display for IbcEvent { } } -impl TryFrom for abci::Event { - type Error = Error; - - fn try_from(event: IbcEvent) -> Result { - Ok(match event { - IbcEvent::CreateClient(event) => event.into(), - IbcEvent::UpdateClient(event) => event.into(), - IbcEvent::UpgradeClient(event) => event.into(), - IbcEvent::ClientMisbehaviour(event) => event.into(), - IbcEvent::OpenInitConnection(event) => event.into(), - IbcEvent::OpenTryConnection(event) => event.into(), - IbcEvent::OpenAckConnection(event) => event.into(), - IbcEvent::OpenConfirmConnection(event) => event.into(), - IbcEvent::OpenInitChannel(event) => event.into(), - IbcEvent::OpenTryChannel(event) => event.into(), - IbcEvent::OpenAckChannel(event) => event.into(), - IbcEvent::OpenConfirmChannel(event) => event.into(), - IbcEvent::CloseInitChannel(event) => event.into(), - IbcEvent::CloseConfirmChannel(event) => event.into(), - IbcEvent::SendPacket(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::ReceivePacket(event) => 
event.try_into().map_err(Error::channel)?, - IbcEvent::WriteAcknowledgement(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::AcknowledgePacket(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::TimeoutPacket(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::TimeoutOnClosePacket(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::IncentivizedPacket(event) => event.into(), - IbcEvent::CrossChainQueryPacket(event) => event.into(), - IbcEvent::DistributeFeePacket(event) => event.into(), - IbcEvent::AppModule(event) => event.try_into()?, - IbcEvent::NewBlock(_) | IbcEvent::ChainError(_) => { - return Err(Error::incorrect_event_type(event.to_string())); - } - }) - } -} - impl IbcEvent { pub fn to_json(&self) -> String { match serde_json::to_string(self) { diff --git a/crates/relayer-types/src/mock/client_state.rs b/crates/relayer-types/src/mock/client_state.rs index e98bc56b45..3d7c30c37a 100644 --- a/crates/relayer-types/src/mock/client_state.rs +++ b/crates/relayer-types/src/mock/client_state.rs @@ -47,10 +47,6 @@ impl MockClientState { pub fn latest_height(&self) -> Height { self.header.height() } - - pub fn refresh_time(&self) -> Option { - None - } } impl Protobuf for MockClientState {} diff --git a/crates/relayer/Cargo.toml b/crates/relayer/Cargo.toml index a6cf5721f1..5bbee7a8c3 100644 --- a/crates/relayer/Cargo.toml +++ b/crates/relayer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-relayer" -version = "0.26.3" +version = "0.26.4" edition = "2021" license = "Apache-2.0" readme = "README.md" @@ -21,8 +21,8 @@ telemetry = ["ibc-telemetry"] [dependencies] ibc-proto = { version = "0.39.0", features = ["serde"] } -ibc-telemetry = { version = "0.26.3", path = "../telemetry", optional = true } -ibc-relayer-types = { version = "0.26.3", path = "../relayer-types", features = ["mocks"] } +ibc-telemetry = { version = "0.26.4", path = "../telemetry", optional = true } +ibc-relayer-types = { version = "0.26.4", path 
= "../relayer-types", features = ["mocks"] } # TODO: bump this after IBC PRs (specifically balance query) are merged astria-core = { git = "https://github.com/astriaorg/astria", rev = "6ffbced612a7244ccea146db90c6cfb8c1aa7466" } @@ -39,7 +39,7 @@ humantime-serde = "1.1.1" serde = "1.0" serde_derive = "1.0" thiserror = "1.0.40" -toml = "0.7" +toml = "0.8" tracing = "0.1.36" tokio = { version = "1.0", features = ["rt-multi-thread", "time", "sync"] } serde_json = { version = "1" } @@ -47,9 +47,9 @@ bytes = "1.4.0" prost = { version = "0.12" } tonic = { version = "0.10", features = ["tls", "tls-roots"] } futures = "0.3.27" -crossbeam-channel = "0.5.8" +crossbeam-channel = "0.5.11" hex = "0.4" -bitcoin = { version = "0.30.1", features = ["serde"] } +bitcoin = { version = "0.31.1", features = ["serde"] } tiny-bip39 = "1.0.0" hdpath = "0.6.3" sha2 = "0.10.6" @@ -68,17 +68,17 @@ semver = "1.0" humantime = "2.1.0" regex = "1" moka = { version = "0.12.0", features = ["sync"] } -uuid = { version = "1.4.0", features = ["v4"] } +uuid = { version = "1.6.1", features = ["v4"] } bs58 = "0.5.0" digest = "0.10.6" ed25519 = "2.2.2" ed25519-dalek = { version = "2.0.0", features = ["serde"] } ed25519-dalek-bip32 = "0.3.0" generic-array = "0.14.7" -secp256k1 = { version = "0.27.0", features = ["rand-std"] } +secp256k1 = { version = "0.28.1", features = ["rand-std"] } strum = { version = "0.25", features = ["derive"] } tokio-stream = "0.1.14" -once_cell = "1.17.1" +once_cell = "1.19.0" tracing-subscriber = { version = "0.3.14", features = ["fmt", "env-filter", "json"] } [dependencies.byte-unit] @@ -119,10 +119,10 @@ version = "0.34.0" default-features = false [dev-dependencies] -ibc-relayer-types = { version = "0.26.3", path = "../relayer-types", features = ["mocks"] } +ibc-relayer-types = { version = "0.26.4", path = "../relayer-types", features = ["mocks"] } serial_test = "2.0.0" env_logger = "0.10.0" -test-log = { version = "0.2.10", features = ["trace"] } +test-log = { version = 
"0.2.14", features = ["trace"] } # Needed for generating (synthetic) light blocks. tendermint-testgen = { version = "0.34.0" } diff --git a/crates/relayer/src/chain/cosmos.rs b/crates/relayer/src/chain/cosmos.rs index 53d9b9b62e..e89224f7b5 100644 --- a/crates/relayer/src/chain/cosmos.rs +++ b/crates/relayer/src/chain/cosmos.rs @@ -2146,7 +2146,7 @@ impl ChainEndpoint for CosmosSdkChain { /// Note - there is no way to format the packet query such that it asks for Tx-es with either /// sequence (the query conditions can only be AND-ed). /// There is a possibility to include "<=" and ">=" conditions but it doesn't work with - /// string attributes (sequence is emmitted as a string). + /// string attributes (sequence is emitted as a string). /// Therefore, for packets we perform one tx_search for each sequence. /// Alternatively, a single query for all packets could be performed but it would return all /// packets ever sent. diff --git a/crates/relayer/src/chain/cosmos/client.rs b/crates/relayer/src/chain/cosmos/client.rs index 96bcb6c797..dfae496fe0 100644 --- a/crates/relayer/src/chain/cosmos/client.rs +++ b/crates/relayer/src/chain/cosmos/client.rs @@ -42,7 +42,7 @@ impl Settings { let trust_threshold = options .trust_threshold - .unwrap_or_else(|| src_chain_config.trust_threshold.into()); + .unwrap_or(src_chain_config.trust_threshold); Settings { max_clock_drift, diff --git a/crates/relayer/src/chain/cosmos/compatibility.rs b/crates/relayer/src/chain/cosmos/compatibility.rs index f98b38865a..990fc0b60a 100644 --- a/crates/relayer/src/chain/cosmos/compatibility.rs +++ b/crates/relayer/src/chain/cosmos/compatibility.rs @@ -10,7 +10,7 @@ use super::version; /// # Note: Should be consistent with [features] guide page. /// /// [features]: https://hermes.informal.systems/advanced/features.html -const SDK_MODULE_VERSION_REQ: &str = ">=0.44, <0.48"; +const SDK_MODULE_VERSION_REQ: &str = ">=0.45, <0.51"; /// Specifies the IBC-go module version requirement. 
/// At the moment, we support both chains with and without @@ -20,7 +20,7 @@ const SDK_MODULE_VERSION_REQ: &str = ">=0.44, <0.48"; /// # Note: Should be consistent with [features] guide page. /// /// [features]: https://hermes.informal.systems/advanced/features.html -const IBC_GO_MODULE_VERSION_REQ: &str = ">=1.1, <=7"; +const IBC_GO_MODULE_VERSION_REQ: &str = ">=4.1.1, <9"; #[derive(Error, Debug)] pub enum Diagnostic { diff --git a/crates/relayer/src/chain/cosmos/config.rs b/crates/relayer/src/chain/cosmos/config.rs index 3bfe68b90a..c64e8e0503 100644 --- a/crates/relayer/src/chain/cosmos/config.rs +++ b/crates/relayer/src/chain/cosmos/config.rs @@ -10,7 +10,6 @@ use serde_derive::{ Deserialize, Serialize, }; -use tendermint_light_client::verifier::types::TrustThreshold; use tendermint_rpc::Url; use crate::{ @@ -24,6 +23,7 @@ use crate::{ MaxMsgNum, MaxTxSize, Memo, + TrustThreshold, }, AddressType, EventSourceMode, @@ -31,6 +31,7 @@ use crate::{ GasPrice, GenesisRestart, PacketFilter, + RefreshRate, }, keyring::Store, }; @@ -79,13 +80,20 @@ pub struct CosmosSdkConfig { pub gas_multiplier: Option, pub fee_granter: Option, + #[serde(default)] pub max_msg_num: MaxMsgNum, + #[serde(default)] pub max_tx_size: MaxTxSize, + #[serde(default = "default::max_grpc_decoding_size")] pub max_grpc_decoding_size: Byte, + /// How many packets to fetch at once from the chain when clearing packets + #[serde(default = "default::query_packets_chunk_size")] + pub query_packets_chunk_size: usize, + /// A correction parameter that helps deal with clocks that are only approximately synchronized /// between the source and destination chains for a client. /// This parameter is used when deciding to accept or reject a new header @@ -103,6 +111,11 @@ pub struct CosmosSdkConfig { #[serde(default, with = "humantime_serde")] pub trusting_period: Option, + /// The rate at which to refresh the client referencing this chain, + /// expressed as a fraction of the trusting period. 
+ #[serde(default = "default::client_refresh_rate")] + pub client_refresh_rate: RefreshRate, + /// CCV consumer chain #[serde(default = "default::ccv_consumer_chain")] pub ccv_consumer_chain: bool, diff --git a/crates/relayer/src/chain/cosmos/config/error.rs b/crates/relayer/src/chain/cosmos/config/error.rs index 41ee500266..9500fce21b 100644 --- a/crates/relayer/src/chain/cosmos/config/error.rs +++ b/crates/relayer/src/chain/cosmos/config/error.rs @@ -1,34 +1,34 @@ use flex_error::define_error; -use ibc_relayer_types::core::ics24_host::identifier::ChainId; -use tendermint_light_client_verifier::types::TrustThreshold; +use ibc_relayer_types::core::{ + ics02_client::trust_threshold::TrustThreshold, + ics24_host::identifier::ChainId, +}; define_error! { - Error { - InvalidTrustThreshold - { - threshold: TrustThreshold, - chain_id: ChainId, - reason: String - } - |e| { - format!("config file specifies an invalid `trust_threshold` ({0}) for the chain '{1}', caused by: {2}", - e.threshold, e.chain_id, e.reason) - }, - -DeprecatedGasAdjustment - { - gas_adjustment: f64, - gas_multiplier: f64, - chain_id: ChainId, - } - |e| { - format!( - "config file specifies deprecated setting `gas_adjustment = {1}` for the chain '{0}'; \ - to get the same behavior, use `gas_multiplier = {2}", - e.chain_id, e.gas_adjustment, e.gas_multiplier - ) - }, + InvalidTrustThreshold + { + threshold: TrustThreshold, + chain_id: ChainId, + reason: String + } + |e| { + format!("config file specifies an invalid `trust_threshold` ({0}) for the chain '{1}', caused by: {2}", + e.threshold, e.chain_id, e.reason) + }, + DeprecatedGasAdjustment + { + gas_adjustment: f64, + gas_multiplier: f64, + chain_id: ChainId, + } + |e| { + format!( + "config file specifies deprecated setting `gas_adjustment = {1}` for the chain '{0}'; \ + to get the same behavior, use `gas_multiplier = {2}", + e.chain_id, e.gas_adjustment, e.gas_multiplier + ) + }, } } diff --git a/crates/relayer/src/chain/cosmos/query/tx.rs 
b/crates/relayer/src/chain/cosmos/query/tx.rs index 45100f9842..859f69f7f0 100644 --- a/crates/relayer/src/chain/cosmos/query/tx.rs +++ b/crates/relayer/src/chain/cosmos/query/tx.rs @@ -74,7 +74,7 @@ pub async fn query_txs( // query the first Tx that includes the event matching the client request // Note: it is possible to have multiple Tx-es for same client and consensus height. - // In this case it must be true that the client updates were performed with tha + // In this case it must be true that the client updates were performed with the // same header as the first one, otherwise a subsequent transaction would have // failed on chain. Therefore only one Tx is of interest and current API returns // the first one. @@ -133,7 +133,7 @@ pub async fn query_txs( /// Note - there is no way to format the packet query such that it asks for Tx-es with either /// sequence (the query conditions can only be AND-ed). /// There is a possibility to include "<=" and ">=" conditions but it doesn't work with -/// string attributes (sequence is emmitted as a string). +/// string attributes (sequence is emitted as a string). /// Therefore, for packets we perform one tx_search for each sequence. /// Alternatively, a single query for all packets could be performed but it would return all /// packets ever sent. 
diff --git a/crates/relayer/src/chain/counterparty.rs b/crates/relayer/src/chain/counterparty.rs index 9aeba2c47d..d13e16be0d 100644 --- a/crates/relayer/src/chain/counterparty.rs +++ b/crates/relayer/src/chain/counterparty.rs @@ -56,6 +56,7 @@ use crate::{ client_state::IdentifiedAnyClientState, path::PathIdentifiers, supervisor::Error, + telemetry, }; pub fn counterparty_chain_from_connection( @@ -527,6 +528,19 @@ pub fn unreceived_packets( &path.counterparty_channel_id, )?; + telemetry!( + update_backlog, + commit_sequences + .iter() + .map(|s| u64::from(*s)) + .collect::>() + .clone(), + &counterparty_chain.id(), + &path.counterparty_channel_id, + &path.counterparty_port_id, + &chain.id() + ); + let packet_seq_nrs = unreceived_packets_sequences(chain, &path.port_id, &path.channel_id, commit_sequences)?; diff --git a/crates/relayer/src/channel.rs b/crates/relayer/src/channel.rs index 69324ee1a0..a901aabfb2 100644 --- a/crates/relayer/src/channel.rs +++ b/crates/relayer/src/channel.rs @@ -1073,7 +1073,7 @@ impl Channel { let counterparty = Counterparty::new(self.src_port_id().clone(), self.src_channel_id().cloned()); - // Re-use the version that was either set on ChanOpenInit or overwritten by the application. + // Reuse the version that was either set on ChanOpenInit or overwritten by the application. 
let version = src_channel.version().clone(); let channel = ChannelEnd::new( diff --git a/crates/relayer/src/client_state.rs b/crates/relayer/src/client_state.rs index 482de10e20..2178c3d006 100644 --- a/crates/relayer/src/client_state.rs +++ b/crates/relayer/src/client_state.rs @@ -107,30 +107,30 @@ impl AnyClientState { } } - pub fn max_clock_drift(&self) -> Duration { + pub fn trusting_period(&self) -> Duration { match self { - AnyClientState::Tendermint(state) => state.max_clock_drift, + AnyClientState::Tendermint(state) => state.trusting_period, #[cfg(test)] - AnyClientState::Mock(_) => Duration::new(0, 0), + AnyClientState::Mock(_) => Duration::from_secs(14 * 24 * 60 * 60), // 2 weeks } } - pub fn client_type(&self) -> ClientType { + pub fn max_clock_drift(&self) -> Duration { match self { - Self::Tendermint(state) => state.client_type(), + AnyClientState::Tendermint(state) => state.max_clock_drift, #[cfg(test)] - Self::Mock(state) => state.client_type(), + AnyClientState::Mock(_) => Duration::new(0, 0), } } - pub fn refresh_period(&self) -> Option { + pub fn client_type(&self) -> ClientType { match self { - AnyClientState::Tendermint(tm_state) => tm_state.refresh_time(), + Self::Tendermint(state) => state.client_type(), #[cfg(test)] - AnyClientState::Mock(mock_state) => mock_state.refresh_time(), + Self::Mock(state) => state.client_type(), } } } diff --git a/crates/relayer/src/config.rs b/crates/relayer/src/config.rs index efddb38477..aee6a3ca64 100644 --- a/crates/relayer/src/config.rs +++ b/crates/relayer/src/config.rs @@ -5,6 +5,7 @@ pub mod error; pub mod filter; pub mod gas_multiplier; pub mod proof_specs; +pub mod refresh_rate; pub mod types; use alloc::collections::BTreeMap; @@ -41,6 +42,7 @@ use ibc_relayer_types::{ }, timestamp::ZERO_DURATION, }; +pub use refresh_rate::RefreshRate; use serde_derive::{ Deserialize, Serialize, @@ -54,6 +56,10 @@ use tendermint_rpc::{ pub use crate::config::Error as ConfigError; use crate::{ 
chain::cosmos::config::CosmosSdkConfig, + config::types::{ + ics20_field_size_limit::Ics20FieldSizeLimit, + TrustThreshold, + }, error::Error as RelayerError, extension_options::ExtensionOptionDynamicFeeTx, keyring, @@ -127,7 +133,7 @@ impl PartialOrd for GasPrice { /// the parsing of other prices. pub fn parse_gas_prices(prices: String) -> Vec { prices - .split(';') + .split(|c| c == ',' || c == ';') .filter_map(|gp| GasPrice::from_str(gp).ok()) .collect() } @@ -187,6 +193,10 @@ pub mod default { 100 } + pub fn query_packets_chunk_size() -> usize { + 50 + } + pub fn rpc_timeout() -> Duration { Duration::from_secs(60) } @@ -223,6 +233,15 @@ pub mod default { Byte::from_bytes(33554432) } + pub fn trust_threshold() -> TrustThreshold { + TrustThreshold::TWO_THIRDS + } + + pub fn client_refresh_rate() -> RefreshRate { + // Refresh the client three times per trusting period + RefreshRate::new(1, 3) + } + pub fn latency_submitted() -> HistogramConfig { HistogramConfig { range: Range { @@ -242,6 +261,14 @@ pub mod default { buckets: 10, } } + + pub fn ics20_max_memo_size() -> Ics20FieldSizeLimit { + Ics20FieldSizeLimit::new(true, Byte::from_bytes(32768)) + } + + pub fn ics20_max_receiver_size() -> Ics20FieldSizeLimit { + Ics20FieldSizeLimit::new(true, Byte::from_bytes(2048)) + } } #[derive(Clone, Debug, Default, Deserialize, Serialize)] @@ -413,6 +440,10 @@ pub struct Packets { pub tx_confirmation: bool, #[serde(default = "default::auto_register_counterparty_payee")] pub auto_register_counterparty_payee: bool, + #[serde(default = "default::ics20_max_memo_size")] + pub ics20_max_memo_size: Ics20FieldSizeLimit, + #[serde(default = "default::ics20_max_receiver_size")] + pub ics20_max_receiver_size: Ics20FieldSizeLimit, } impl Default for Packets { @@ -423,6 +454,8 @@ impl Default for Packets { clear_on_start: default::clear_on_start(), tx_confirmation: default::tx_confirmation(), auto_register_counterparty_payee: default::auto_register_counterparty_payee(), + 
ics20_max_memo_size: default::ics20_max_memo_size(), + ics20_max_receiver_size: default::ics20_max_receiver_size(), } } } @@ -698,6 +731,7 @@ impl ChainConfig { .collect() } }; + Ok(keys) } @@ -728,6 +762,20 @@ impl ChainConfig { Self::Astria(config) => &config.event_source, } } + + pub fn query_packets_chunk_size(&self) -> usize { + match self { + Self::CosmosSdk(config) => config.query_packets_chunk_size, + Self::Astria(config) => config.query_packets_chunk_size, + } + } + + pub fn set_query_packets_chunk_size(&mut self, query_packets_chunk_size: usize) { + match self { + Self::CosmosSdk(config) => config.query_packets_chunk_size = query_packets_chunk_size, + Self::Astria(config) => config.query_packets_chunk_size = query_packets_chunk_size, + } + } } /// Attempt to load and parse the TOML config file as a `Config`. @@ -792,6 +840,7 @@ impl> From> for Diagnostic { } use crate::chain::cosmos::config::error::Error as CosmosConfigError; + impl From for Error { fn from(error: CosmosConfigError) -> Error { Error::cosmos_config_error(error.to_string()) @@ -894,9 +943,6 @@ mod tests { #[test] fn parse_multiple_gas_prices() { - let gas_prices = "0.25token1;0.0001token2"; - let parsed = parse_gas_prices(gas_prices.to_string()); - let expected = vec![ GasPrice { price: 0.25, @@ -908,7 +954,15 @@ mod tests { }, ]; - assert_eq!(expected, parsed); + let test_cases = vec![ + ("0.25token1;0.0001token2", expected.clone()), + ("0.25token1,0.0001token2", expected.clone()), + ]; + + for (input, expected) in test_cases { + let parsed = parse_gas_prices(input.to_string()); + assert_eq!(expected, parsed); + } } #[test] @@ -921,14 +975,18 @@ mod tests { #[test] fn malformed_gas_prices_do_not_get_parsed() { - let malformed_prices = "token1;.token2;0.25token3"; - let parsed = parse_gas_prices(malformed_prices.to_string()); - let expected = vec![GasPrice { price: 0.25, denom: "token3".to_owned(), }]; + let test_cases = vec![ + ("token1;.token2;0.25token3", expected.clone()), + 
("token1,.token2,0.25token3", expected.clone()), + ]; - assert_eq!(expected, parsed); + for (input, expected) in test_cases { + let parsed = parse_gas_prices(input.to_string()); + assert_eq!(expected, parsed); + } } } diff --git a/crates/relayer/src/config/refresh_rate.rs b/crates/relayer/src/config/refresh_rate.rs new file mode 100644 index 0000000000..0c14d6c84f --- /dev/null +++ b/crates/relayer/src/config/refresh_rate.rs @@ -0,0 +1,60 @@ +use std::str::FromStr; + +use serde::{ + Deserialize, + Deserializer, + Serialize, + Serializer, +}; + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct RefreshRate { + numerator: u64, + denominator: u64, +} + +impl RefreshRate { + pub fn new(numerator: u64, denominator: u64) -> Self { + Self { + numerator, + denominator, + } + } + + pub fn as_f64(self) -> f64 { + self.numerator as f64 / self.denominator as f64 + } +} + +impl FromStr for RefreshRate { + type Err = String; + + fn from_str(s: &str) -> Result { + let parts: Vec<&str> = s.split('/').collect(); + + if parts.len() != 2 { + return Err(format!("invalid refresh rate, must be a fraction: {s}")); + } + + let (num, denom) = (parts[0].parse(), parts[1].parse()); + + if let (Ok(num), Ok(denom)) = (num, denom) { + Ok(RefreshRate::new(num, denom)) + } else { + Err(format!("invalid refresh rate, must be a fraction: {s}",)) + } + } +} + +impl Serialize for RefreshRate { + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_str(&format!("{}/{}", self.numerator, self.denominator)) + } +} + +impl<'de> Deserialize<'de> for RefreshRate { + fn deserialize>(deserializer: D) -> Result { + let s = String::deserialize(deserializer)?; + RefreshRate::from_str(&s).map_err(serde::de::Error::custom) + } +} diff --git a/crates/relayer/src/config/types.rs b/crates/relayer/src/config/types.rs index 120b7b2417..4eb5670bbb 100644 --- a/crates/relayer/src/config/types.rs +++ b/crates/relayer/src/config/types.rs @@ -3,6 +3,7 @@ //! 
Implements defaults, as well as serializing and //! deserializing with upper-bound verification. +pub use ibc_relayer_types::core::ics02_client::trust_threshold::TrustThreshold; pub use max_msg_num::MaxMsgNum; pub mod max_msg_num { @@ -131,6 +132,10 @@ pub mod max_tx_size { Ok(Self(value)) } + pub fn unsafe_new(value: usize) -> Self { + Self(value) + } + pub fn max() -> Self { Self(Self::MAX_BOUND) } @@ -285,6 +290,65 @@ pub mod memo { } } +pub mod ics20_field_size_limit { + use std::fmt::Display; + + use byte_unit::Byte; + use serde_derive::{ + Deserialize, + Serialize, + }; + + pub enum ValidationResult { + Valid, + Invalid { size: usize, max: usize }, + } + + impl Display for ValidationResult { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Valid => write!(f, "valid"), + + Self::Invalid { size, max } => { + write!(f, "invalid, size `{size}` is greater than max `{max}`") + } + } + } + } + + #[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] + pub struct Ics20FieldSizeLimit { + enabled: bool, + size: Byte, + } + + impl Ics20FieldSizeLimit { + pub fn new(enabled: bool, size: Byte) -> Self { + Self { enabled, size } + } + + /// If the limit is disabled consider the field as valid. + /// If the limit is enabled assert the field is smaller or equal + /// to the configured value. 
+ pub fn check_field_size(&self, field: &str) -> ValidationResult { + if self.enabled { + let size_limit = self.size.get_bytes() as usize; + + if field.len() <= size_limit { + ValidationResult::Valid + } else { + ValidationResult::Invalid { + size: field.len(), + max: size_limit, + } + } + } else { + ValidationResult::Valid + } + } + } +} + #[cfg(test)] #[allow(dead_code)] // the fields of the structs defined below are never accessed mod tests { diff --git a/crates/relayer/src/error.rs b/crates/relayer/src/error.rs index d03bd4969b..d561524a54 100644 --- a/crates/relayer/src/error.rs +++ b/crates/relayer/src/error.rs @@ -348,7 +348,7 @@ define_error! { ChannelReceiveTimeout [ TraceError ] - |_| { "timeout when waiting for reponse over inter-thread channel" }, + |_| { "timeout when waiting for response over inter-thread channel" }, InvalidInputHeader |_| { "the input header is not recognized as a header for this chain" }, @@ -692,7 +692,7 @@ impl GrpcStatusSubdetail { /// ## Note /// This error may happen even when packets are submitted in order when the `simulate_tx` /// gRPC endpoint is allowed to be called after a block is created and before - /// Tendermint/mempool finishes `recheck_tx`, similary to the issue described in + /// Tendermint/mempool finishes `recheck_tx`, similarly to the issue described in /// . /// /// See for more info. 
diff --git a/crates/relayer/src/event.rs b/crates/relayer/src/event.rs index 13e9f664e0..3f9decf6af 100644 --- a/crates/relayer/src/event.rs +++ b/crates/relayer/src/event.rs @@ -503,8 +503,6 @@ mod tests { timestamp::Timestamp, }; - use super::*; - #[test] fn extract_header() { let header = get_dummy_ics07_header(); @@ -599,50 +597,4 @@ mod tests { } } } - - #[test] - fn packet_event_to_abci_event() { - let packet = Packet { - sequence: Sequence::from(10), - source_port: "a_test_port".parse().unwrap(), - source_channel: "channel-0".parse().unwrap(), - destination_port: "b_test_port".parse().unwrap(), - destination_channel: "channel-1".parse().unwrap(), - data: "test_data".as_bytes().to_vec(), - timeout_height: Height::new(1, 10).unwrap().into(), - timeout_timestamp: Timestamp::now(), - }; - let mut abci_events = vec![]; - let send_packet = channel_events::SendPacket { - packet: packet.clone(), - }; - abci_events.push(AbciEvent::try_from(send_packet.clone()).unwrap()); - let write_ack = channel_events::WriteAcknowledgement { - packet: packet.clone(), - ack: "test_ack".as_bytes().to_vec(), - }; - abci_events.push(AbciEvent::try_from(write_ack.clone()).unwrap()); - let ack_packet = channel_events::AcknowledgePacket { - packet: packet.clone(), - }; - abci_events.push(AbciEvent::try_from(ack_packet.clone()).unwrap()); - let timeout_packet = channel_events::TimeoutPacket { packet }; - abci_events.push(AbciEvent::try_from(timeout_packet.clone()).unwrap()); - - for abci_event in abci_events { - match ibc_event_try_from_abci_event(&abci_event).ok() { - Some(ibc_event) => match ibc_event { - IbcEvent::SendPacket(e) => assert_eq!(e.packet, send_packet.packet), - IbcEvent::WriteAcknowledgement(e) => { - assert_eq!(e.packet, write_ack.packet); - assert_eq!(e.ack, write_ack.ack); - } - IbcEvent::AcknowledgePacket(e) => assert_eq!(e.packet, ack_packet.packet), - IbcEvent::TimeoutPacket(e) => assert_eq!(e.packet, timeout_packet.packet), - _ => panic!("unexpected event type"), - 
}, - None => panic!("converted event was wrong"), - } - } - } } diff --git a/crates/relayer/src/event/source/websocket/extract.rs b/crates/relayer/src/event/source/websocket/extract.rs index 8e0667220a..0fcedb8fd5 100644 --- a/crates/relayer/src/event/source/websocket/extract.rs +++ b/crates/relayer/src/event/source/websocket/extract.rs @@ -186,7 +186,7 @@ pub fn extract_events( if matches!(ibc_event, IbcEvent::SendPacket(_)) { // Should be the same as the hash of tx_result.tx? if let Some(hash) = - events.get("tx.hash").and_then(|values| values.get(0)) + events.get("tx.hash").and_then(|values| values.first()) { tracing::trace!(event = "SendPacket", "tx hash: {}", hash); } diff --git a/crates/relayer/src/foreign_client.rs b/crates/relayer/src/foreign_client.rs index 7c7070db7c..eb1bb88fa2 100644 --- a/crates/relayer/src/foreign_client.rs +++ b/crates/relayer/src/foreign_client.rs @@ -677,6 +677,7 @@ impl ForeignClient ForeignClient ForeignClient Result>, ForeignClientError> { let (client_state, elapsed) = self.validated_client_state()?; - // The refresh_window is the maximum duration - // we can backoff between subsequent client updates. 
- let refresh_window = client_state.refresh_period(); + let src_config = self.src_chain.config().map_err(|e| { + ForeignClientError::client_create( + self.src_chain.id(), + "failed while querying the source chain for configuration".to_string(), + e, + ) + })?; + + let refresh_rate = match src_config { + ChainConfig::CosmosSdk(config) => config.client_refresh_rate, + ChainConfig::Astria(config) => config.client_refresh_rate, + }; + + let refresh_period = client_state + .trusting_period() + .mul_f64(refresh_rate.as_f64()); - match (elapsed, refresh_window) { - (None, _) | (_, None) => Ok(None), - (Some(elapsed), Some(refresh_window)) => { - if elapsed > refresh_window { - info!(?elapsed, ?refresh_window, "client needs to be refreshed"); + match (elapsed, refresh_period) { + (None, _) => Ok(None), + (Some(elapsed), refresh_period) => { + if elapsed > refresh_period { + info!(?elapsed, ?refresh_period, "client needs to be refreshed"); self.build_latest_update_client_and_send() .map_or_else(Err, |ev| Ok(Some(ev))) diff --git a/crates/relayer/src/keyring/secp256k1_key_pair.rs b/crates/relayer/src/keyring/secp256k1_key_pair.rs index 6121458ebb..7655b3ab46 100644 --- a/crates/relayer/src/keyring/secp256k1_key_pair.rs +++ b/crates/relayer/src/keyring/secp256k1_key_pair.rs @@ -9,10 +9,10 @@ use bitcoin::{ bip32::{ ChildNumber, DerivationPath, - ExtendedPrivKey, - ExtendedPubKey, + Xpriv, + Xpub, }, - network::constants::Network, + network::Network, }; use digest::Digest; use generic_array::{ @@ -54,16 +54,15 @@ use crate::config::AddressType; pub fn private_key_from_mnemonic( mnemonic_words: &str, hd_path: &StandardHDPath, -) -> Result { +) -> Result { let mnemonic = Mnemonic::from_phrase(mnemonic_words, Language::English) .map_err(Error::invalid_mnemonic)?; let seed = Seed::new(&mnemonic, ""); - let base_key = - ExtendedPrivKey::new_master(Network::Bitcoin, seed.as_bytes()).map_err(|err| { - Error::bip32_key_generation_failed(Secp256k1KeyPair::KEY_TYPE, err.into()) - })?; + 
let base_key = Xpriv::new_master(Network::Bitcoin, seed.as_bytes()).map_err(|err| { + Error::bip32_key_generation_failed(Secp256k1KeyPair::KEY_TYPE, err.into()) + })?; let private_key = base_key .derive_priv( @@ -178,8 +177,8 @@ pub struct Secp256k1KeyPair { // The old `KeyEntry` type #[derive(Debug, Deserialize)] struct KeyPairV1 { - public_key: ExtendedPubKey, - private_key: ExtendedPrivKey, + public_key: Xpub, + private_key: Xpriv, account: String, address: Vec, } @@ -250,7 +249,7 @@ impl Secp256k1KeyPair { account_prefix: &str, ) -> Result { let private_key = private_key_from_mnemonic(mnemonic, hd_path)?; - let public_key = ExtendedPubKey::from_priv(&Secp256k1::signing_only(), &private_key); + let public_key = Xpub::from_priv(&Secp256k1::signing_only(), &private_key); let address = get_address(&public_key.public_key, address_type); let account = encode_address(account_prefix, &address)?; @@ -277,7 +276,7 @@ impl SigningKeyPair for Secp256k1KeyPair { // Decode the private key from the mnemonic let private_key = private_key_from_mnemonic(&key_file.mnemonic, hd_path)?; - let derived_pubkey = ExtendedPubKey::from_priv(&Secp256k1::signing_only(), &private_key); + let derived_pubkey = Xpub::from_priv(&Secp256k1::signing_only(), &private_key); let derived_pubkey_bytes = derived_pubkey.public_key.serialize().to_vec(); assert!(derived_pubkey_bytes.len() <= keyfile_pubkey_bytes.len()); @@ -336,9 +335,10 @@ impl SigningKeyPair for Secp256k1KeyPair { Secp256k1AddressType::Cosmos => Sha256::digest(message), }; - // SAFETY: hashed_message is 32 bytes, as expected in `Message::from_slice`, - // so `unwrap` is safe. - let message = Message::from_slice(&hashed_message).unwrap(); + assert!(hashed_message.len() == 32); + + // SAFETY: hashed_message is 32 bytes, as expected in `Message::from_slice`. 
+ let message = Message::from_digest_slice(&hashed_message).unwrap(); Ok(Secp256k1::signing_only() .sign_ecdsa(&message, &self.private_key) diff --git a/crates/relayer/src/lib.rs b/crates/relayer/src/lib.rs index c7fdbcb8da..77030a327b 100644 --- a/crates/relayer/src/lib.rs +++ b/crates/relayer/src/lib.rs @@ -16,7 +16,7 @@ //! //! For the IBC relayer binary, please see [Hermes] (`ibc-relayer-cli` crate). //! -//! [Hermes]: https://docs.rs/ibc-relayer-cli/1.7.2/ +//! [Hermes]: https://docs.rs/ibc-relayer-cli/1.7.4/ extern crate alloc; diff --git a/crates/relayer/src/light_client/tendermint.rs b/crates/relayer/src/light_client/tendermint.rs index 63d2670b1d..c6ac726294 100644 --- a/crates/relayer/src/light_client/tendermint.rs +++ b/crates/relayer/src/light_client/tendermint.rs @@ -204,7 +204,8 @@ impl super::LightClient for LightClient { provider: self.peer_id, }; - let trusted_block = self.fetch(update_header.trusted_height)?; + // Get the light block at trusted_height + 1 from chain. + let trusted_block = self.fetch(update_header.trusted_height.increment())?; if trusted_block.validators.hash() != update_header.trusted_validator_set.hash() { return Err(Error::misbehaviour(format!( "mismatch between the trusted validator set of the update \ diff --git a/crates/relayer/src/link.rs b/crates/relayer/src/link.rs index 1ac999c8ea..873a0f95b0 100644 --- a/crates/relayer/src/link.rs +++ b/crates/relayer/src/link.rs @@ -24,6 +24,7 @@ use crate::{ Channel, ChannelSide, }, + config::types::ics20_field_size_limit::Ics20FieldSizeLimit, link::error::LinkError, }; @@ -50,6 +51,8 @@ use tx_hashes::TxHashes; pub struct LinkParameters { pub src_port_id: PortId, pub src_channel_id: ChannelId, + pub max_memo_size: Ics20FieldSizeLimit, + pub max_receiver_size: Ics20FieldSizeLimit, } pub struct Link { @@ -60,9 +63,10 @@ impl Link { pub fn new( channel: Channel, with_tx_confirmation: bool, + link_parameters: LinkParameters, ) -> Result { Ok(Self { - a_to_b: RelayPath::new(channel, 
with_tx_confirmation)?, + a_to_b: RelayPath::new(channel, with_tx_confirmation, link_parameters)?, }) } @@ -157,7 +161,7 @@ impl Link { a_connection.client_id().clone(), a_connection_id, opts.src_port_id.clone(), - Some(opts.src_channel_id), + Some(opts.src_channel_id.clone()), None, ), b_side: ChannelSide::new( @@ -186,7 +190,7 @@ impl Link { .map_err(LinkError::relayer)?; } - Link::new(channel, with_tx_confirmation) + Link::new(channel, with_tx_confirmation, opts) } /// Constructs a link around the channel that is reverse to the channel @@ -199,6 +203,8 @@ impl Link { let opts = LinkParameters { src_port_id: self.a_to_b.dst_port_id().clone(), src_channel_id: self.a_to_b.dst_channel_id().clone(), + max_memo_size: self.a_to_b.max_memo_size, + max_receiver_size: self.a_to_b.max_receiver_size, }; let chain_b = self.a_to_b.dst_chain().clone(); let chain_a = self.a_to_b.src_chain().clone(); diff --git a/crates/relayer/src/link/cli.rs b/crates/relayer/src/link/cli.rs index 1dc939f8b5..fd7b78ba34 100644 --- a/crates/relayer/src/link/cli.rs +++ b/crates/relayer/src/link/cli.rs @@ -1,5 +1,6 @@ use std::{ convert::TryInto, + ops::RangeInclusive, thread, time::{ Duration, @@ -10,6 +11,10 @@ use std::{ use ibc_relayer_types::{ core::ics04_channel::packet::Sequence, events::IbcEvent, + utils::pretty::{ + PrettyDuration, + PrettySlice, + }, Height, }; use itertools::Itertools; @@ -41,18 +46,12 @@ use crate::{ query_send_packet_events, query_write_ack_events, }, - relay_path::RelayPath, relay_sender::SyncSender, Link, + RelayPath, }, path::PathIdentifiers, - util::{ - collate::CollatedIterExt, - pretty::{ - PrettyDuration, - PrettySlice, - }, - }, + util::collate::CollatedIterExt as _, }; impl RelayPath { @@ -99,12 +98,16 @@ impl RelayPath { } impl Link { - pub fn relay_recv_packet_and_timeout_messages(&self) -> Result, LinkError> { - self.relay_recv_packet_and_timeout_messages_with_packet_data_query_height(None) + pub fn relay_recv_packet_and_timeout_messages( + &self, + 
sequences: Vec>, + ) -> Result, LinkError> { + self.relay_recv_packet_and_timeout_messages_with_packet_data_query_height(sequences, None) } /// Implements the `packet-recv` CLI pub fn relay_recv_packet_and_timeout_messages_with_packet_data_query_height( &self, + sequence_filter: Vec>, packet_data_query_height: Option, ) -> Result, LinkError> { let _span = error_span!( @@ -117,7 +120,7 @@ impl Link { .entered(); // Find the sequence numbers of unreceived packets - let (sequences, src_response_height) = unreceived_packets( + let (mut sequences, src_response_height) = unreceived_packets( self.a_to_b.dst_chain(), self.a_to_b.src_chain(), &self.a_to_b.path_id, @@ -128,6 +131,11 @@ impl Link { return Ok(vec![]); } + if !sequence_filter.is_empty() { + info!("filtering unreceived packets by given sequence ranges"); + sequences.retain(|seq| sequence_filter.iter().any(|range| range.contains(seq))); + } + info!( "{} unreceived packets found: {} ", sequences.len(), @@ -139,21 +147,32 @@ impl Link { None => Qualified::SmallerEqual(src_response_height), }; + let chunk_size = self + .a_to_b + .src_chain() + .config() + .map_or(50, |cfg| cfg.query_packets_chunk_size()); + self.relay_packet_messages( sequences, query_height, + chunk_size, query_send_packet_events, TrackingId::new_static("packet-recv"), ) } - pub fn relay_ack_packet_messages(&self) -> Result, LinkError> { - self.relay_ack_packet_messages_with_packet_data_query_height(None) + pub fn relay_ack_packet_messages( + &self, + sequences: Vec>, + ) -> Result, LinkError> { + self.relay_ack_packet_messages_with_packet_data_query_height(sequences, None) } /// Implements the `packet-ack` CLI pub fn relay_ack_packet_messages_with_packet_data_query_height( &self, + sequence_filter: Vec>, packet_data_query_height: Option, ) -> Result, LinkError> { let _span = error_span!( @@ -166,7 +185,7 @@ impl Link { .entered(); // Find the sequence numbers of unreceived acknowledgements - let Some((sequences, src_response_height)) = 
unreceived_acknowledgements( + let Some((mut sequences, src_response_height)) = unreceived_acknowledgements( self.a_to_b.dst_chain(), self.a_to_b.src_chain(), &self.a_to_b.path_id, @@ -180,6 +199,11 @@ impl Link { return Ok(vec![]); } + if !sequence_filter.is_empty() { + info!("filtering unreceived acknowledgements by given sequence ranges"); + sequences.retain(|seq| sequence_filter.iter().any(|range| range.contains(seq))); + } + info!( "{} unreceived acknowledgements found: {} ", sequences.len(), @@ -191,9 +215,16 @@ impl Link { None => Qualified::SmallerEqual(src_response_height), }; + let chunk_size = self + .a_to_b + .src_chain() + .config() + .map_or(50, |cfg| cfg.query_packets_chunk_size()); + self.relay_packet_messages( sequences, query_height, + chunk_size, query_write_ack_events, TrackingId::new_static("packet-ack"), ) @@ -203,6 +234,7 @@ impl Link { &self, sequences: Vec, query_height: Qualified, + chunk_size: usize, query_fn: QueryFn, tracking_id: TrackingId, ) -> Result, LinkError> @@ -219,6 +251,7 @@ impl Link { query_height, self.a_to_b.src_chain(), &self.a_to_b.path_id, + chunk_size, query_fn, ); diff --git a/crates/relayer/src/link/packet_events.rs b/crates/relayer/src/link/packet_events.rs index 0f08aff401..9a2dcbc589 100644 --- a/crates/relayer/src/link/packet_events.rs +++ b/crates/relayer/src/link/packet_events.rs @@ -28,15 +28,13 @@ use crate::{ util::collate::CollatedIterExt, }; -/// Limit on how many query results should be expected. -pub const CHUNK_LENGTH: usize = 50; - /// Returns an iterator on batches of packet events. 
pub fn query_packet_events_with<'a, ChainA, QueryFn>( sequences: &'a [Sequence], query_height: Qualified, src_chain: &'a ChainA, path: &'a PathIdentifiers, + chunk_size: usize, query_fn: QueryFn, ) -> impl Iterator> + 'a where @@ -52,7 +50,7 @@ where let events_total = sequences.len(); let mut events_left = events_total; - sequences.chunks(CHUNK_LENGTH).map_while(move |chunk| { + sequences.chunks(chunk_size).map_while(move |chunk| { match query_fn(src_chain, path, chunk, query_height) { Ok(events) => { events_left -= chunk.len(); diff --git a/crates/relayer/src/link/relay_path.rs b/crates/relayer/src/link/relay_path.rs index 62e148f720..2a3a5f228e 100644 --- a/crates/relayer/src/link/relay_path.rs +++ b/crates/relayer/src/link/relay_path.rs @@ -10,15 +10,17 @@ use std::{ }, }; -use ibc_proto::google::protobuf::Any; +use ibc_proto::{ + google::protobuf::Any, + ibc::applications::transfer::v2::FungibleTokenPacketData as RawPacketData, +}; use ibc_relayer_types::{ core::{ - ics02_client::events::ClientMisbehaviour as ClientMisbehaviourEvent, + ics02_client::events::ClientMisbehaviour, ics04_channel::{ channel::{ ChannelEnd, Ordering, - State as ChannelState, }, events::{ SendPacket, @@ -94,6 +96,10 @@ use crate::{ error::ChannelError, Channel, }, + config::types::ics20_field_size_limit::{ + Ics20FieldSizeLimit, + ValidationResult, + }, event::{ source::EventBatch, IbcEventWithHeight, @@ -126,6 +132,8 @@ use crate::{ SubmitReply, }, relay_summary::RelaySummary, + ChannelState, + LinkParameters, }, path::PathIdentifiers, telemetry, @@ -182,12 +190,16 @@ pub struct RelayPath { // transactions if [`confirm_txes`] is true. 
pending_txs_src: PendingTxs, pending_txs_dst: PendingTxs, + + pub max_memo_size: Ics20FieldSizeLimit, + pub max_receiver_size: Ics20FieldSizeLimit, } impl RelayPath { pub fn new( channel: Channel, with_tx_confirmation: bool, + link_parameters: LinkParameters, ) -> Result { let src_chain = channel.src_chain().clone(); let dst_chain = channel.dst_chain().clone(); @@ -226,6 +238,9 @@ impl RelayPath { confirm_txes: with_tx_confirmation, pending_txs_src: PendingTxs::new(src_chain, src_channel_id, src_port_id, dst_chain_id), pending_txs_dst: PendingTxs::new(dst_chain, dst_channel_id, dst_port_id, src_chain_id), + + max_memo_size: link_parameters.max_memo_size, + max_receiver_size: link_parameters.max_receiver_size, }) } @@ -492,9 +507,14 @@ impl RelayPath { let tracking_id = TrackingId::new_cleared_uuid(); telemetry!(received_event_batch, tracking_id); + let src_config = self.src_chain().config().map_err(LinkError::relayer)?; + let chunk_size = src_config.query_packets_chunk_size(); + for i in 1..=MAX_RETRIES { - let cleared_recv = self.schedule_recv_packet_and_timeout_msgs(height, tracking_id); - let cleared_ack = self.schedule_packet_ack_msgs(height, tracking_id); + let cleared_recv = + self.schedule_recv_packet_and_timeout_msgs(height, chunk_size, tracking_id); + + let cleared_ack = self.schedule_packet_ack_msgs(height, chunk_size, tracking_id); match cleared_recv.and(cleared_ack) { Ok(()) => return Ok(()), @@ -587,7 +607,7 @@ impl RelayPath { .entered(); let input = events.events(); - let src_height = match input.get(0) { + let src_height = match input.first() { None => return Ok((None, None)), Some(ev) => ev.height, }; @@ -618,6 +638,18 @@ impl RelayPath { for event_with_height in input { trace!(event = %event_with_height, "processing event"); + if let Some(packet) = event_with_height.event.packet() { + // If the event is a ICS-04 packet event, and the packet contains ICS-20 + // packet data, check that the ICS-20 fields are within the configured limits. 
+ if !check_ics20_fields_size( + &packet.data, + self.max_memo_size, + self.max_receiver_size, + ) { + continue; + } + } + let (dst_msg, src_msg) = match &event_with_height.event { IbcEvent::CloseInitChannel(_) => ( self.build_chan_close_confirm_from_event(event_with_height)?, @@ -984,11 +1016,7 @@ impl RelayPath { #[inline] fn event_per_type( mut tx_events: Vec, - ) -> ( - Option, - Option, - Option, - ) { + ) -> (Option, Option, Option) { let mut error = None; let mut update = None; let mut misbehaviour = None; @@ -1062,7 +1090,7 @@ impl RelayPath { // All updates were successful, no errors and no misbehaviour. (None, Some(update_event_height), None) => Ok(update_event_height), (Some(chain_error), _, _) => { - // Atleast one chain-error so retry if possible. + // At least one chain-error so retry if possible. if retries_left == 0 { Err(LinkError::client(ForeignClientError::chain_error_event( self.dst_chain().id(), @@ -1086,7 +1114,7 @@ impl RelayPath { _ => Err(LinkError::update_client_failed()), } } - // Atleast one misbehaviour event, so don't retry. + // At least one misbehaviour event, so don't retry. (_, _, Some(_misbehaviour)) => Err(LinkError::update_client_failed()), } } @@ -1129,7 +1157,7 @@ impl RelayPath { // All updates were successful, no errors and no misbehaviour. (None, Some(update_event_height), None) => Ok(update_event_height), (Some(chain_error), _, _) => { - // Atleast one chain-error so retry if possible. + // At least one chain-error so retry if possible. 
if retries_left == 0 { Err(LinkError::client(ForeignClientError::chain_error_event( self.src_chain().id(), @@ -1168,6 +1196,7 @@ impl RelayPath { pub fn schedule_recv_packet_and_timeout_msgs( &self, opt_query_height: Option, + chunk_size: usize, tracking_id: TrackingId, ) -> Result<(), LinkError> { let _span = span!( @@ -1204,6 +1233,7 @@ impl RelayPath { Qualified::SmallerEqual(query_height), self.src_chain(), &self.path_id, + chunk_size, query_send_packet_events, ) { // Update telemetry info @@ -1227,6 +1257,7 @@ impl RelayPath { pub fn schedule_packet_ack_msgs( &self, opt_query_height: Option, + chunk_size: usize, tracking_id: TrackingId, ) -> Result<(), LinkError> { let _span = span!( @@ -1265,6 +1296,7 @@ impl RelayPath { Qualified::SmallerEqual(query_height), self.src_chain(), &self.path_id, + chunk_size, query_write_ack_events, ) { telemetry!(self.record_cleared_acknowledgments(events_chunk.iter())); @@ -1428,6 +1460,7 @@ impl RelayPath { dst_info: &ChainStatus, ) -> Result, LinkError> { let packet = event.packet.clone(); + if self .dst_channel(QueryHeight::Specific(dst_info.height))? 
.state_matches(&ChannelState::Closed) @@ -1446,7 +1479,16 @@ impl RelayPath { dst_info: &ChainStatus, height: Height, ) -> Result<(Option, Option), LinkError> { + crate::time!( + "build_recv_or_timeout_from_send_packet_event", + { + "src_channel": event.packet.source_channel, + "dst_channel": event.packet.destination_channel, + } + ); + let timeout = self.build_timeout_from_send_packet_event(event, dst_info)?; + if timeout.is_some() { Ok((None, timeout)) } else { @@ -1951,3 +1993,34 @@ impl RelayPath { } } } + +#[tracing::instrument(skip(data))] +fn check_ics20_fields_size( + data: &[u8], + memo_limit: Ics20FieldSizeLimit, + receiver_limit: Ics20FieldSizeLimit, +) -> bool { + match serde_json::from_slice::(data) { + Ok(packet_data) => { + match ( + memo_limit.check_field_size(&packet_data.memo), + receiver_limit.check_field_size(&packet_data.receiver), + ) { + (ValidationResult::Valid, ValidationResult::Valid) => true, + + (memo_validity, receiver_validity) => { + debug!("found invalid ICS-20 packet data, not relaying packet!"); + debug!(" ICS-20 memo: {memo_validity}"); + debug!(" ICS-20 receiver: {receiver_validity}"); + + false + } + } + } + Err(e) => { + trace!("failed to decode ICS20 packet data with error `{e}`"); + + true + } + } +} diff --git a/crates/relayer/src/object.rs b/crates/relayer/src/object.rs index bdeab83a16..d1ac7e2a09 100644 --- a/crates/relayer/src/object.rs +++ b/crates/relayer/src/object.rs @@ -379,12 +379,6 @@ impl Object { ) .map_err(ObjectError::relayer)?; - if client_state.refresh_period().is_none() { - return Err(ObjectError::refresh_not_required( - e.client_id().clone(), - dst_chain.id(), - )); - } let src_chain_id = client_state.chain_id(); Ok(Client { @@ -408,13 +402,6 @@ impl Object { .map_err(ObjectError::supervisor)? 
.client; - if client.client_state.refresh_period().is_none() { - return Err(ObjectError::refresh_not_required( - client.client_id, - chain.id(), - )); - } - Ok(Client { dst_client_id: client.client_id.clone(), dst_chain_id: chain.id(), // The object's destination is the chain hosting the client diff --git a/crates/relayer/src/util.rs b/crates/relayer/src/util.rs index e366c3e2f4..4233f9d6e5 100644 --- a/crates/relayer/src/util.rs +++ b/crates/relayer/src/util.rs @@ -14,5 +14,6 @@ pub mod pretty; pub mod profiling; pub mod queue; pub mod retry; +pub mod seq_range; pub mod stream; pub mod task; diff --git a/crates/relayer/src/util/collate.rs b/crates/relayer/src/util/collate.rs index 1122e2fedd..4190c1e98b 100644 --- a/crates/relayer/src/util/collate.rs +++ b/crates/relayer/src/util/collate.rs @@ -8,7 +8,7 @@ use serde::{ Serialize, }; -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct Collated { pub start: T, pub end: T, @@ -20,6 +20,12 @@ impl Collated { } } +impl fmt::Debug for Collated { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}..={:?}", self.start, self.end) + } +} + impl fmt::Display for Collated { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}..={}", self.start, self.end) diff --git a/crates/relayer/src/util/pretty.rs b/crates/relayer/src/util/pretty.rs index 026f25e948..5124b7b410 100644 --- a/crates/relayer/src/util/pretty.rs +++ b/crates/relayer/src/util/pretty.rs @@ -127,7 +127,7 @@ pub struct PrettyFee<'a>(pub &'a Fee); impl Display for PrettyFee<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> { - let amount = match self.0.amount.get(0) { + let amount = match self.0.amount.first() { Some(coin) => format!("{}{}", coin.amount, coin.denom), None => "".to_string(), }; diff --git a/crates/relayer/src/util/seq_range.rs b/crates/relayer/src/util/seq_range.rs new file mode 100644 
index 0000000000..d569d156ef --- /dev/null +++ b/crates/relayer/src/util/seq_range.rs @@ -0,0 +1,139 @@ +use std::ops::RangeInclusive; + +use ibc_relayer_types::core::ics04_channel::packet::Sequence; +use thiserror::Error; + +#[derive(Clone, Debug, PartialEq, Eq, Error)] +pub enum Error { + #[error("Invalid sequence number: {0}")] + InvalidSequenceNumber(String), + + #[error("Invalid range: {0}")] + InvalidRange(String), +} + +/// Parse a list of ranges over sequence numbers, separated by commas. +/// +/// - Each item in the list is either a single sequence number, or a range of sequence numbers. +/// - A range is specified as `start..end`, where `start` and `end` are sequence numbers. +/// - If `start` is omitted, the range starts at the minimum sequence number. +/// - If `end` is omitted, the range ends at the maximum sequence number. +/// - If both `start` and `end` are omitted, the range sastifies any sequence number. +/// +/// # Examples +/// - `1` Single sequence number `1` +/// - `1,2,3` Sequence numbers `1`, `2`, and `3` +/// - `..20` Sequence numbers less than or equal to `20` +/// - `10..` Sequence numbers greater than or equal to `10` +/// - `10..20` Sequence numbers `10`, `11`, `12`, ..., `20` +/// - `2,4..6,12,14..17,21,30..` Sequence numbers `2`, `4`, `5`, `6`, `12`, `14`, `15`, `16`, `17`, `21`, `30`, `31`, `32`, ... +/// - `30..,21,12,14..17,4..6,2` Same as previous +/// - `..` Any sequence number +pub fn parse_seq_ranges(s: &str) -> Result>, Error> { + s.split(',').map(parse_seq_range).collect() +} + +/// Parse a range of sequence numbers. +/// +/// - This can be a single sequence number, or a range of sequence numbers. +/// - A range is specified as `start..end`, where `start` and `end` are sequence numbers. +/// - If `start` is omitted, the range starts at the minimum sequence number. +/// - If `end` is omitted, the range ends at the maximum sequence number.` +/// - If both `start` and `end` are omitted, the range sastifies any sequence number. 
+/// +/// # Examples +/// - `1` Single sequence number `1` +/// - `1..2` Single sequence number `1` +/// - `..20` Sequence numbers strictly less than `20` +/// - `10..` Sequence numbers greater than or equal to `10` +/// - `10..20` Sequence numbers `10`, `11`, `12`, ..., `19` +/// - `..` Any sequence number +pub fn parse_seq_range(s: &str) -> Result, Error> { + if s.contains("..") { + parse_range(s) + } else { + parse_single(s) + } +} + +fn parse_int(s: &str) -> Result { + s.parse::() + .map_err(|_| Error::InvalidSequenceNumber(s.to_string())) +} + +fn parse_single(s: &str) -> Result, Error> { + parse_int(s).map(|num| num..=num) +} + +fn parse_range(s: &str) -> Result, Error> { + match s.split_once("..") { + // .. + Some(("", "")) => Ok(Sequence::MIN..=Sequence::MAX), + + // ..end + Some(("", end)) => { + let end = parse_int(end)?; + Ok(Sequence::MIN..=end) + } + + // start.. + Some((start, "")) => { + let start = parse_int(start)?; + Ok(start..=Sequence::MAX) + } + + // start..end + Some((start, end)) => { + let start = parse_int(start)?; + let end = parse_int(end)?; + Ok(start..=end) + } + + // not a range + None => Err(Error::InvalidRange(s.to_string())), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn r(range: RangeInclusive) -> RangeInclusive { + Sequence::from(*range.start())..=Sequence::from(*range.end()) + } + + #[test] + fn parse_seq_ranges_works() { + let tests = [ + ("1", vec![r(1..=1)]), + ("1,2", vec![r(1..=1), r(2..=2)]), + ("1,2,3", vec![r(1..=1), r(2..=2), r(3..=3)]), + ("1..3", vec![r(1..=3)]), + ("..3", vec![r(u64::MIN..=3)]), + ("3..", vec![r(3..=u64::MAX)]), + ("..", vec![r(u64::MIN..=u64::MAX)]), + ("1..3,4", vec![r(1..=3), r(4..=4)]), + ("1,2..4", vec![r(1..=1), r(2..=4)]), + ("1..3,4..6", vec![r(1..=3), r(4..=6)]), + ( + "..3,4..,..", + vec![r(u64::MIN..=3), r(4..=u64::MAX), r(u64::MIN..=u64::MAX)], + ), + ( + "1..,..6,7..7", + vec![r(1..=u64::MAX), r(u64::MIN..=6), r(7..=7)], + ), + ]; + + for (input, expected) in tests { + let 
actual = parse_seq_ranges(input).unwrap(); + assert_eq!(actual, expected); + } + + let fails = ["1-1", "1.1", "-1", "1..2..3", "1..-2", "-1.22"]; + + for fail in fails { + assert!(parse_seq_ranges(fail).is_err()); + } + } +} diff --git a/crates/relayer/src/util/task.rs b/crates/relayer/src/util/task.rs index e4c4e7e77b..32c63cd70d 100644 --- a/crates/relayer/src/util/task.rs +++ b/crates/relayer/src/util/task.rs @@ -53,14 +53,14 @@ struct DropJoinHandle(Option>); */ pub enum TaskError { /** - Inform the background task runner that an ignorable error has occured, + Inform the background task runner that an ignorable error has occurred, and the background task runner should log the error and then continue execution. */ Ignore(E), /** - Inform the background task runner that a fatal error has occured, + Inform the background task runner that a fatal error has occurred, and the background task runner should log the error and then abort execution. */ @@ -76,7 +76,7 @@ pub enum Next { Spawn a long-running background task with the given step runner. The step runner is a `FnMut` closure that is called repeatedly and - returns a `Result<(), TaskError>`. If the step is executed successfuly, + returns a `Result<(), TaskError>`. If the step is executed successfully, the step runner should return `Ok(())` so that it will be called again. 
Otherwise if errors occurred or of the task needs to be aborted, diff --git a/crates/relayer/src/worker.rs b/crates/relayer/src/worker.rs index f4de6d1c9b..71df19994e 100644 --- a/crates/relayer/src/worker.rs +++ b/crates/relayer/src/worker.rs @@ -135,6 +135,8 @@ pub fn spawn_worker_tasks( LinkParameters { src_port_id: path.src_port_id.clone(), src_channel_id: path.src_channel_id.clone(), + max_memo_size: packets_config.ics20_max_memo_size, + max_receiver_size: packets_config.ics20_max_receiver_size, }, packets_config.tx_confirmation, packets_config.auto_register_counterparty_payee, diff --git a/crates/relayer/src/worker/client.rs b/crates/relayer/src/worker/client.rs index 488ae85489..068b062040 100644 --- a/crates/relayer/src/worker/client.rs +++ b/crates/relayer/src/worker/client.rs @@ -2,7 +2,6 @@ use core::{ convert::Infallible, time::Duration, }; -use std::time::Instant; use crossbeam_channel::Receiver; use ibc_relayer_types::{ @@ -39,8 +38,8 @@ use crate::{ }, }; -const REFRESH_INTERVAL: Duration = Duration::from_secs(2); // 2 seconds -const INITIAL_BACKOFF: Duration = Duration::from_secs(1); // 1 second +const REFRESH_CHECK_INTERVAL: Duration = Duration::from_secs(5); // 5 seconds +const INITIAL_BACKOFF: Duration = Duration::from_secs(5); // 5 seconds const MAX_REFRESH_DELAY: Duration = Duration::from_secs(60 * 60); // 1 hour const MAX_REFRESH_TOTAL_DELAY: Duration = Duration::from_secs(60 * 60 * 24); // 1 day @@ -56,9 +55,6 @@ pub fn spawn_refresh_client( return None; } - // Compute the refresh interval as a fraction of the client's trusting period - // If the trusting period or the client state is not retrieved, fallback to a default value. 
- let mut next_refresh = Instant::now() + REFRESH_INTERVAL; Some(spawn_background_task( error_span!( "worker.client.refresh", @@ -66,24 +62,16 @@ pub fn spawn_refresh_client( src_chain = %client.src_chain.id(), dst_chain = %client.dst_chain.id(), ), - Some(Duration::from_secs(1)), + Some(REFRESH_CHECK_INTERVAL), move || { - // This is used for integration tests until `spawn_background_task` - // uses async instead of threads - if Instant::now() < next_refresh { - return Ok(Next::Continue); - } - - // Use retry mechanism only if `client.refresh()` fails. + // Try to refresh the client, but only if the refresh window has expired. + // If the refresh fails, retry according to the given strategy. let res = retry_with_index(refresh_strategy(), |_| client.refresh()); match res { - // If `client.refresh()` was successful, update the `next_refresh` call. - Ok(_) => { - next_refresh = Instant::now() + REFRESH_INTERVAL; + // If `client.refresh()` was successful, continue + Ok(_) => Ok(Next::Continue), - Ok(Next::Continue) - } // If `client.refresh()` failed and the retry mechanism // exceeded the maximum delay, return a fatal error. Err(e) => Err(TaskError::Fatal(e)), diff --git a/crates/relayer/src/worker/cross_chain_query.rs b/crates/relayer/src/worker/cross_chain_query.rs index efdfe46b5c..04b6a05013 100644 --- a/crates/relayer/src/worker/cross_chain_query.rs +++ b/crates/relayer/src/worker/cross_chain_query.rs @@ -116,7 +116,7 @@ fn handle_cross_chain_query( let target_height = Height::new( chain_b_handle.id().version(), - cross_chain_query_responses.get(0).unwrap().height as u64, + cross_chain_query_responses.first().unwrap().height as u64, ) .map_err(|_| TaskError::Fatal(RunError::query()))? 
.increment(); diff --git a/crates/relayer/src/worker/map.rs b/crates/relayer/src/worker/map.rs index 6261824745..367057152d 100644 --- a/crates/relayer/src/worker/map.rs +++ b/crates/relayer/src/worker/map.rs @@ -138,7 +138,17 @@ impl WorkerMap { config: &Config, ) -> &WorkerHandle { if self.workers.contains_key(&object) { - &self.workers[&object] + if self.workers[&object].shutdown_stopped_tasks() { + self.remove_stopped( + self.workers[&object].id(), + self.workers[&object].object().clone(), + ); + + let worker = self.spawn_worker(src, dst, &object, config); + self.workers.entry(object).or_insert(worker) + } else { + &self.workers[&object] + } } else { let worker = self.spawn_worker(src, dst, &object, config); self.workers.entry(object).or_insert(worker) diff --git a/crates/relayer/src/worker/packet.rs b/crates/relayer/src/worker/packet.rs index 1f23c9f002..d175480a30 100644 --- a/crates/relayer/src/worker/packet.rs +++ b/crates/relayer/src/worker/packet.rs @@ -168,7 +168,7 @@ pub fn spawn_packet_cmd_worker( if is_new_batch { idle_worker_timer = 0; - trace!("packet worker processed an event batch, reseting idle timer"); + trace!("packet worker processed an event batch, resetting idle timer"); } else { idle_worker_timer += 1; trace!("packet worker has not processed an event batch after {idle_worker_timer} blocks, incrementing idle timer"); diff --git a/crates/relayer/tests/config/fixtures/relayer_conf_example.toml b/crates/relayer/tests/config/fixtures/relayer_conf_example.toml index 3665bb595f..f864efecda 100644 --- a/crates/relayer/tests/config/fixtures/relayer_conf_example.toml +++ b/crates/relayer/tests/config/fixtures/relayer_conf_example.toml @@ -19,6 +19,8 @@ enabled = true clear_interval = 100 clear_on_start = true tx_confirmation = true +ics20_max_memo_size = { enabled = true, size = "32KiB" } +ics20_max_receiver_size = { enabled = true, size = "2KiB" } [[chains]] type = "CosmosSdk" diff --git a/crates/telemetry/Cargo.toml b/crates/telemetry/Cargo.toml 
index c7c2bdb2b6..4f4d6838e5 100644 --- a/crates/telemetry/Cargo.toml +++ b/crates/telemetry/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-telemetry" -version = "0.26.3" +version = "0.26.4" edition = "2021" license = "Apache-2.0" readme = "README.md" @@ -13,16 +13,16 @@ description = """ """ [dependencies] -ibc-relayer-types = { version = "0.26.3", path = "../relayer-types" } +ibc-relayer-types = { version = "0.26.4", path = "../relayer-types" } -once_cell = "1.17.0" +once_cell = "1.19.0" opentelemetry = { version = "0.19.0", features = ["metrics"] } opentelemetry-prometheus = "0.12.0" prometheus = "0.13.2" moka = { version = "0.12.0", features = ["sync"] } dashmap = "5.4.0" serde_json = "1.0.94" -serde = "1.0.166" +serde = "1.0.195" axum = "0.6.18" tokio = "1.26.0" tracing = "0.1.36" diff --git a/crates/telemetry/src/broadcast_error.rs b/crates/telemetry/src/broadcast_error.rs new file mode 100644 index 0000000000..1d092f08d6 --- /dev/null +++ b/crates/telemetry/src/broadcast_error.rs @@ -0,0 +1,429 @@ +//! The BroadcastError is used by the telemetry in order to correctly batch +//! together the error reports from ibc-go or Cosmos SDK. +//! When a broadcast error is received by Hermes it will contain a code and +//! a description, but the code description depends on the source of the error. +//! For example Cosmos SDK error code 13 is "insufficient fee", and error +//! code 13 for Ibc Go is "invalid packet". +//! The description might contain some variables for example error 32 would be: +//! "account sequence mismatch, expected 1234, got 1235: incorrect account sequence" +//! The BroadcastError will reduce the description to simple: "incorrect account sequence" +//! +//! Cosmos SDK errors: +//! 
Ibc Go errors: + +pub struct BroadcastError { + pub code: u32, + pub description: String, +} + +impl BroadcastError { + pub fn new(code: u32, description: &str) -> Self { + let short_description = get_short_description(code, description); + Self { + code, + description: short_description, + } + } +} + +fn get_short_description(code: u32, description: &str) -> String { + match code { + 2 => { + let sdk_error = "tx parse error"; + let ibc_go_error = "channel already exists"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 3 => { + let sdk_error = "invalid sequence"; + let ibc_go_error = "channel not found"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 4 => { + let sdk_error = "unauthorized"; + let ibc_go_error = "invalid channel"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 5 => { + let sdk_error = "insufficient funds"; + let ibc_go_error = "invalid channel state"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 6 => { + let sdk_error = "unknown request"; + let ibc_go_error = "invalid channel ordering"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 7 => { + let sdk_error = "invalid address"; + let ibc_go_error = "invalid counterparty channel"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 8 => { + 
let sdk_error = "invalid pubkey"; + let ibc_go_error = "invalid channel capability"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 9 => { + let sdk_error = "unknown address"; + let ibc_go_error = "channel capability not found"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 10 => { + let sdk_error = "invalid coins"; + let ibc_go_error = "sequence send not found"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 11 => { + let sdk_error = "out of gas"; + let ibc_go_error = "sequence receive not found"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 12 => { + let sdk_error = "memo too large"; + let ibc_go_error = "sequence acknowledgement not found"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 13 => { + let sdk_error = "insufficient fee"; + let ibc_go_error = "invalid packet"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 14 => { + let sdk_error = "maximum number of signatures exceeded"; + let ibc_go_error = "packet timeout"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 15 => { + let sdk_error = "no signatures supplied"; + let ibc_go_error = "too many connection hops"; + 
if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 16 => { + let sdk_error = "failed to marshal JSON bytes"; + let ibc_go_error = "invalid acknowledgement"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 17 => { + let sdk_error = "failed to unmarshal JSON bytes"; + let ibc_go_error = "acknowledgement for packet already exists"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 18 => { + let sdk_error = "invalid request"; + let ibc_go_error = "invalid channel identifier"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 19 => { + let sdk_error = "tx already in mempool"; + let ibc_go_error = "packet already received"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 20 => { + let sdk_error = "mempool is full"; + let ibc_go_error = "packet commitment not found"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 21 => { + let sdk_error = "tx too large"; + let ibc_go_error = "packet sequence is out of order"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 22 => { + let sdk_error = "key not found"; + let ibc_go_error = "packet messages are redundant"; + if description.contains(sdk_error) { + 
Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 23 => { + let sdk_error = "invalid account password"; + let ibc_go_error = "message is redundant, no-op will be performed"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 24 => { + let sdk_error = "tx intended signer does not match the given signer"; + let ibc_go_error = "invalid channel version"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 25 => { + let sdk_error = "invalid gas adjustment"; + let ibc_go_error = "packet has not been sent"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 26 => { + let sdk_error = "invalid height"; + let ibc_go_error = "invalid packet timeout"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else if description.contains(ibc_go_error) { + Some(ibc_go_error.to_owned()) + } else { + None + } + } + 27 => { + let sdk_error = "invalid version"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + } + 28 => { + let sdk_error = "invalid chain-id"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + } + 29 => { + let sdk_error = "invalid type"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + } + 30 => { + let sdk_error = "tx timeout height"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + } + 31 => { + let sdk_error = "unknown extension options"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + 
} + 32 => { + let sdk_error = "incorrect account sequence"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + } + 33 => { + let sdk_error = "failed packing protobuf message to Any"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + } + 34 => { + let sdk_error = "failed unpacking protobuf message from Any"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + } + 35 => { + let sdk_error = "internal logic error"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + } + 36 => { + let sdk_error = "conflict"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + } + 37 => { + let sdk_error = "feature not supported"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + } + 38 => { + let sdk_error = "not found"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + } + 39 => { + let sdk_error = "Internal IO error"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + } + 40 => { + let sdk_error = "error in app.toml"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + } + 41 => { + let sdk_error = "invalid gas limit"; + if description.contains(sdk_error) { + Some(sdk_error.to_owned()) + } else { + None + } + } + _ => None, + } + .unwrap_or_else(|| "unknown error".to_owned()) +} diff --git a/crates/telemetry/src/lib.rs b/crates/telemetry/src/lib.rs index 80b3acb547..ccd1cd1767 100644 --- a/crates/telemetry/src/lib.rs +++ b/crates/telemetry/src/lib.rs @@ -1,3 +1,4 @@ +pub mod broadcast_error; pub mod encoder; mod path_identifier; pub mod server; diff --git a/crates/telemetry/src/state.rs b/crates/telemetry/src/state.rs index 1dcf94be7f..c4eb3e4ea5 100644 --- a/crates/telemetry/src/state.rs +++ b/crates/telemetry/src/state.rs 
@@ -40,7 +40,10 @@ use opentelemetry_prometheus::PrometheusExporter; use prometheus::proto::MetricFamily; use tendermint::Time; -use crate::path_identifier::PathIdentifier; +use crate::{ + broadcast_error::BroadcastError, + path_identifier::PathIdentifier, +}; const EMPTY_BACKLOG_SYMBOL: u64 = 0; const BACKLOG_CAPACITY: usize = 1000; @@ -188,9 +191,9 @@ pub struct TelemetryState { /// SendPacket events were relayed. backlog_oldest_sequence: ObservableGauge, - /// Record the timestamp related to `backlog_oldest_sequence`. + /// Record the timestamp of the last time the `backlog_*` metrics have been updated. /// The timestamp is the time passed since since the unix epoch in seconds. - backlog_oldest_timestamp: ObservableGauge, + backlog_latest_update_timestamp: ObservableGauge, /// Records the length of the backlog, i.e., how many packets are pending. backlog_size: ObservableGauge, @@ -373,10 +376,10 @@ impl TelemetryState { .with_description("Sequence number of the oldest SendPacket event in the backlog") .init(), - backlog_oldest_timestamp: meter - .u64_observable_gauge("backlog_oldest_timestamp") + backlog_latest_update_timestamp: meter + .u64_observable_gauge("backlog_latest_update_timestamp") .with_unit(Unit::new("seconds")) - .with_description("Local timestamp for the oldest SendPacket event in the backlog") + .with_description("Local timestamp for the last time the backlog metrics have been updated") .init(), backlog_size: meter @@ -480,7 +483,7 @@ impl TelemetryState { } self.backlog_oldest_sequence.observe(&cx, 0, labels); - self.backlog_oldest_timestamp.observe(&cx, 0, labels); + self.backlog_latest_update_timestamp.observe(&cx, 0, labels); self.backlog_size.observe(&cx, 0, labels); } @@ -945,8 +948,7 @@ impl TelemetryState { }; // Update the backlog with the incoming data and retrieve the oldest values - let (oldest_sn, oldest_ts, total) = if let Some(path_backlog) = self.backlogs.get(&path_uid) - { + let (oldest_sn, total) = if let Some(path_backlog) = 
self.backlogs.get(&path_uid) { // Avoid having the inner backlog map growing more than a given threshold, by removing // the oldest sequence number entry. if path_backlog.len() > BACKLOG_RESET_THRESHOLD { @@ -958,20 +960,11 @@ impl TelemetryState { // Return the oldest event information to be recorded in telemetry if let Some(min) = path_backlog.iter().map(|v| *v.key()).min() { - if let Some(oldest) = path_backlog.get(&min) { - (min, *oldest.value(), path_backlog.len() as u64) - } else { - // Timestamp was not found, this should not happen, record a 0 ts. - (min, 0, path_backlog.len() as u64) - } + (min, path_backlog.len() as u64) } else { // We just inserted a new key/value, so this else branch is unlikely to activate, // but it can happen in case of concurrent updates to the backlog. - ( - EMPTY_BACKLOG_SYMBOL, - EMPTY_BACKLOG_SYMBOL, - EMPTY_BACKLOG_SYMBOL, - ) + (EMPTY_BACKLOG_SYMBOL, EMPTY_BACKLOG_SYMBOL) } } else { // If there is no inner backlog for this path, create a new map to store it. @@ -981,16 +974,57 @@ impl TelemetryState { self.backlogs.insert(path_uid, new_path_backlog); // Return the current event information to be recorded in telemetry - (seq_nr, timestamp, 1) + (seq_nr, 1) }; // Update metrics to reflect the new state of the backlog self.backlog_oldest_sequence.observe(&cx, oldest_sn, labels); - self.backlog_oldest_timestamp - .observe(&cx, oldest_ts, labels); + self.backlog_latest_update_timestamp + .observe(&cx, timestamp, labels); self.backlog_size.observe(&cx, total, labels); } + /// Inserts in the backlog a new event for the given sequence number. + /// This happens when the relayer observed a new SendPacket event. + pub fn update_backlog( + &self, + sequences: Vec, + chain_id: &ChainId, + channel_id: &ChannelId, + port_id: &PortId, + counterparty_chain_id: &ChainId, + ) { + // Unique identifier for a chain/channel/port. 
+ let path_uid: PathIdentifier = PathIdentifier::new( + chain_id.to_string(), + channel_id.to_string(), + port_id.to_string(), + ); + + // This condition is done in order to avoid having an incorrect `backlog_latest_update_timestamp`. + // If the sequences is an empty vector by removing the entries using `backlog_remove` the `backlog_latest_update_timestamp` + // will only be updated if the current backlog is not empty. + // If the sequences is not empty, then it is possible to simple remove the backlog for that path and insert the sequences. + if sequences.is_empty() { + if let Some(path_backlog) = self.backlogs.get(&path_uid) { + let current_keys: Vec = path_backlog + .value() + .iter() + .map(|entry| *entry.key()) + .collect(); + + for key in current_keys.iter() { + self.backlog_remove(*key, chain_id, channel_id, port_id, counterparty_chain_id) + } + } + } else { + self.backlogs.remove(&path_uid); + for key in sequences.iter() { + self.backlog_insert(*key, chain_id, channel_id, port_id, counterparty_chain_id) + } + } + } + /// Evicts from the backlog the event for the given sequence number. /// Removing events happens when the relayer observed either an acknowledgment /// or a timeout for a packet sequence number, which means that the corresponding @@ -1019,25 +1053,27 @@ impl TelemetryState { KeyValue::new("port", port_id.to_string()), ]; + // Retrieve local timestamp when this SendPacket event was recorded. + let now = Time::now(); + let timestamp = match now.duration_since(Time::unix_epoch()) { + Ok(ts) => ts.as_secs(), + Err(_) => 0, + }; + if let Some(path_backlog) = self.backlogs.get(&path_uid) { if path_backlog.remove(&seq_nr).is_some() { + // If the entry was removed update the latest update timestamp. + self.backlog_latest_update_timestamp + .observe(&cx, timestamp, labels); // The oldest pending sequence number is the minimum key in the inner (path) backlog. 
if let Some(min_key) = path_backlog.iter().map(|v| *v.key()).min() { - if let Some(oldest) = path_backlog.get(&min_key) { - self.backlog_oldest_timestamp - .observe(&cx, *oldest.value(), labels); - } else { - self.backlog_oldest_timestamp.observe(&cx, 0, labels); - } self.backlog_oldest_sequence.observe(&cx, min_key, labels); self.backlog_size .observe(&cx, path_backlog.len() as u64, labels); } else { - // No mimimum found, update the metrics to reflect an empty backlog + // No minimum found, update the metrics to reflect an empty backlog self.backlog_oldest_sequence .observe(&cx, EMPTY_BACKLOG_SYMBOL, labels); - self.backlog_oldest_timestamp - .observe(&cx, EMPTY_BACKLOG_SYMBOL, labels); self.backlog_size.observe(&cx, EMPTY_BACKLOG_SYMBOL, labels); } } @@ -1105,13 +1141,14 @@ impl TelemetryState { /// Add an error and its description to the list of errors observed after broadcasting /// a Tx with a specific account. - pub fn broadcast_errors(&self, address: &String, error_code: u32, error_description: &String) { + pub fn broadcast_errors(&self, address: &String, error_code: u32, error_description: &str) { let cx = Context::current(); + let broadcast_error = BroadcastError::new(error_code, error_description); let labels = &[ KeyValue::new("account", address.to_string()), - KeyValue::new("error_code", error_code.to_string()), - KeyValue::new("error_description", error_description.to_string()), + KeyValue::new("error_code", broadcast_error.code.to_string()), + KeyValue::new("error_description", broadcast_error.description), ]; self.broadcast_errors.add(&cx, 1, labels); @@ -1188,7 +1225,7 @@ impl AggregatorSelector for CustomAggregatorSelector { match descriptor.name() { "wallet_balance" => Some(Arc::new(last_value())), "backlog_oldest_sequence" => Some(Arc::new(last_value())), - "backlog_oldest_timestamp" => Some(Arc::new(last_value())), + "backlog_latest_update_timestamp" => Some(Arc::new(last_value())), "backlog_size" => Some(Arc::new(last_value())), // Prometheus' 
supports only collector for histogram, sum, and last value aggregators. // https://docs.rs/opentelemetry-prometheus/0.10.0/src/opentelemetry_prometheus/lib.rs.html#411-418 @@ -1200,3 +1237,169 @@ impl AggregatorSelector for CustomAggregatorSelector { } } } + +#[cfg(test)] +mod tests { + use prometheus::proto::Metric; + + use super::*; + + #[test] + fn insert_remove_backlog() { + let state = TelemetryState::new( + Range { + start: 0, + end: 5000, + }, + 5, + Range { + start: 0, + end: 5000, + }, + 5, + ); + + let chain_id = ChainId::from_string("chain-test"); + let counterparty_chain_id = ChainId::from_string("counterpartychain-test"); + let channel_id = ChannelId::new(0); + let port_id = PortId::transfer(); + + state.backlog_insert(1, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + state.backlog_insert(2, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + state.backlog_insert(3, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + state.backlog_insert(4, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + state.backlog_insert(5, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + state.backlog_remove(3, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + state.backlog_remove(1, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + + let metrics = state.exporter.registry().gather().clone(); + let backlog_size = metrics + .iter() + .find(|metric| metric.get_name() == "backlog_size") + .unwrap(); + assert!( + assert_metric_value(backlog_size.get_metric(), 3), + "expected backlog_size to be 3" + ); + let backlog_oldest_sequence = metrics + .iter() + .find(|&metric| metric.get_name() == "backlog_oldest_sequence") + .unwrap(); + assert!( + assert_metric_value(backlog_oldest_sequence.get_metric(), 2), + "expected backlog_oldest_sequence to be 2" + ); + } + + #[test] + fn update_backlog() { + let state = TelemetryState::new( + Range { + start: 0, + end: 5000, + }, + 5, + Range { + start: 0, + end: 5000, + }, 
+ 5, + ); + + let chain_id = ChainId::from_string("chain-test"); + let counterparty_chain_id = ChainId::from_string("counterpartychain-test"); + let channel_id = ChannelId::new(0); + let port_id = PortId::transfer(); + + state.backlog_insert(1, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + state.backlog_insert(2, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + state.backlog_insert(3, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + state.backlog_insert(4, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + state.backlog_insert(5, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + + state.update_backlog( + vec![5], + &chain_id, + &channel_id, + &port_id, + &counterparty_chain_id, + ); + + let metrics = state.exporter.registry().gather().clone(); + let backlog_size = metrics + .iter() + .find(|&metric| metric.get_name() == "backlog_size") + .unwrap(); + assert!( + assert_metric_value(backlog_size.get_metric(), 1), + "expected backlog_size to be 1" + ); + let backlog_oldest_sequence = metrics + .iter() + .find(|&metric| metric.get_name() == "backlog_oldest_sequence") + .unwrap(); + assert!( + assert_metric_value(backlog_oldest_sequence.get_metric(), 5), + "expected backlog_oldest_sequence to be 5" + ); + } + + #[test] + fn update_backlog_empty() { + let state = TelemetryState::new( + Range { + start: 0, + end: 5000, + }, + 5, + Range { + start: 0, + end: 5000, + }, + 5, + ); + + let chain_id = ChainId::from_string("chain-test"); + let counterparty_chain_id = ChainId::from_string("counterpartychain-test"); + let channel_id = ChannelId::new(0); + let port_id = PortId::transfer(); + + state.backlog_insert(1, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + state.backlog_insert(2, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + state.backlog_insert(3, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + state.backlog_insert(4, &chain_id, &channel_id, &port_id, 
&counterparty_chain_id); + state.backlog_insert(5, &chain_id, &channel_id, &port_id, &counterparty_chain_id); + + state.update_backlog( + vec![], + &chain_id, + &channel_id, + &port_id, + &counterparty_chain_id, + ); + + let metrics = state.exporter.registry().gather().clone(); + let backlog_size = metrics + .iter() + .find(|&metric| metric.get_name() == "backlog_size") + .unwrap(); + assert!( + assert_metric_value(backlog_size.get_metric(), 0), + "expected backlog_size to be 0" + ); + let backlog_oldest_sequence = metrics + .iter() + .find(|&metric| metric.get_name() == "backlog_oldest_sequence") + .unwrap(); + assert!( + assert_metric_value(backlog_oldest_sequence.get_metric(), 0), + "expected backlog_oldest_sequence to be 0" + ); + } + + fn assert_metric_value(metric: &[Metric], expected: u64) -> bool { + metric + .iter() + .any(|m| m.get_gauge().get_value() as u64 == expected) + } +} diff --git a/docs/architecture/adr-002-ibc-relayer.md b/docs/architecture/adr-002-ibc-relayer.md index 6ca29cb00a..983ff24805 100644 --- a/docs/architecture/adr-002-ibc-relayer.md +++ b/docs/architecture/adr-002-ibc-relayer.md @@ -793,6 +793,6 @@ The IBC Events, input to the relay thread are described here. ## References -> Are there any relevant PR comments, issues that led up to this, or articles referrenced for why we made the given design choice? If so link them here! +> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here! 
* {reference link} diff --git a/docs/architecture/adr-003-handler-implementation.md b/docs/architecture/adr-003-handler-implementation.md index 62693d1c3f..e552f7672e 100644 --- a/docs/architecture/adr-003-handler-implementation.md +++ b/docs/architecture/adr-003-handler-implementation.md @@ -309,7 +309,7 @@ pub fn keep( > This section is very much a work in progress, as further investigation into what > a production-ready implementation of the `ctx` parameter of the top-level dispatcher -> is required. As such, implementors should feel free to disregard the recommendations +> is required. As such, implementers should feel free to disregard the recommendations > below, and are encouraged to come up with amendments to this ADR to better capture > the actual requirements. @@ -552,7 +552,7 @@ pub trait ClientKeeper { This way, only one implementation of the `ClientReader` and `ClientKeeper` trait is required, as it can delegate eg. the serialization of the underlying datatypes to the `Serialize` bound -of the `Any...` wrappper. +of the `Any...` wrapper. Both the `process` and `keep` function are defined to take a message generic over the actual client type: diff --git a/docs/architecture/adr-template.md b/docs/architecture/adr-template.md index 28a5ecfbbc..83d323eeec 100644 --- a/docs/architecture/adr-template.md +++ b/docs/architecture/adr-template.md @@ -31,6 +31,6 @@ If the proposed change will be large, please also indicate a way to do the chang ## References -> Are there any relevant PR comments, issues that led up to this, or articles referrenced for why we made the given design choice? If so link them here! +> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here! 
* {reference link} diff --git a/docs/architecture/architecture.md b/docs/architecture/architecture.md index dddf2ae11a..e80c5bda79 100644 --- a/docs/architecture/architecture.md +++ b/docs/architecture/architecture.md @@ -110,7 +110,7 @@ Used by Hermes to gather telemetry data and expose it via a Prometheus endpoint. Most of the components in the `ibc` crate (i.e. the `modules` directory) have basic unit testing coverage. These unit tests make use of mocked up chain components in order to ensure that message payloads are being sent and received as expected. -We also run end-to-end tests to more thoroughly test IBC modules in a more heterogenous fashion. +We also run end-to-end tests to more thoroughly test IBC modules in a more heterogeneous fashion. ### Error Handling diff --git a/docs/spec/connection-handshake/L1_2.md b/docs/spec/connection-handshake/L1_2.md index 66a7eaa4eb..dbca2591d8 100644 --- a/docs/spec/connection-handshake/L1_2.md +++ b/docs/spec/connection-handshake/L1_2.md @@ -245,7 +245,7 @@ Specifically, termination implies that each module allocates in the local store _Remarks_: -- Uniqueness property essentially provides a safeguard against overwritting a connection in the store with some new set of parameters. +- Uniqueness property essentially provides a safeguard against overwriting a connection in the store with some new set of parameters. - The integrity property, in conjunction with uniqueness, ensures that there is continuity between the connections that a module initializes and the connections that this module opens. diff --git a/docs/spec/connection-handshake/L2-tla/Environment.tla b/docs/spec/connection-handshake/L2-tla/Environment.tla index c90176530b..6ea00d17c2 100644 --- a/docs/spec/connection-handshake/L2-tla/Environment.tla +++ b/docs/spec/connection-handshake/L2-tla/Environment.tla @@ -273,7 +273,7 @@ RelayNextEnv == the chains unless the chain has just a few (namely, `4`) heights left. 3. 
The environment may perform a relaying step, that is: - if there is a message in the ougoing buffer of a chain, the relayer + if there is a message in the outgoing buffer of a chain, the relayer moves this message to the ingoing buffer of the other chain, and also updates the client on the latter chain. @@ -328,7 +328,7 @@ Init == (* The two ICS3 modules and the environment alternate their steps non-deterministically. Eventually, the execution ends with either - successful (ICS3ReachedOpenConnection sub-action) or unsuccesfull + successful (ICS3ReachedOpenConnection sub-action) or unsuccessful (ICS3ImpossibleToAdvance sub-action) termination. *) Next == diff --git a/docs/spec/tla/fungible-token-transfer/Bank.tla b/docs/spec/tla/fungible-token-transfer/Bank.tla index 20e1af41ff..8b1630b584 100644 --- a/docs/spec/tla/fungible-token-transfer/Bank.tla +++ b/docs/spec/tla/fungible-token-transfer/Bank.tla @@ -30,7 +30,7 @@ AddCoins(accounts, accountID, amount) == ] -\* Transfer coins from senderAccounts to receiverAccounts, depeding on +\* Transfer coins from senderAccounts to receiverAccounts, depending on \* the sender addressees, receiver addressees and denomination \* - senderAccounts is a map from sender addresses and denominations \* to account balances diff --git a/docs/spec/tla/fungible-token-transfer/Chain.tla b/docs/spec/tla/fungible-token-transfer/Chain.tla index af779b2999..aeb25ea87b 100644 --- a/docs/spec/tla/fungible-token-transfer/Chain.tla +++ b/docs/spec/tla/fungible-token-transfer/Chain.tla @@ -126,7 +126,7 @@ SendPacket == LET updatedChainStore == WritePacketCommitment(chainStore, packet) IN \* if writing the packet commitment was successful /\ chainStore /= updatedChainStore - \* update chain store with packet committment + \* update chain store with packet commitment /\ chainStore' = updatedChainStore \* log sent packet /\ packetLog' = Append(packetLog, [ diff --git a/docs/spec/tla/fungible-token-transfer/IBCTokenTransferDefinitions.tla 
b/docs/spec/tla/fungible-token-transfer/IBCTokenTransferDefinitions.tla index 8e3f3c5641..3ce0509670 100644 --- a/docs/spec/tla/fungible-token-transfer/IBCTokenTransferDefinitions.tla +++ b/docs/spec/tla/fungible-token-transfer/IBCTokenTransferDefinitions.tla @@ -406,7 +406,7 @@ InitUnorderedChannelEnd(ChainID) == \* - height is initialized to 1 \* - counterpartyClientHeights is the set of installed client heights \* - the channelEnd is initialized to InitUnorderedChannelEnd -\* - the packet committments, receipts, acknowledgements, and packets +\* - the packet commitments, receipts, acknowledgements, and packets \* to acknowledge are empty ICS20InitChainStore(ChainID) == [ diff --git a/docs/spec/tla/fungible-token-transfer/ICS04PacketHandlers.tla b/docs/spec/tla/fungible-token-transfer/ICS04PacketHandlers.tla index cfcd4883a8..74f714434c 100644 --- a/docs/spec/tla/fungible-token-transfer/ICS04PacketHandlers.tla +++ b/docs/spec/tla/fungible-token-transfer/ICS04PacketHandlers.tla @@ -101,7 +101,7 @@ HandlePacketAck(chain, packetDatagram, log, accounts, escrowAccounts, maxBalance LET packet == packetDatagram.packet IN \* get acknowledgement LET ack == packetDatagram.acknowledgement IN - \* get packet committment that should be in chain store + \* get packet commitment that should be in chain store LET packetCommitment == [ portID |-> packet.srcPortID, channelID |-> packet.srcChannelID, @@ -141,7 +141,7 @@ HandlePacketAck(chain, packetDatagram, log, accounts, escrowAccounts, maxBalance escrowAccounts |-> escrowAccounts] -\* write packet committments to chain store +\* write packet commitments to chain store \* @type: (CHAINSTORE, PACKET) => CHAINSTORE; WritePacketCommitment(chain, packet) == \* get channel end @@ -245,7 +245,7 @@ TimeoutPacket(chain, counterpartyChain, accounts, escrowAccounts, packet, proofHeight, maxBalance) == \* get channel end LET channelEnd == chain.channelEnd IN - \* get packet committment that should be in chain store + \* get packet 
commitment that should be in chain store LET packetCommitment == [ portID |-> packet.srcPortID, channelID |-> packet.srcChannelID, @@ -303,7 +303,7 @@ TimeoutOnClose(chain, counterpartyChain, accounts, escrowAccounts, \* get counterparty channel end LET counterpartyChannelEnd == counterpartyChain.channelEnd IN - \* get packet committment that should be in chain store + \* get packet commitment that should be in chain store LET packetCommitment == [ portID |-> packet.srcPortID, channelID |-> packet.srcChannelID, diff --git a/docs/spec/tla/ibc-core/Chain.tla b/docs/spec/tla/ibc-core/Chain.tla index 500c985dec..5bbe315487 100644 --- a/docs/spec/tla/ibc-core/Chain.tla +++ b/docs/spec/tla/ibc-core/Chain.tla @@ -180,7 +180,7 @@ SendPacket == LET updatedChainStore == WritePacketCommitment(chainStore, packet) IN \* if writing the packet commitment was successful /\ chainStore /= updatedChainStore - \* update chain store with packet committment + \* update chain store with packet commitment /\ chainStore' = updatedChainStore \* log sent packet /\ packetLog' = Append(packetLog, [ diff --git a/docs/spec/tla/ibc-core/IBCCoreDefinitions.tla b/docs/spec/tla/ibc-core/IBCCoreDefinitions.tla index 53ee2a5731..3bd6e5bc2d 100644 --- a/docs/spec/tla/ibc-core/IBCCoreDefinitions.tla +++ b/docs/spec/tla/ibc-core/IBCCoreDefinitions.tla @@ -520,7 +520,7 @@ InitConnectionEnds(Versions, channelOrdering) == \* - height is initialized to 1 \* - the counterparty light client is uninitialized \* - the connection end is initialized to InitConnectionEnd -\* - the packet committments, receipts, acknowledgements, and +\* - the packet commitments, receipts, acknowledgements, and \* packets to acknowledge are empty \* @type: (Set(Int), Str) => Set(CHAINSTORE); InitChainStore(Versions, channelOrdering) == diff --git a/docs/spec/tla/ibc-core/ICS03ConnectionHandlers.tla b/docs/spec/tla/ibc-core/ICS03ConnectionHandlers.tla index 72fc205aa3..afe0443645 100644 --- 
a/docs/spec/tla/ibc-core/ICS03ConnectionHandlers.tla +++ b/docs/spec/tla/ibc-core/ICS03ConnectionHandlers.tla @@ -58,7 +58,7 @@ HandleConnOpenTry(chainID, chain, datagrams) == THEN LET connOpenTryDgr == CHOOSE dgr \in connOpenTryDgrs : TRUE IN LET versionIntersection == chain.connectionEnd.versions \intersect connOpenTryDgr.versions IN - \* if the versions from the datagram overlap with the supported versions of the connnection end + \* if the versions from the datagram overlap with the supported versions of the connection end IF /\ versionIntersection /= {} \* if the connection end is uninitialized /\ \/ chain.connectionEnd.state = "UNINIT" diff --git a/docs/spec/tla/ibc-core/ICS04PacketHandlers.tla b/docs/spec/tla/ibc-core/ICS04PacketHandlers.tla index 4e4808f040..bb08d53e22 100644 --- a/docs/spec/tla/ibc-core/ICS04PacketHandlers.tla +++ b/docs/spec/tla/ibc-core/ICS04PacketHandlers.tla @@ -93,7 +93,7 @@ HandlePacketAck(chainID, chain, packetDatagram, log) == LET channelEnd == GetChannelEnd(chain) IN \* get packet LET packet == packetDatagram.packet IN - \* get packet committment that should be in chain store + \* get packet commitment that should be in chain store LET packetCommitment == [ portID |-> packet.srcPortID, channelID |-> packet.srcChannelID, @@ -135,7 +135,7 @@ HandlePacketAck(chainID, chain, packetDatagram, log) == ELSE [chainStore |-> chain, packetLog |-> log] -\* write packet committments to chain store +\* write packet commitments to chain store \* @type: (CHAINSTORE, PACKET) => CHAINSTORE; WritePacketCommitment(chain, packet) == \* get chainID's connection end @@ -158,7 +158,7 @@ WritePacketCommitment(chain, packet) == /\ \/ packet.timeoutHeight = 0 \/ latestClientHeight < packet.timeoutHeight THEN IF \* if the channel is ordered, check if packetSeq is nextSendSeq, - \* add a packet committment in the chain store, and increase nextSendSeq + \* add a packet commitment in the chain store, and increase nextSendSeq /\ channelEnd.order = "ORDERED" /\ 
packet.sequence = channelEnd.nextSendSeq THEN [chain EXCEPT @@ -171,7 +171,7 @@ WritePacketCommitment(chain, packet) == ] \* otherwise, do not update the chain store ELSE IF \* if the channel is unordered, - \* add a packet committment in the chain store + \* add a packet commitment in the chain store /\ channelEnd.order = "UNORDERED" THEN [chain EXCEPT !.packetCommitments = @@ -248,7 +248,7 @@ TimeoutPacket(chain, counterpartyChain, packet, proofHeight) == \* get counterparty channel end LET counterpartyChannelEnd == GetChannelEnd(counterpartyChain) IN - \* get packet committment that should be in chain store + \* get packet commitment that should be in chain store LET packetCommitment == [ portID |-> packet.srcPortID, channelID |-> packet.srcChannelID, @@ -310,7 +310,7 @@ TimeoutOnClose(chain, counterpartyChain, packet, proofHeight) == \* get counterparty channel end LET counterpartyChannelEnd == GetChannelEnd(counterpartyChain) IN - \* get packet committment that should be in chain store + \* get packet commitment that should be in chain store LET packetCommitment == [ portID |-> packet.srcPortID, channelID |-> packet.srcChannelID, diff --git a/docs/spec/tla/packet-delay/Chain.tla b/docs/spec/tla/packet-delay/Chain.tla index c21ad200d0..f2b9f03e3c 100644 --- a/docs/spec/tla/packet-delay/Chain.tla +++ b/docs/spec/tla/packet-delay/Chain.tla @@ -90,7 +90,7 @@ SendPacket == srcChannelID |-> chainStore.channelEnd.channelID, dstPortID |-> chainStore.channelEnd.counterpartyPortID, dstChannelID |-> chainStore.channelEnd.counterpartyChannelID] IN - \* update chain store with packet committment + \* update chain store with packet commitment /\ chainStore' = WritePacketCommitment(chainStore, packet) \* log sent packet /\ packetLog' = Append(packetLog, diff --git a/docs/spec/tla/packet-delay/IBCPacketDelayDefinitions.tla b/docs/spec/tla/packet-delay/IBCPacketDelayDefinitions.tla index cad0aee7ea..915229f896 100644 --- 
a/docs/spec/tla/packet-delay/IBCPacketDelayDefinitions.tla +++ b/docs/spec/tla/packet-delay/IBCPacketDelayDefinitions.tla @@ -396,7 +396,7 @@ InitChannelEnd(ChainID, ChannelOrdering) == \* - timestamp is initialized to 1 \* - there are no installed client heights \* - the channel end is initialized to InitChannelEnd -\* - the packet committments, receipts, acknowledgements, and packets +\* - the packet commitments, receipts, acknowledgements, and packets \* to acknowledge are empty \* @type: (Str, Set(Int), Str, Int) => CHAINSTORE; InitChainStore(ChainID, Heights, ChannelOrdering, MaxDelay) == diff --git a/docs/spec/tla/packet-delay/ICS04PacketHandlers.tla b/docs/spec/tla/packet-delay/ICS04PacketHandlers.tla index 22a3b359be..36a5ea8379 100644 --- a/docs/spec/tla/packet-delay/ICS04PacketHandlers.tla +++ b/docs/spec/tla/packet-delay/ICS04PacketHandlers.tla @@ -111,7 +111,7 @@ HandlePacketAck(chainID, chain, packetDatagram, delay, log, datagramTimestamp) = LET channelEnd == chain.channelEnd IN \* get packet LET packet == packetDatagram.packet IN - \* get packet committment that should be in chain store + \* get packet commitment that should be in chain store LET packetCommitment == [portID |-> packet.srcPortID, channelID |-> packet.srcChannelID, sequence |-> packet.sequence, @@ -125,7 +125,7 @@ HandlePacketAck(chainID, chain, packetDatagram, delay, log, datagramTimestamp) = IF \* if the channel end is open for packet transmission /\ channelEnd.state = "OPEN" - \* if the packet committment exists in the chain store + \* if the packet commitment exists in the chain store /\ packetCommitment \in chain.packetCommitments \* if the "PacketRecv" datagram has valid port and channel IDs /\ packet.srcPortID = channelEnd.portID @@ -167,7 +167,7 @@ HandlePacketAck(chainID, chain, packetDatagram, delay, log, datagramTimestamp) = ELSE [chainStore |-> chain, packetLog |-> log, datagramTimestamp |-> datagramTimestamp] -\* write packet committments to chain store +\* write packet 
commitments to chain store \* @type: (CHAINSTORE, PACKET) => CHAINSTORE; WritePacketCommitment(chain, packet) == \* get channel end @@ -186,7 +186,7 @@ WritePacketCommitment(chain, packet) == /\ \/ packet.timeoutHeight = 0 \/ latestClientHeight < packet.timeoutHeight THEN IF \* if the channel is ordered, check if packetSeq is nextSendSeq, - \* add a packet committment in the chain store, and increase nextSendSeq + \* add a packet commitment in the chain store, and increase nextSendSeq /\ channelEnd.order = "ORDERED" /\ packet.sequence = channelEnd.nextSendSeq THEN [chain EXCEPT @@ -203,7 +203,7 @@ WritePacketCommitment(chain, packet) == \* otherwise, do not update the chain store ELSE chain ELSE IF \* if the channel is unordered, - \* add a packet committment in the chain store + \* add a packet commitment in the chain store /\ channelEnd.order = "UNORDERED" THEN [chain EXCEPT !.packetCommitments = @@ -282,7 +282,7 @@ TimeoutPacket(chain, counterpartyChain, packet, proofHeight) == \* get counterparty channel end LET counterpartyChannelEnd == counterpartyChain.channelEnd IN - \* get packet committment that should be in chain store + \* get packet commitment that should be in chain store LET packetCommitment == [portID |-> packet.srcPortID, channelID |-> packet.srcChannelID, sequence |-> packet.sequence, @@ -336,7 +336,7 @@ TimeoutOnClose(chain, counterpartyChain, packet, proofHeight) == \* get counterparty channel end LET counterpartyChannelEnd == counterpartyChain.channelEnd IN - \* get packet committment that should be in chain store + \* get packet commitment that should be in chain store LET packetCommitment == [portID |-> packet.srcPortID, channelID |-> packet.srcChannelID, sequence |-> packet.sequence, diff --git a/flake.lock b/flake.lock index 9db2166d6c..874e699bf9 100644 --- a/flake.lock +++ b/flake.lock @@ -54,16 +54,16 @@ "celestia-src": { "flake": false, "locked": { - "lastModified": 1697229641, - "narHash": 
"sha256-1HvYCZEcB7BCY5q9ykEZuCMzqcbA3JoDMwoHVJ0+SaY=", + "lastModified": 1700494564, + "narHash": "sha256-O6KrCStrZLmWy3xybQUNsWEb3O7vIRCFDE9MsEtsFro=", "owner": "celestiaorg", "repo": "celestia-app", - "rev": "2b8cc9e23826ccb658b7dd5aa6cd51a0921a0c29", + "rev": "2dbfabf1849e166974c1287c35b43e5e07727643", "type": "github" }, "original": { "owner": "celestiaorg", - "ref": "v1.1.0", + "ref": "v1.4.0", "repo": "celestia-app", "type": "github" } @@ -71,17 +71,17 @@ "centauri-src": { "flake": false, "locked": { - "lastModified": 1699873949, - "narHash": "sha256-oJNkzBM0pD6+KkIH8ICVcl8yp94R2EoIsvb/i+t5c8c=", - "owner": "dzmitry-lahoda-forks", - "repo": "composable-centauri", - "rev": "c6736b946c3bc6c7c23788d499b2dff94ffd39f5", + "lastModified": 1701431373, + "narHash": "sha256-EpZ1CQN0gMU8W1u3CMbqlaHeeVpQO2i1GPg6pOyOQTc=", + "owner": "ComposableFi", + "repo": "composable-cosmos", + "rev": "387c96b434db9d96b0506aa7f14536d9bdec968c", "type": "github" }, "original": { - "owner": "dzmitry-lahoda-forks", - "repo": "composable-centauri", - "rev": "c6736b946c3bc6c7c23788d499b2dff94ffd39f5", + "owner": "ComposableFi", + "repo": "composable-cosmos", + "rev": "387c96b434db9d96b0506aa7f14536d9bdec968c", "type": "github" } }, @@ -113,8 +113,9 @@ "cosmos-sdk-src": "cosmos-sdk-src", "cosmwasm-src": "cosmwasm-src", "crescent-src": "crescent-src", + "cw-plus-src": "cw-plus-src", "evmos-src": "evmos-src", - "flake-utils": "flake-utils", + "flake-parts": "flake-parts", "gaia-main-src": "gaia-main-src", "gaia10-src": "gaia10-src", "gaia11-src": "gaia11-src", @@ -149,7 +150,7 @@ "nix-std": "nix-std", "nixpkgs": "nixpkgs", "osmosis-src": "osmosis-src", - "pre-commit-hooks": "pre-commit-hooks", + "provenance-src": "provenance-src", "regen-src": "regen-src", "relayer-src": "relayer-src", "rust-overlay": "rust-overlay", @@ -161,24 +162,24 @@ "stoml-src": "stoml-src", "stride-consumer-src": "stride-consumer-src", "stride-src": "stride-src", - "ts-relayer-src": "ts-relayer-src", "umee-src": 
"umee-src", "wasmd-src": "wasmd-src", "wasmd_next-src": "wasmd_next-src", - "wasmvm_0_16_3-src": "wasmvm_0_16_3-src", "wasmvm_1-src": "wasmvm_1-src", "wasmvm_1_1_1-src": "wasmvm_1_1_1-src", "wasmvm_1_1_2-src": "wasmvm_1_1_2-src", "wasmvm_1_2_3-src": "wasmvm_1_2_3-src", "wasmvm_1_2_4-src": "wasmvm_1_2_4-src", + "wasmvm_1_3_0-src": "wasmvm_1_3_0-src", + "wasmvm_1_5_0-src": "wasmvm_1_5_0-src", "wasmvm_1_beta7-src": "wasmvm_1_beta7-src" }, "locked": { - "lastModified": 1700133562, - "narHash": "sha256-ItMoSAOJq2yKXeeyqXB+Aei+qlmwmmPz9HgnGAfDJNs=", + "lastModified": 1701457684, + "narHash": "sha256-Tx3WsOM9scTXDHFyL5vNcQnCmT7Mx0dYg1Nmz8cFwt4=", "owner": "informalsystems", "repo": "cosmos.nix", - "rev": "f93ee05ad75196fa48ec025b1632cd12ba322e5c", + "rev": "c0bb979a518aa08ba064112a85e03fbc7a7d2869", "type": "github" }, "original": { @@ -207,16 +208,16 @@ "cosmwasm-src": { "flake": false, "locked": { - "lastModified": 1685975182, - "narHash": "sha256-6uhJijuDPXvEZG8mKBGyswsj/JR75Ui713BVx4XD7WI=", + "lastModified": 1698745412, + "narHash": "sha256-41s5jLFzw9Jo+dirAVOad1dtUqCBY6rIz/6TRc0frMw=", "owner": "CosmWasm", "repo": "cosmwasm", - "rev": "b8e9c03e744e8b84174477e20eb934529cad41e7", + "rev": "89891f0bb2de2c83d00600208695d0d5e1b617ac", "type": "github" }, "original": { "owner": "CosmWasm", - "ref": "v1.2.6", + "ref": "v1.5.0", "repo": "cosmwasm", "type": "github" } @@ -238,6 +239,23 @@ "type": "github" } }, + "cw-plus-src": { + "flake": false, + "locked": { + "lastModified": 1700757493, + "narHash": "sha256-E5vkY+B4BDoTDtvuB+7Tm3k/5dCYPSjUujMWcgYsWf0=", + "owner": "CosmWasm", + "repo": "cw-plus", + "rev": "d33824679d5b91ca0b4615a8dede7e0028947486", + "type": "github" + }, + "original": { + "owner": "CosmWasm", + "ref": "v1.1.2", + "repo": "cw-plus", + "type": "github" + } + }, "evmos-src": { "flake": false, "locked": { @@ -255,43 +273,27 @@ "type": "github" } }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1673956053, - "narHash": 
"sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-utils": { + "flake-parts": { "inputs": { - "systems": "systems" + "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1694529238, - "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", + "lastModified": 1698882062, + "narHash": "sha256-HkhafUayIqxXyHH1X8d9RDl1M2CkFgZLjKD3MzabiEo=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "8c9fa2545007b49a5db5f650ae91f227672c3877", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "hercules-ci", + "repo": "flake-parts", "type": "github" } }, - "flake-utils_2": { + "flake-utils": { "inputs": { - "systems": "systems_2" + "systems": "systems" }, "locked": { "lastModified": 1681202837, @@ -307,16 +309,13 @@ "type": "github" } }, - "flake-utils_3": { - "inputs": { - "systems": "systems_3" - }, + "flake-utils_2": { "locked": { - "lastModified": 1694529238, - "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", + "lastModified": 1667395993, + "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", "owner": "numtide", "repo": "flake-utils", - "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", + "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", "type": "github" }, "original": { @@ -325,16 +324,16 @@ "type": "github" } }, - "flake-utils_4": { + "flake-utils_3": { "inputs": { - "systems": "systems_4" + "systems": "systems_2" }, "locked": { - "lastModified": 1694529238, - "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", + "lastModified": 1701680307, + "narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=", "owner": "numtide", "repo": 
"flake-utils", - "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", + "rev": "4022d587cbbfd70fe950c1e2083a02621806a725", "type": "github" }, "original": { @@ -549,39 +548,17 @@ "gex-src": { "flake": false, "locked": { - "lastModified": 1660333522, - "narHash": "sha256-7jtCpOTHamXAInfKYkMIDFKF4lViuPkusThj4ggGUbg=", + "lastModified": 1697704475, + "narHash": "sha256-lgJVxn7Q2I8TBdvbzyn7bl1MN5StEw3NvRzCvBFFuB8=", "owner": "cosmos", "repo": "gex", - "rev": "bc168741b2019745d343606d31b5c274f216fc3f", + "rev": "233d335dc9e8c89fb318d1081fae74435f6cac11", "type": "github" }, "original": { "owner": "cosmos", "repo": "gex", - "rev": "bc168741b2019745d343606d31b5c274f216fc3f", - "type": "github" - } - }, - "gitignore": { - "inputs": { - "nixpkgs": [ - "cosmos-nix", - "pre-commit-hooks", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1660459072, - "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", - "owner": "hercules-ci", - "repo": "gitignore.nix", - "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "gitignore.nix", + "rev": "233d335dc9e8c89fb318d1081fae74435f6cac11", "type": "github" } }, @@ -842,16 +819,16 @@ "juno-src": { "flake": false, "locked": { - "lastModified": 1679292088, - "narHash": "sha256-9xWOnlqjJWY7dyICYjl1Fmqi27352TF9ihcbZBI/Dps=", + "lastModified": 1697166503, + "narHash": "sha256-z9TOeDyUnn1T8Z662XqQJ9ydVIKKB54YISt7ms4xvos=", "owner": "CosmosContracts", "repo": "juno", - "rev": "1f392744afd9829f3f7837fe6f13800a19bad961", + "rev": "48507ed9b83511089cbf1fdc5bae54cae4a7f4b2", "type": "github" }, "original": { "owner": "CosmosContracts", - "ref": "v13.0.1", + "ref": "v17.1.1", "repo": "juno", "type": "github" } @@ -859,16 +836,16 @@ "migaloo-src": { "flake": false, "locked": { - "lastModified": 1681833529, - "narHash": "sha256-7sOAcUcc1HpZgLjjdiNuXeXCq9vB9EXCMY4YIT1MAgU=", + "lastModified": 1699273936, + "narHash": 
"sha256-O+vGWFnV3+bvXinxl1QjVyDnQskp5H1VnlL+TaMfiSs=", "owner": "White-Whale-Defi-Platform", "repo": "migaloo-chain", - "rev": "129e6fecd377614123f2af33417f9e31accf195f", + "rev": "de98de2dd96917ae1ab79161d573fc0b4ee1facf", "type": "github" }, "original": { "owner": "White-Whale-Defi-Platform", - "ref": "v2.0.2", + "ref": "v3.0.2", "repo": "migaloo-chain", "type": "github" } @@ -876,16 +853,16 @@ "neutron-src": { "flake": false, "locked": { - "lastModified": 1685114240, - "narHash": "sha256-xHi4W4fOT3kTmkPEKdGp6JbzKQELdWy9PIn0qsZhprY=", + "lastModified": 1701174344, + "narHash": "sha256-NuoOlrciBeL2f/A7wlQBqYlYJhSYucXRhLgxdasfyhI=", "owner": "neutron-org", "repo": "neutron", - "rev": "3c8dde1ff524551e24295d393a3913c25199d265", + "rev": "e605ed3db4381994ee8185ba4a0ff0877d34e67f", "type": "github" }, "original": { "owner": "neutron-org", - "ref": "v1.0.2", + "ref": "v2.0.0", "repo": "neutron", "type": "github" } @@ -907,11 +884,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1699725108, - "narHash": "sha256-NTiPW4jRC+9puakU4Vi8WpFEirhp92kTOSThuZke+FA=", + "lastModified": 1701040486, + "narHash": "sha256-vawYwoHA5CwvjfqaT3A5CT9V36Eq43gxdwpux32Qkjw=", "owner": "nixos", "repo": "nixpkgs", - "rev": "911ad1e67f458b6bcf0278fa85e33bb9924fed7e", + "rev": "45827faa2132b8eade424f6bdd48d8828754341a", "type": "github" }, "original": { @@ -921,18 +898,20 @@ "type": "github" } }, - "nixpkgs-stable": { + "nixpkgs-lib": { "locked": { - "lastModified": 1685801374, - "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", + "dir": "lib", + "lastModified": 1698611440, + "narHash": "sha256-jPjHjrerhYDy3q9+s5EAsuhyhuknNfowY6yt6pjn9pc=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", + "rev": "0cbe9f69c234a7700596e943bfae7ef27a31b735", "type": "github" }, "original": { + "dir": "lib", "owner": "NixOS", - "ref": "nixos-23.05", + "ref": "nixos-unstable", "repo": "nixpkgs", "type": "github" } @@ -955,11 +934,11 @@ }, "nixpkgs_3": { 
"locked": { - "lastModified": 1699343069, - "narHash": "sha256-s7BBhyLA6MI6FuJgs4F/SgpntHBzz40/qV0xLPW6A1Q=", + "lastModified": 1674990008, + "narHash": "sha256-4zOyp+hFW2Y7imxIpZqZGT8CEqKmDjwgfD6BzRUE0mQ=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "ec750fd01963ab6b20ee1f0cb488754e8036d89d", + "rev": "d2bbcbe6c626d339b25a4995711f07625b508214", "type": "github" }, "original": { @@ -971,11 +950,11 @@ }, "nixpkgs_4": { "locked": { - "lastModified": 1700108881, - "narHash": "sha256-+Lqybl8kj0+nD/IlAWPPG/RDTa47gff9nbei0u7BntE=", + "lastModified": 1701693815, + "narHash": "sha256-7BkrXykVWfkn6+c1EhFA3ko4MLi3gVG0p9G96PNnKTM=", "owner": "nixos", "repo": "nixpkgs", - "rev": "7414e9ee0b3e9903c24d3379f577a417f0aae5f1", + "rev": "09ec6a0881e1a36c29d67497693a67a16f4da573", "type": "github" }, "original": { @@ -988,45 +967,34 @@ "osmosis-src": { "flake": false, "locked": { - "lastModified": 1695859760, - "narHash": "sha256-Ad2Z4rzD0HQtnj2aQ4GD6ic5sxOHVPsaW4iNKZEDTiw=", + "lastModified": 1700576443, + "narHash": "sha256-UE3XEgdSp8mlgIKQRrBfb4wiPEeagB/wNWfDvDq4up4=", "owner": "osmosis-labs", "repo": "osmosis", - "rev": "38d1d2b748d161fd23f966d88b23b66a63c9a284", + "rev": "d9965b09d3e8690c77050bb095bc5b69772ebdfb", "type": "github" }, "original": { "owner": "osmosis-labs", - "ref": "v19.2.0", + "ref": "v20.4.0", "repo": "osmosis", "type": "github" } }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": "flake-compat", - "flake-utils": [ - "cosmos-nix", - "flake-utils" - ], - "gitignore": "gitignore", - "nixpkgs": [ - "cosmos-nix", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, + "provenance-src": { + "flake": false, "locked": { - "lastModified": 1698227354, - "narHash": "sha256-Fi5H9jbaQLmLw9qBi/mkR33CoFjNbobo5xWdX4tKz1Q=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "bd38df3d508dfcdff52cd243d297f218ed2257bf", + "lastModified": 1699901286, + "narHash": "sha256-dTX3kg2QUsC9SwsaommP4IFgIdQgWZrGQNtp/B+fzys=", + "owner": "provenance-io", + 
"repo": "provenance", + "rev": "91b0813de2f93d03cefe8efb226dc32f02690840", "type": "github" }, "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", + "owner": "provenance-io", + "ref": "v1.17.0", + "repo": "provenance", "type": "github" } }, @@ -1067,33 +1035,32 @@ "root": { "inputs": { "cosmos-nix": "cosmos-nix", - "flake-utils": "flake-utils_4", + "flake-utils": "flake-utils_3", "nixpkgs": "nixpkgs_4" } }, "rust-overlay": { "inputs": { - "flake-utils": "flake-utils_2", + "flake-utils": "flake-utils", "nixpkgs": "nixpkgs_2" }, "locked": { - "lastModified": 1688265347, - "narHash": "sha256-oe3kLnNvw2VWbG4Rp6IWUO5Uu5gF8J2oq8DbqbCsdZ4=", + "lastModified": 1701310566, + "narHash": "sha256-CL9J3xUR2Ejni4LysrEGX0IdO+Y4BXCiH/By0lmF3eQ=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "b8f3db465405014039985f1c5cea92cc29e1b3b5", + "rev": "6d3c6e185198b8bf7ad639f22404a75aa9a09bff", "type": "github" }, "original": { "owner": "oxalica", "repo": "rust-overlay", - "rev": "b8f3db465405014039985f1c5cea92cc29e1b3b5", "type": "github" } }, "sbt-derivation": { "inputs": { - "flake-utils": "flake-utils_3", + "flake-utils": "flake-utils_2", "nixpkgs": "nixpkgs_3" }, "locked": { @@ -1257,53 +1224,6 @@ "type": "github" } }, - "systems_3": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - }, - "systems_4": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - }, - "ts-relayer-src": { - "flake": false, - "locked": { - 
"lastModified": 1640291594, - "narHash": "sha256-mSI+qgB+e9YcFrcUAgHQnbXOQ8wxO2GmD0wNe+3ya0g=", - "owner": "confio", - "repo": "ts-relayer", - "rev": "23930794ddb64afcc80ac73ffe31ca69072c6549", - "type": "github" - }, - "original": { - "owner": "confio", - "ref": "v0.4.0", - "repo": "ts-relayer", - "type": "github" - } - }, "umee-src": { "flake": false, "locked": { @@ -1355,23 +1275,6 @@ "type": "github" } }, - "wasmvm_0_16_3-src": { - "flake": false, - "locked": { - "lastModified": 1640251271, - "narHash": "sha256-XvgAMDvAgzWaH7Q+mNZUBoaVhqAVlZ4ucIL0QFyNvWw=", - "owner": "CosmWasm", - "repo": "wasmvm", - "rev": "458e983721624548e66c0dcdd35140383966515e", - "type": "github" - }, - "original": { - "owner": "CosmWasm", - "ref": "v0.16.3", - "repo": "wasmvm", - "type": "github" - } - }, "wasmvm_1-src": { "flake": false, "locked": { @@ -1457,6 +1360,40 @@ "type": "github" } }, + "wasmvm_1_3_0-src": { + "flake": false, + "locked": { + "lastModified": 1689589428, + "narHash": "sha256-rsTYvbkYpDkUE4IvILdSL3hXMgAWxz5ltGotJB2t1e4=", + "owner": "CosmWasm", + "repo": "wasmvm", + "rev": "71a9c0dc0ecf9623148e82facb3564fbbf0a896f", + "type": "github" + }, + "original": { + "owner": "CosmWasm", + "ref": "v1.3.0", + "repo": "wasmvm", + "type": "github" + } + }, + "wasmvm_1_5_0-src": { + "flake": false, + "locked": { + "lastModified": 1698746477, + "narHash": "sha256-l0cNF0YjviEl/JLJ4VdvDtIGuAYyFfncVo83ROfQFD8=", + "owner": "CosmWasm", + "repo": "wasmvm", + "rev": "2041b184c146f278157d195361bc6cc6b56cc9d4", + "type": "github" + }, + "original": { + "owner": "CosmWasm", + "ref": "v1.5.0", + "repo": "wasmvm", + "type": "github" + } + }, "wasmvm_1_beta7-src": { "flake": false, "locked": { diff --git a/flake.nix b/flake.nix index 9defb85611..7e2f5756e3 100644 --- a/flake.nix +++ b/flake.nix @@ -27,12 +27,13 @@ packages = { inherit (cosmos-nix) + apalache + celestia cometbft + evmos gaia6-ordered gaia13 gaia14 - osmosis - wasmd ibc-go-v2-simapp ibc-go-v3-simapp ibc-go-v4-simapp @@ 
-41,16 +42,16 @@ ibc-go-v7-simapp ibc-go-v8-simapp interchain-security - apalache - evmos + migaloo + neutron juno + osmosis + provenance stride stride-no-admin stride-consumer-no-admin stride-consumer - migaloo - neutron - celestia + wasmd ; python = nixpkgs.python3.withPackages (p: [ diff --git a/guide/README.md b/guide/README.md index fa91a9112b..119ddbdef2 100644 --- a/guide/README.md +++ b/guide/README.md @@ -10,7 +10,7 @@ mdBook is a utility to create modern online books from Markdown files. This guide should be permanently deployed at its latest stable version at [hermes.informal.systems](https://hermes.informal.systems). -Current version: `v1.7.3`. +Current version: `v1.7.4`. The version of this guide is aligned with the [versioning of the ibc crates](../README.md). @@ -72,4 +72,4 @@ Basically if you want to add new content to the guide, just add an entry to the If you are adding content using your favorite IDE and have a terminal opened running `mdbook serve`, it provides a convenient watch functionality, so any changes detected on local files will trigger another build and if you refresh the guide on your browser they will be shown there. #### Submit your changes -Once you finish adding the new content just commit your changes (`git commit`) and push them to the respository (`git push`). +Once you finish adding the new content just commit your changes (`git commit`) and push them to the repository (`git push`). diff --git a/guide/src/SUMMARY.md b/guide/src/SUMMARY.md index b7340998c1..e85b810e6b 100644 --- a/guide/src/SUMMARY.md +++ b/guide/src/SUMMARY.md @@ -1,6 +1,6 @@ # Summary -# Hermes v1.7.3 +# Hermes v1.7.4 --- - [Introduction](./index.md) diff --git a/guide/src/advanced/features.md b/guide/src/advanced/features.md index 749c94f12e..6af3001987 100644 --- a/guide/src/advanced/features.md +++ b/guide/src/advanced/features.md @@ -4,8 +4,8 @@ This section includes a summary of the supported and planned features. 
It also i > **Cosmos SDK & IBC compatibility:** > Hermes supports Cosmos SDK chains implementing the [IBC protocol v1][ibcv1-proto] protocol specification. -> Cosmos SDK versions `0.44.0` through `0.47.x` are officially supported. -> IBC-go versions `1.1.*` thorough `7.*` are officially supported. +> Cosmos SDK versions `0.45.0` through `0.50.x` are officially supported. +> IBC-go versions `4.1.1` through `8.x` are officially supported. > In case Hermes finds an incompatible SDK or IBC-go version, it will output a log warning upon initialization as part of the `start` command or upon `health-check` command. --- @@ -112,6 +112,8 @@ __Feature comparison between Hermes and the Go relayer__ | | | | | FT_Transfer | ✅ | ✅ | can submit an ICS-20 fungible token transfer message | ICA_Relay | ✅ | ✅ | can relay ICS-27 Interchain account packets +| Interchain Query (ICQ) support | ✅ | ✅ | interchain querying using ABCI +| Cross-chain Queries | ✅ | ✅ | cross-chain querying between IBC-enabled chains | Packet_Recv_A | ✅ | ✅ | | Packet_Recv_P | ✅ | ✅ | | Packet_Timeout_A | ✅ | ✅ | diff --git a/guide/src/advanced/troubleshooting/cross-comp-config.md b/guide/src/advanced/troubleshooting/cross-comp-config.md index 8c6c07cd9c..33bd231224 100644 --- a/guide/src/advanced/troubleshooting/cross-comp-config.md +++ b/guide/src/advanced/troubleshooting/cross-comp-config.md @@ -42,7 +42,7 @@ __Hermes vs other configuration parameters that may cause Hermes failures__ ## Recheck When relaying packets, Hermes may send up multiple transactions to the full node's mempool. Hermes uses the `broadcast_tx_sync` RPC which does some basic verification and then returns the Tx hash back. -Unless configured with `sequential_batch_tx = true`, Hermes does not wait for a transaction to be included in a block before sending the next transaction. For this to be possible, Hermes keeps track of the account sequence number locally, incrementing it after each succesfull `broadcast_tx_sync` RPC. 
+Unless configured with `sequential_batch_tx = true`, Hermes does not wait for a transaction to be included in a block before sending the next transaction. For this to be possible, Hermes keeps track of the account sequence number locally, incrementing it after each successful `broadcast_tx_sync` RPC. During peak periods, it is possible that not all Tx-es in the mempool are included in a block. In order for new transactions to be accepted along with the pending Tx-es, the full node must be configured with `recheck = true`. Otherwise, Hermes may get the following error: ``` @@ -284,9 +284,9 @@ Set `ccv_consumer_chain = true` in `config.toml`. If Hermes is set to query CometBFT's `/block_results` RPC endpoint (which is the case when Hermes is set to use the [pull-based event source][pull-based-event-source]), you may encounter an `Internal error: node is not persisting abci responses (code: -32603)` when clearing packets. -This is likely due to the underlying CometBFT node being configured to discard ABCI responses via the `discard_abci_responses` configuration paramter being set to `true` in the Comet config. When this option is set to `true`, Hermes will not be able to clear any packets that were sent in either a `begin_block` or an `end_block`; transactions sent using `/tx_search` should still be cleared though. In addition, Hermes will not be able to relay using the pull-based event source if ABCI responses are being discarded. +This is likely due to the underlying CometBFT node being configured to discard ABCI responses via the `discard_abci_responses` configuration parameter being set to `true` in the Comet config. When this option is set to `true`, Hermes will not be able to clear any packets that were sent in either a `begin_block` or an `end_block`; transactions sent using `/tx_search` should still be cleared though. In addition, Hermes will not be able to relay using the pull-based event source if ABCI responses are being discarded. 
### Fix -Set the Comet node's `discard_abci_resonses = false` in the Comet configuration file. +Set the Comet node's `discard_abci_responses = false` in the Comet configuration file. -[pull-based-event-source]: ./../../documentation/configuration/configure-hermes.md#configuring-support-for-wasm-relaying \ No newline at end of file +[pull-based-event-source]: ./../../documentation/configuration/configure-hermes.md#configuring-support-for-wasm-relaying diff --git a/guide/src/advanced/troubleshooting/genesis-restart.md b/guide/src/advanced/troubleshooting/genesis-restart.md index dfeeefc37e..6d75986645 100644 --- a/guide/src/advanced/troubleshooting/genesis-restart.md +++ b/guide/src/advanced/troubleshooting/genesis-restart.md @@ -1,4 +1,4 @@ -# Updating a client after a Genesis restart withtout IBC upgrade proposal +# Updating a client after a Genesis restart without IBC upgrade proposal If a chain went through a genesis restart without an IBC upgrade proposal updating the client can result in an error due to blocks at lower heights not being available. 
diff --git a/guide/src/assets/grafana_template.json b/guide/src/assets/grafana_template.json index 2d7e91454a..ae3e049667 100644 --- a/guide/src/assets/grafana_template.json +++ b/guide/src/assets/grafana_template.json @@ -958,7 +958,7 @@ }, "editorMode": "builder", "exemplar": false, - "expr": "backlog_oldest_timestamp{job=\"hermes\"}", + "expr": "backlog_latest_update_timestamp{job=\"hermes\"}", "format": "table", "hide": false, "instant": true, @@ -1715,7 +1715,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "Indicates the number of `event` observed via the websocket subcription.", + "description": "Indicates the number of `event` observed via the websocket subscription.", "fieldConfig": { "defaults": { "color": { diff --git a/guide/src/documentation/commands/fee/register-counterparty-payee.md b/guide/src/documentation/commands/fee/register-counterparty-payee.md index 8831b59ac0..593ef984f5 100644 --- a/guide/src/documentation/commands/fee/register-counterparty-payee.md +++ b/guide/src/documentation/commands/fee/register-counterparty-payee.md @@ -2,7 +2,7 @@ Use this command in order to specify the address which will receive the `recv_fee` from incentivised packets relayed by the specified chain on the specified channel. -__NOTE:__ If the Hermes configuration parameter `auto_register_counterpary_payee = true` is set, make sure to use the `hermes fee register-counterparty-payee` command after calling `hermes start`, otherwise `auto_register_counterparty_payee` will overwrite the address registered using `hermes fee register-counterparty-payee`. +__NOTE:__ If the Hermes configuration parameter `auto_register_counterparty_payee = true` is set, make sure to use the `hermes fee register-counterparty-payee` command after calling `hermes start`, otherwise `auto_register_counterparty_payee` will overwrite the address registered using `hermes fee register-counterparty-payee`. 
```shell {{#include ../../../templates/help_templates/fee/register-counterparty-payee.md}} @@ -18,4 +18,4 @@ Register the address `cosmos10h9stc5v6ntgeygf5xf945njqq5h32r53uquvw` for the cha ```json SUCCESS Successfully registered counterparty payee -``` \ No newline at end of file +``` diff --git a/guide/src/documentation/commands/logs/index.md b/guide/src/documentation/commands/logs/index.md index cb94477a5c..e146314568 100644 --- a/guide/src/documentation/commands/logs/index.md +++ b/guide/src/documentation/commands/logs/index.md @@ -8,7 +8,7 @@ This command allows you to easily update the lowest log level displayed by Herme ## Set Raw Filter -This command allows you to update the tracing directive used to filter the logs. Please use this command with caution as it requires a precise syntaxe. +This command allows you to update the tracing directive used to filter the logs. Please use this command with caution as it requires a precise syntax. ```shell {{#include ../../../templates/help_templates/logs/raw.md}} diff --git a/guide/src/documentation/configuration/comet-compat-mode.md b/guide/src/documentation/configuration/comet-compat-mode.md index caf581102b..5ce5e07dfe 100644 --- a/guide/src/documentation/configuration/comet-compat-mode.md +++ b/guide/src/documentation/configuration/comet-compat-mode.md @@ -2,7 +2,7 @@ ## Overview -There are two different compatibility modes for CometBFT, one for version v0.34 and one for versions v0.37 and v0.38. In order to verify the compatiblity used Hermes queries the node's `/status` endpoint, which contains the CometBFT version used. This can be an issue if a chain uses a custom version which does not output the version string Hermes expects. To still be able to relay for these chains a configuration can be set in Hermes. +There are two different compatibility modes for CometBFT, one for version v0.34 and one for versions v0.37 and v0.38. 
In order to verify the compatibility used Hermes queries the node's `/status` endpoint, which contains the CometBFT version used. This can be an issue if a chain uses a custom version which does not output the version string Hermes expects. To still be able to relay for these chains a configuration can be set in Hermes. ## Configuration diff --git a/guide/src/documentation/configuration/performance.md b/guide/src/documentation/configuration/performance.md index 01f0a94de5..d2f768f1f9 100644 --- a/guide/src/documentation/configuration/performance.md +++ b/guide/src/documentation/configuration/performance.md @@ -91,7 +91,7 @@ setting `clear_on_start` to `false` under the `mode.packets` section, Hermes wil relay packets on active channels, provided they match the packet filter, if present. Otherwise Hermes will relay on all active channels. -Please note that because these settings are globa, they will affect the behaviour of Hermes for all chains listed in its configuration. +Please note that because these settings are global, they will affect the behaviour of Hermes for all chains listed in its configuration. Here is how the configuration file should look like in order to disable scanning altogether. @@ -103,7 +103,7 @@ enabled = false # ... -[mode.connnections] +[mode.connections] enabled = false # ... diff --git a/guide/src/documentation/forwarding/index.md b/guide/src/documentation/forwarding/index.md index afc659e75d..df433ecf73 100644 --- a/guide/src/documentation/forwarding/index.md +++ b/guide/src/documentation/forwarding/index.md @@ -62,7 +62,7 @@ Then if the IBC packet forward middleware is active, Chain A can send a packet t Before the packet forward middleware `v3.0.0` the receiver address was used to forward packets. 
In order for Chain A to send a packet to Chain C, the receiver of the packet had to be set as following: ``` -{intermediate_refund_address}|{foward_port}/{forward_channel}:{final_destination_address} +{intermediate_refund_address}|{forward_port}/{forward_channel}:{final_destination_address} ``` As specified in the packet-forward-middleware module implementation, [packet-forward-middleware](https://github.com/strangelove-ventures/packet-forward-middleware/tree/v2.1.3#example). diff --git a/guide/src/documentation/forwarding/legacy_test.md b/guide/src/documentation/forwarding/legacy_test.md index 7e28ba9fad..8f7999155d 100644 --- a/guide/src/documentation/forwarding/legacy_test.md +++ b/guide/src/documentation/forwarding/legacy_test.md @@ -201,7 +201,7 @@ gaiad version --log_level error --long | head -n4 - wallet1 (cosmos1csdnmydggcyvjd7z8l64z9lpdgmgyr4v7hw5r8) ``` -3. (Optional) Check the balance of the wallets before transfering tokens: +3. (Optional) Check the balance of the wallets before transferring tokens: ```shell {{#template ../../templates/commands/hermes/keys/balance_1.md CHAIN_ID=ibc-0 OPTIONS= --all}} diff --git a/guide/src/documentation/forwarding/test.md b/guide/src/documentation/forwarding/test.md index bf33df7e21..01ebd2562c 100644 --- a/guide/src/documentation/forwarding/test.md +++ b/guide/src/documentation/forwarding/test.md @@ -201,7 +201,7 @@ gaiad version --log_level error --long | head -n4 - wallet1 (cosmos1csdnmydggcyvjd7z8l64z9lpdgmgyr4v7hw5r8) ``` -3. (Optional) Check the balance of the wallets before transfering tokens: +3. 
(Optional) Check the balance of the wallets before transferring tokens: ```shell {{#template ../../templates/commands/hermes/keys/balance_1.md CHAIN_ID=ibc-0 OPTIONS= --all}} diff --git a/guide/src/documentation/telemetry/integration.md b/guide/src/documentation/telemetry/integration.md index 451f95b9a7..a33af5f656 100644 --- a/guide/src/documentation/telemetry/integration.md +++ b/guide/src/documentation/telemetry/integration.md @@ -22,10 +22,10 @@ acknowledgment_packets_confirmed_total{dst_chain="ibc-1",dst_channel="channel-0" # TYPE backlog_oldest_sequence gauge backlog_oldest_sequence{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",service_name="unknown_service",otel_scope_name="hermes",otel_scope_version=""} 0 backlog_oldest_sequence{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",service_name="unknown_service",otel_scope_name="hermes",otel_scope_version=""} 0 -# HELP backlog_oldest_timestamp Local timestamp for the oldest SendPacket event in the backlog -# TYPE backlog_oldest_timestamp gauge -backlog_oldest_timestamp{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",service_name="unknown_service",otel_scope_name="hermes",otel_scope_version=""} 0 -backlog_oldest_timestamp{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",service_name="unknown_service",otel_scope_name="hermes",otel_scope_version=""} 0 +# HELP backlog_latest_update_timestamp Local timestamp for the last time the backlog metrics have been updated +# TYPE backlog_latest_update_timestamp gauge +backlog_latest_update_timestamp{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",service_name="unknown_service",otel_scope_name="hermes",otel_scope_version=""} 0 +backlog_latest_update_timestamp{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",service_name="unknown_service",otel_scope_name="hermes",otel_scope_version=""} 0 # HELP backlog_size Total number of SendPacket events in 
the backlog # TYPE backlog_size gauge backlog_size{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",service_name="unknown_service",otel_scope_name="hermes",otel_scope_version=""} 0 diff --git a/guide/src/documentation/telemetry/operators.md b/guide/src/documentation/telemetry/operators.md index c550232beb..96204843c4 100644 --- a/guide/src/documentation/telemetry/operators.md +++ b/guide/src/documentation/telemetry/operators.md @@ -121,7 +121,7 @@ Since Hermes v1, we also introduced 3 metrics that sketch the backlog status of | Name | Description | OpenTelemetry type | Configuration Dependencies | | -------------------------- | -------------------------------------------------------------- | ------------------- | -------------------------- | | `backlog_oldest_sequence` | Sequence number of the oldest SendPacket event in the backlog | `u64` ValueRecorder | Packet workers enabled | -| `backlog_oldest_timestamp` | Local timestamp for the oldest SendPacket event in the backlog | `u64` ValueRecorder | Packet workers enabled | +| `backlog_latest_update_timestamp` | Local timestamp for the last time the backlog metrics have been updated | `u64` ValueRecorder | Packet workers enabled | | `backlog_size` | Total number of SendPacket events in the backlog | `u64` ValueRecorder | Packet workers enabled | @@ -129,9 +129,8 @@ Notes: - The `backlog_size` defines how many IBC packets users sent and were not yet relayed (i.e., received on the destination network, or timed-out). If this metric is increasing, it signals that the packet queue is increasing and there may be some errors in the Hermes logs that need your attention. -- If the `backlog_oldest_sequence` remains unchanged for more than a few minutes, that means that the packet with the respective sequence number is likely blocked -and cannot be relayed. 
To understand for how long the packet is block, Hermes will populate `backlog_oldest_timestamp` with the local time when it first observed -the `backlog_oldest_sequence` that is blocked. +- The `backlog_latest_update_timestamp` is used to get information on the reliability of the `backlog_*` metrics. If the timestamp doesn't change it means there might be an issue with the metrics. +- __NOTE__: The Hermes instance might miss the acknowledgment of an observed IBC packets relayed, this will cause the `backlog_*` metrics to contain an invalid value. In order to minimise this issue, whenever the Hermes instance clears packets the `backlog_*` metrics will be updated using the queried pending packets. ## How efficient and how secure is the IBC status on each network? diff --git a/guide/src/templates/help_templates/clear/packets.md b/guide/src/templates/help_templates/clear/packets.md index 4a6d12e0ad..35732330c6 100644 --- a/guide/src/templates/help_templates/clear/packets.md +++ b/guide/src/templates/help_templates/clear/packets.md @@ -7,14 +7,25 @@ USAGE: OPTIONS: --counterparty-key-name - use the given signing key for the counterparty chain (default: `counterparty_key_name` + Use the given signing key for the counterparty chain (default: `counterparty_key_name` config) -h, --help Print help information --key-name - use the given signing key for the specified chain (default: `key_name` config) + Use the given signing key for the specified chain (default: `key_name` config) + + --packet-sequences + Sequences of packets to be cleared on the specified chain. Either a single sequence or a + range of sequences can be specified. If not provided, all pending packets will be + cleared on both chains. Each element of the comma-separated list must be either a single + sequence or a range of sequences. 
Example: `1,10..20` will clear packets with sequences + 1, 10, 11, ..., 20 + + --query-packets-chunk-size + Number of packets to fetch at once from the chain (default: `query_packets_chunk_size` + config) REQUIRED: --chain Identifier of the chain diff --git a/guide/src/templates/help_templates/tx/packet-ack.md b/guide/src/templates/help_templates/tx/packet-ack.md index cb5ad49f54..ef90d47362 100644 --- a/guide/src/templates/help_templates/tx/packet-ack.md +++ b/guide/src/templates/help_templates/tx/packet-ack.md @@ -11,6 +11,12 @@ OPTIONS: --packet-data-query-height Exact height at which the packet data is queried via block_results RPC + --packet-sequences + Sequences of packets to be cleared on `dst-chain`. Either a single sequence or a range + of sequences can be specified. If not provided, all pending ack packets will be cleared. + Each element of the comma-separated list must be either a single sequence or a range of + sequences. Example: `1,10..20` will clear packets with sequences 1, 10, 11, ..., 20 + REQUIRED: --dst-chain Identifier of the destination chain --src-chain Identifier of the source chain diff --git a/guide/src/templates/help_templates/tx/packet-recv.md b/guide/src/templates/help_templates/tx/packet-recv.md index 747a5a7ccc..1d44fe0482 100644 --- a/guide/src/templates/help_templates/tx/packet-recv.md +++ b/guide/src/templates/help_templates/tx/packet-recv.md @@ -11,6 +11,13 @@ OPTIONS: --packet-data-query-height Exact height at which the packet data is queried via block_results RPC + --packet-sequences + Sequences of packets to be cleared on `dst-chain`. Either a single sequence or a range + of sequences can be specified. If not provided, all pending recv or timeout packets will + be cleared. Each element of the comma-separated list must be either a single sequence or + a range of sequences. 
Example: `1,10..20` will clear packets with sequences 1, 10, 11, + ..., 20 + REQUIRED: --dst-chain Identifier of the destination chain --src-chain Identifier of the source chain diff --git a/guide/src/templates/hermes-version.md b/guide/src/templates/hermes-version.md index 5f152d81a5..a9bbd79f98 100644 --- a/guide/src/templates/hermes-version.md +++ b/guide/src/templates/hermes-version.md @@ -1 +1 @@ -v1.7.3 +v1.7.4 diff --git a/guide/src/tutorials/production/start-relaying.md b/guide/src/tutorials/production/start-relaying.md index 38df477571..90b7e4c6b7 100644 --- a/guide/src/tutorials/production/start-relaying.md +++ b/guide/src/tutorials/production/start-relaying.md @@ -33,7 +33,7 @@ Finally, Hermes is designed to relay without any intervention, however, you migh ## Next steps -Visit the [Telemetry](../../documentation/telemetry/index.md) section to learn how to use the metrics and the [Avanced](../../advanced/index.md) section to learn about Hermes' features and general guidelines for troubleshooting. +Visit the [Telemetry](../../documentation/telemetry/index.md) section to learn how to use the metrics and the [Advanced](../../advanced/index.md) section to learn about Hermes' features and general guidelines for troubleshooting. You can also learn more about [Grafana's features](https://grafana.com/tutorials/grafana-fundamentals/) and learn how to create a [Grafana Managed Alert](https://grafana.com/docs/grafana/latest/alerting/alerting-rules/create-grafana-managed-rule/). 
diff --git a/tools/integration-test/Cargo.toml b/tools/integration-test/Cargo.toml index 5fc117949f..3b2327aff1 100644 --- a/tools/integration-test/Cargo.toml +++ b/tools/integration-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-integration-test" -version = "0.26.3" +version = "0.26.4" edition = "2021" rust-version = "1.71" license = "Apache-2.0" @@ -20,10 +20,10 @@ ibc-test-framework = { path = "../test-framework" } http = "0.2.9" serde_json = "1" time = "0.3" -toml = "0.7" +toml = "0.8" prost = { version = "0.12" } tonic = { version = "0.10", features = ["tls", "tls-roots"] } -serde = "1.0.166" +serde = "1.0.195" [features] default = [] @@ -40,6 +40,8 @@ clean-workers = [] fee-grant = [] interchain-security = [] celestia = [] +async-icq = [] +juno = [] [[bin]] name = "test_setup_with_binary_channel" @@ -47,3 +49,15 @@ doc = true [dev-dependencies] tempfile = "3.6.0" + +[dependencies.tendermint] +version = "0.34.0" + +[dependencies.tendermint-rpc] +version = "0.34.0" +features = ["http-client"] + +[dependencies.byte-unit] +version = "4.0.19" +default-features = false +features = ["serde"] \ No newline at end of file diff --git a/tools/integration-test/src/tests/async_icq/mod.rs b/tools/integration-test/src/tests/async_icq/mod.rs new file mode 100644 index 0000000000..5123bee973 --- /dev/null +++ b/tools/integration-test/src/tests/async_icq/mod.rs @@ -0,0 +1 @@ +pub mod simple_query; diff --git a/tools/integration-test/src/tests/async_icq/simple_query.rs b/tools/integration-test/src/tests/async_icq/simple_query.rs new file mode 100644 index 0000000000..ad06cea3b0 --- /dev/null +++ b/tools/integration-test/src/tests/async_icq/simple_query.rs @@ -0,0 +1,224 @@ +use ibc_relayer::{ + channel::version::Version, + config::ChainConfig, +}; +use ibc_test_framework::{ + chain::{ + config::{ + set_max_deposit_period, + set_voting_period, + }, + ext::{ + async_icq::AsyncIcqMethodsExt, + bootstrap::ChainBootstrapMethodsExt, + }, + }, + prelude::*, + relayer::channel::{ 
+ assert_eventually_channel_established, + init_channel_version, + }, + util::proposal_status::ProposalStatus, +}; +use tendermint::abci::Event; +use tendermint_rpc::{ + Client, + HttpClient, +}; + +#[test] +fn test_async_icq() -> Result<(), Error> { + run_binary_connection_test(&AsyncIcqTest) +} + +const MAX_DEPOSIT_PERIOD: &str = "10s"; +const VOTING_PERIOD: u64 = 10; +const MAX_RETRIES: usize = 10; + +pub struct AsyncIcqTest; + +impl TestOverrides for AsyncIcqTest { + fn modify_relayer_config(&self, config: &mut Config) { + config.mode.channels.enabled = true; + config.mode.clients.misbehaviour = false; + } + + // Allow Oracle message on host side + fn modify_genesis_file(&self, genesis: &mut serde_json::Value) -> Result<(), Error> { + use serde_json::Value; + + set_max_deposit_period(genesis, MAX_DEPOSIT_PERIOD)?; + set_voting_period(genesis, VOTING_PERIOD)?; + + let allow_messages = genesis + .get_mut("app_state") + .and_then(|app_state| app_state.get_mut("interchainquery")) + .and_then(|ica| ica.get_mut("params")) + .and_then(|params| params.get_mut("allow_queries")) + .and_then(|allow_messages| allow_messages.as_array_mut()); + + if let Some(allow_messages) = allow_messages { + allow_messages.push(Value::String( + "/provenance.oracle.v1.Query/Oracle".to_string(), + )); + Ok(()) + } else { + Err(Error::generic(eyre!("failed to update genesis file"))) + } + } + + fn channel_version(&self) -> Version { + Version::new("icq-1".to_owned()) + } +} + +impl BinaryConnectionTest for AsyncIcqTest { + fn run( + &self, + config: &TestConfig, + relayer: RelayerDriver, + chains: ConnectedChains, + connection: ConnectedConnection, + ) -> Result<(), Error> { + let fee_denom_a: MonoTagged = + MonoTagged::new(Denom::base(&config.native_tokens[0])); + let port_a = DualTagged::new(PortId::oracle()); + let port_b = DualTagged::new(PortId::icqhost()); + let (channel_id_b, channel_id_a) = init_channel_version( + &chains.handle_a, + &chains.handle_b, + &chains.client_id_a(), + 
&chains.client_id_b(), + &connection.connection_id_a.as_ref(), + &connection.connection_id_b.as_ref(), + &port_a.as_ref(), + &port_b.as_ref(), + Version::new("icq-1".to_owned()), + )?; + + // Check that the oracle channel is eventually established + let _counterparty_channel_id = assert_eventually_channel_established( + chains.handle_b(), + chains.handle_a(), + &channel_id_b.as_ref(), + &port_b.as_ref(), + )?; + + let driver = chains.node_a.chain_driver(); + + let wallet_a = chains.node_a.wallets().user1().cloned(); + + let relayer_a = chains.node_a.wallets().relayer().cloned(); + + driver.update_oracle( + &relayer_a.address().to_string(), + &wallet_a.address().to_string(), + )?; + + driver.value().assert_proposal_status( + driver.value().chain_id.as_str(), + &driver.value().command_path, + &driver.value().home_path, + &driver.value().rpc_listen_address(), + ProposalStatus::VotingPeriod, + "1", + )?; + + driver.vote_proposal(&fee_denom_a.with_amount(381000000u64).to_string())?; + + info!("Assert that the update oracle proposal is eventually passed"); + + driver.value().assert_proposal_status( + driver.value().chain_id.as_str(), + &driver.value().command_path, + &driver.value().home_path, + &driver.value().rpc_listen_address(), + ProposalStatus::Passed, + "1", + )?; + + let query = r#"{"query_version":{}}"#; + chains.node_a.chain_driver().async_icq( + channel_id_a.a_side.channel_id().unwrap(), + query, + &wallet_a.address().to_string(), + )?; + + assert_eventual_async_icq_success(&chains, &relayer)?; + + Ok(()) + } +} + +/// Listen to events on the controller side to assert if the async ICQ is eventually +/// successful +fn assert_eventual_async_icq_success( + chains: &ConnectedChains, + relayer: &RelayerDriver, +) -> Result<(), Error> { + let rpc_addr = match relayer.config.chains.first().unwrap() { + ChainConfig::CosmosSdk(c) => c.rpc_addr.clone(), + }; + + let mut rpc_client = HttpClient::new(rpc_addr).unwrap(); + 
rpc_client.set_compat_mode(tendermint_rpc::client::CompatMode::V0_34); + + for _ in 0..MAX_RETRIES { + if check_events(chains, &rpc_client).is_ok() { + return Ok(()); + } + sleep(Duration::from_secs(1)); + } + + Err(Error::generic(eyre!( + "failed to find EventOracleQueryError or EventOracleQuerySuccess after {MAX_RETRIES} tries" + ))) +} + +/// Checks if there is an Oracle event in the given events +fn check_events( + chains: &ConnectedChains, + rpc_client: &HttpClient, +) -> Result<(), Error> { + let response = chains + .node_a + .chain_driver() + .value() + .runtime + .block_on(rpc_client.latest_block_results()) + .map_err(|err| Error::generic(eyre!("Failed to fetch block results: {}", err)))?; + + if let Some(txs_results) = response.txs_results { + if let Some(events) = txs_results + .iter() + .find_map(|v| find_oracle_event(&v.events)) + { + return assert_async_icq_success(events); + } + } + + Err(Error::generic(eyre!( + "No EventOracleQueryError or EventOracleQuerySuccess" + ))) +} + +/// This method is used to find the Oracle event triggered by relaying +/// the acknowledgment of the async ICQ +fn find_oracle_event(event: &[Event]) -> Option { + event + .iter() + .find(|&e| e.kind.contains("provenance.oracle.v1.EventOracleQuery")) + .cloned() +} + +/// This method is used to assert if the found Oracle event is successful or not +fn assert_async_icq_success(event: Event) -> Result<(), Error> { + if event.kind == "provenance.oracle.v1.EventOracleQuerySuccess" { + debug!("async query successful with event: {event:#?}"); + Ok(()) + } else { + Err(Error::generic(eyre!( + "async query failed with response event: {event:#?}" + ))) + } +} diff --git a/tools/integration-test/src/tests/clear_packet.rs b/tools/integration-test/src/tests/clear_packet.rs index 9d6c54e01e..f2287c4aa4 100644 --- a/tools/integration-test/src/tests/clear_packet.rs +++ b/tools/integration-test/src/tests/clear_packet.rs @@ -1,8 +1,12 @@ use std::thread; -use ibc_relayer::config::ChainConfig; 
+use ibc_relayer::{ + chain::counterparty::pending_packet_summary, + config::ChainConfig, +}; use ibc_test_framework::{ prelude::*, + relayer::channel::query_identified_channel_end, util::random::random_u128_range, }; @@ -26,8 +30,16 @@ fn test_clear_packet_override() -> Result<(), Error> { run_binary_channel_test(&ClearPacketOverrideTest) } +#[test] +fn test_clear_packet_sequences() -> Result<(), Error> { + run_binary_channel_test(&ClearPacketSequencesTest) +} + pub struct ClearPacketTest; pub struct ClearPacketRecoveryTest; +pub struct ClearPacketNoScanTest; +pub struct ClearPacketOverrideTest; +pub struct ClearPacketSequencesTest; impl TestOverrides for ClearPacketTest { fn modify_relayer_config(&self, config: &mut Config) { @@ -98,7 +110,7 @@ impl BinaryChannelTest for ClearPacketTest { sleep(Duration::from_secs(1)); - // Spawn the supervisor only after the first IBC trasnfer + // Spawn the supervisor only after the first IBC transfer relayer.with_supervisor(|| { sleep(Duration::from_secs(1)); @@ -189,8 +201,6 @@ impl BinaryChannelTest for ClearPacketRecoveryTest { } } -pub struct ClearPacketNoScanTest; - impl TestOverrides for ClearPacketNoScanTest { fn modify_relayer_config(&self, config: &mut Config) { // Disabling the client workers and clear_on_start should make the relayer not @@ -281,7 +291,7 @@ impl BinaryChannelTest for ClearPacketNoScanTest { &channel.port_a.0, &channel.channel_id_a.0, &denom_a.with_amount(amount1).as_ref(), - &fee_denom_a.with_amount(1200u64).as_ref(), + &fee_denom_a.with_amount(381000000u64).as_ref(), &dst_height, )?; @@ -302,7 +312,6 @@ impl BinaryChannelTest for ClearPacketNoScanTest { }) } } -pub struct ClearPacketOverrideTest; impl TestOverrides for ClearPacketOverrideTest { fn modify_relayer_config(&self, config: &mut Config) { @@ -402,7 +411,7 @@ impl BinaryChannelTest for ClearPacketOverrideTest { &channel.port_a.0, &channel.channel_id_a.0, &denom_a.with_amount(amount1).as_ref(), - &fee_denom_a.with_amount(1200u64).as_ref(), 
+ &fee_denom_a.with_amount(381000000u64).as_ref(), &dst_height, )?; @@ -423,3 +432,132 @@ impl BinaryChannelTest for ClearPacketOverrideTest { }) } } + +impl TestOverrides for ClearPacketSequencesTest { + fn should_spawn_supervisor(&self) -> bool { + false + } +} + +use ibc_relayer::link::{ + Link, + LinkParameters, +}; + +impl BinaryChannelTest for ClearPacketSequencesTest { + fn run( + &self, + _config: &TestConfig, + relayer: RelayerDriver, + chains: ConnectedChains, + channel: ConnectedChannel, + ) -> Result<(), Error> { + const NUM_TRANSFERS: usize = 20; + let packet_config = relayer.config.mode.packets; + + let denom_a = chains.node_a.denom(); + + let wallet_a = chains.node_a.wallets().user1().cloned(); + let wallet_b = chains.node_b.wallets().user1().cloned(); + + let amount = denom_a.with_amount(random_u128_range(1000, 5000)); + + info!("Performing {NUM_TRANSFERS} IBC transfer, which should *not* be relayed"); + + chains.node_a.chain_driver().ibc_transfer_token_multiple( + &channel.port_a.as_ref(), + &channel.channel_id_a.as_ref(), + &wallet_a.as_ref(), + &wallet_b.address(), + &amount.as_ref(), + NUM_TRANSFERS, + None, + )?; + + sleep(Duration::from_secs(5)); + + let channel_end_a = query_identified_channel_end( + chains.handle_a(), + channel.channel_id_a.as_ref(), + channel.port_a.as_ref(), + )?; + + let pending_packets_a = + pending_packet_summary(chains.handle_a(), chains.handle_b(), channel_end_a.value())?; + + info!("Pending packets: {:?}", pending_packets_a); + + assert_eq!(pending_packets_a.unreceived_packets.len(), NUM_TRANSFERS); + + let opts = LinkParameters { + src_port_id: channel.port_a.clone().into_value(), + src_channel_id: channel.channel_id_a.clone().into_value(), + max_memo_size: packet_config.ics20_max_memo_size, + max_receiver_size: packet_config.ics20_max_receiver_size, + }; + + // Clear all even packets + let to_clear = pending_packets_a + .unreceived_packets + .iter() + .filter(|seq| seq.as_u64() % 2 == 0) + .map(|&seq| seq..=seq) + 
.collect::>(); + + info!("Packets to clear: {:?}", to_clear); + + let link = Link::new_from_opts( + chains.handle_a().clone(), + chains.handle_b().clone(), + opts, + false, + false, + )?; + + info!("Clearing all even packets ({})", to_clear.len()); + + link.relay_recv_packet_and_timeout_messages(to_clear) + .unwrap(); + + sleep(Duration::from_secs(10)); + + let pending_packets = + pending_packet_summary(chains.handle_a(), chains.handle_b(), channel_end_a.value())?; + + info!("Pending packets: {pending_packets:?}"); + + assert_eq!(pending_packets.unreceived_packets.len(), NUM_TRANSFERS / 2); + assert_eq!(pending_packets.unreceived_acks.len(), NUM_TRANSFERS / 2); + + let to_clear = pending_packets + .unreceived_acks + .iter() + .map(|&seq| seq..=seq) + .collect::>(); + + info!("Packets to clear: {to_clear:?}"); + + info!("Clearing all unreceived ack packets ({})", to_clear.len()); + + let rev_link = link.reverse(false, false).unwrap(); + rev_link.relay_ack_packet_messages(to_clear).unwrap(); + + let pending_packets_a = + pending_packet_summary(chains.handle_a(), chains.handle_b(), channel_end_a.value())?; + + info!("Pending packets: {pending_packets_a:?}"); + + assert_eq!(pending_packets_a.unreceived_acks.len(), 0); + assert_eq!( + pending_packets_a.unreceived_packets.len(), + NUM_TRANSFERS / 2 + ); + + info!( + "Successfully cleared all even packets, remains {} odd packets", + pending_packets_a.unreceived_packets.len() + ); + + Ok(()) + } +} diff --git a/tools/integration-test/src/tests/client_settings.rs b/tools/integration-test/src/tests/client_settings.rs index f712dbe39b..6553d401a8 100644 --- a/tools/integration-test/src/tests/client_settings.rs +++ b/tools/integration-test/src/tests/client_settings.rs @@ -39,8 +39,7 @@ impl TestOverrides for ClientDefaultsTest { chain_config_a.clock_drift = Duration::from_secs(3); chain_config_a.max_block_time = Duration::from_secs(5); chain_config_a.trusting_period = Some(Duration::from_secs(120_000)); - 
chain_config_a.trust_threshold = - TrustThreshold::new(13, 23).unwrap().try_into().unwrap(); + chain_config_a.trust_threshold = TrustThreshold::new(13, 23).unwrap(); } } @@ -49,7 +48,7 @@ impl TestOverrides for ClientDefaultsTest { chain_config_b.clock_drift = Duration::from_secs(6); chain_config_b.max_block_time = Duration::from_secs(15); chain_config_b.trusting_period = Some(Duration::from_secs(340_000)); - chain_config_b.trust_threshold = TrustThreshold::TWO_THIRDS.try_into().unwrap(); + chain_config_b.trust_threshold = TrustThreshold::TWO_THIRDS; } } } diff --git a/tools/integration-test/src/tests/client_upgrade.rs b/tools/integration-test/src/tests/client_upgrade.rs index ef532d8c66..e072b8bc22 100644 --- a/tools/integration-test/src/tests/client_upgrade.rs +++ b/tools/integration-test/src/tests/client_upgrade.rs @@ -32,6 +32,7 @@ use ibc_test_framework::{ chain::{ config::{ set_max_deposit_period, + set_min_deposit_amount, set_voting_period, }, ext::bootstrap::ChainBootstrapMethodsExt, @@ -44,6 +45,7 @@ const MAX_DEPOSIT_PERIOD: &str = "10s"; const VOTING_PERIOD: u64 = 10; const DELTA_HEIGHT: u64 = 15; const WAIT_CHAIN_UPGRADE: Duration = Duration::from_secs(4); +const MIN_DEPOSIT: u64 = 10000000u64; #[test] fn test_client_upgrade() -> Result<(), Error> { @@ -74,6 +76,9 @@ impl TestOverrides for ClientUpgradeTestOverrides { fn modify_genesis_file(&self, genesis: &mut serde_json::Value) -> Result<(), Error> { set_max_deposit_period(genesis, MAX_DEPOSIT_PERIOD)?; set_voting_period(genesis, VOTING_PERIOD)?; + // Set the min deposit amount the same as the deposit of the Upgrade proposal to + // assure that the proposal will go to voting period + set_min_deposit_amount(genesis, MIN_DEPOSIT)?; Ok(()) } } @@ -129,7 +134,7 @@ impl BinaryChainTest for ClientUpgradeTest { .map_err(handle_generic_error)?; // Vote on the proposal so the chain will upgrade - driver.vote_proposal(&fee_denom_a.with_amount(1200u64).to_string())?; + 
driver.vote_proposal(&fee_denom_a.with_amount(381000000u64).to_string())?; info!("Assert that the chain upgrade proposal is eventually passed"); @@ -275,7 +280,7 @@ impl BinaryChainTest for HeightTooHighClientUpgradeTest { .map_err(handle_generic_error)?; // Vote on the proposal so the chain will upgrade - driver.vote_proposal(&fee_denom_a.with_amount(1200u64).to_string())?; + driver.vote_proposal(&fee_denom_a.with_amount(381000000u64).to_string())?; // The application height reports a height of 1 less than the height according to Tendermint client_upgrade_height.increment(); @@ -372,7 +377,7 @@ impl BinaryChainTest for HeightTooLowClientUpgradeTest { .map_err(handle_generic_error)?; // Vote on the proposal so the chain will upgrade - driver.vote_proposal(&fee_denom_a.with_amount(1200u64).to_string())?; + driver.vote_proposal(&fee_denom_a.with_amount(381000000u64).to_string())?; // The application height reports a height of 1 less than the height according to Tendermint client_upgrade_height @@ -436,7 +441,7 @@ fn create_upgrade_plan( // Create and send an chain upgrade proposal Ok(UpgradePlanOptions { src_client_id, - amount: 10000000u64, + amount: MIN_DEPOSIT, denom: fee_denom_a.to_string(), height_offset: DELTA_HEIGHT, upgraded_chain_id: upgraded_chain_id.clone(), diff --git a/tools/integration-test/src/tests/connection_delay.rs b/tools/integration-test/src/tests/connection_delay.rs index 403d3ddc32..31210aa147 100644 --- a/tools/integration-test/src/tests/connection_delay.rs +++ b/tools/integration-test/src/tests/connection_delay.rs @@ -92,7 +92,7 @@ impl BinaryChannelTest for ConnectionDelayTest { assert_gt( &format!( - "Expect IBC transfer to only be successfull after {}s", + "Expect IBC transfer to only be successful after {}s", CONNECTION_DELAY.as_secs() ), &(time2 - time1).try_into().unwrap(), diff --git a/tools/integration-test/src/tests/denom_trace.rs b/tools/integration-test/src/tests/denom_trace.rs index f5a7262ad6..daada2ff45 100644 --- 
a/tools/integration-test/src/tests/denom_trace.rs +++ b/tools/integration-test/src/tests/denom_trace.rs @@ -13,7 +13,7 @@ pub struct IbcDenomTraceTest; impl TestOverrides for IbcDenomTraceTest {} /// In order to test the denom_trace at first transfer IBC tokens from Chain A -/// to Chain B, and then retrieving the trace hash of the transfered tokens. +/// to Chain B, and then retrieving the trace hash of the transferred tokens. /// The trace hash is used to query the denom_trace and the result is verified. impl BinaryChannelTest for IbcDenomTraceTest { fn run( diff --git a/tools/integration-test/src/tests/execute_schedule.rs b/tools/integration-test/src/tests/execute_schedule.rs index 6868de91ac..5cb10a9a86 100644 --- a/tools/integration-test/src/tests/execute_schedule.rs +++ b/tools/integration-test/src/tests/execute_schedule.rs @@ -41,15 +41,18 @@ impl BinaryChannelTest for ExecuteScheduleTest { fn run( &self, _config: &TestConfig, - _relayer: RelayerDriver, + relayer: RelayerDriver, chains: ConnectedChains, channel: ConnectedChannel, ) -> Result<(), Error> { let amount1 = random_u128_range(1000, 5000); + let packet_config = relayer.config.mode.packets; let chain_a_link_opts = LinkParameters { src_port_id: channel.port_a.clone().into_value(), src_channel_id: channel.channel_id_a.clone().into_value(), + max_memo_size: packet_config.ics20_max_memo_size, + max_receiver_size: packet_config.ics20_max_receiver_size, }; let chain_a_link = Link::new_from_opts( diff --git a/tools/integration-test/src/tests/fee_grant.rs b/tools/integration-test/src/tests/fee_grant.rs index 446cfc1226..704185bf0e 100644 --- a/tools/integration-test/src/tests/fee_grant.rs +++ b/tools/integration-test/src/tests/fee_grant.rs @@ -36,7 +36,7 @@ impl TestOverrides for FeeGrantTest {} impl BinaryChannelTest for FeeGrantTest { fn run( &self, - _config: &TestConfig, + config: &TestConfig, relayer: RelayerDriver, chains: ConnectedChains, channels: ConnectedChannel, @@ -44,6 +44,7 @@ impl 
BinaryChannelTest for FeeGrantTest { let denom_a = chains.node_a.denom(); let wallet_a = chains.node_a.wallets().user1().cloned(); let wallet_b = chains.node_b.wallets().user1().cloned(); + let fee_denom_a = MonoTagged::new(Denom::base(&config.native_tokens[0])); let a_to_b_amount = 12345u64; let granter = chains @@ -61,10 +62,11 @@ impl BinaryChannelTest for FeeGrantTest { .value() .to_string(); - chains - .node_a - .chain_driver() - .feegrant_grant(&granter, &grantee)?; + chains.node_a.chain_driver().feegrant_grant( + &granter, + &grantee, + &fee_denom_a.with_amount(381000000u64).as_ref(), + )?; // Wait for the feegrant to be processed thread::sleep(Duration::from_secs(5)); @@ -75,7 +77,16 @@ impl BinaryChannelTest for FeeGrantTest { &denom_a, )?; - let gas_denom: MonoTagged = MonoTagged::new(Denom::Base("stake".to_owned())); + let gas_denom_str = match relayer + .config + .chains + .first() + .ok_or_else(|| eyre!("chain configuration is empty"))? + { + ChainConfig::CosmosSdk(chain_config) => chain_config.gas_price.denom.clone(), + }; + + let gas_denom: MonoTagged = MonoTagged::new(Denom::Base(gas_denom_str)); let balance_user1_before = chains .node_a @@ -167,8 +178,8 @@ impl TestOverrides for NoFeeGrantTest {} impl BinaryChannelTest for NoFeeGrantTest { fn run( &self, - _config: &TestConfig, - _relayer: RelayerDriver, + config: &TestConfig, + relayer: RelayerDriver, chains: ConnectedChains, channels: ConnectedChannel, ) -> Result<(), Error> { @@ -176,6 +187,7 @@ impl BinaryChannelTest for NoFeeGrantTest { let wallet_a = chains.node_a.wallets().user1().cloned(); let wallet_a2 = chains.node_a.wallets().user2().cloned(); let wallet_b = chains.node_b.wallets().user1().cloned(); + let fee_denom_a = MonoTagged::new(Denom::base(&config.native_tokens[0])); let a_to_b_amount = 12345u64; let granter = chains @@ -193,10 +205,11 @@ impl BinaryChannelTest for NoFeeGrantTest { .value() .to_string(); - chains - .node_a - .chain_driver() - .feegrant_grant(&granter, &grantee)?; 
+ chains.node_a.chain_driver().feegrant_grant( + &granter, + &grantee, + &fee_denom_a.with_amount(381000000u64).as_ref(), + )?; // Wait for the feegrant to be processed thread::sleep(Duration::from_secs(5)); @@ -207,7 +220,16 @@ impl BinaryChannelTest for NoFeeGrantTest { &denom_a, )?; - let gas_denom: MonoTagged = MonoTagged::new(Denom::Base("stake".to_owned())); + let gas_denom_str = match relayer + .config + .chains + .first() + .ok_or_else(|| eyre!("chain configuration is empty"))? + { + ChainConfig::CosmosSdk(chain_config) => chain_config.gas_price.denom.clone(), + }; + + let gas_denom: MonoTagged = MonoTagged::new(Denom::Base(gas_denom_str)); let balance_user1_before = chains .node_a diff --git a/tools/integration-test/src/tests/forward/forward_transfer.rs b/tools/integration-test/src/tests/forward/forward_transfer.rs index 7f06c0c95f..93c0aced5a 100644 --- a/tools/integration-test/src/tests/forward/forward_transfer.rs +++ b/tools/integration-test/src/tests/forward/forward_transfer.rs @@ -5,7 +5,7 @@ //! //! - The `MisspelledMemoFieldsIbcForwardTransferTest` tests the case where the //! fields inside the memo are misspelled: -//! - Misspelled `forward`: The intemediary chain will not understand the transfer +//! - Misspelled `forward`: The intermediary chain will not understand the transfer //! must be forwarded, and will thus keep the tokens. //! - Misspelled `receiver`: The intermediary chain will not find the receiver field //! and will thus refund the sender. 
@@ -391,7 +391,7 @@ impl NaryChannelTest<3> for MisspelledMemoFieldsIbcForwardTransferTest { )?; info!( - "check that only the sender lost {} tokens and the intemediary chain received {} tokens", + "check that only the sender lost {} tokens and the intermediary chain received {} tokens", a_to_c_amount, a_to_c_amount ); diff --git a/tools/integration-test/src/tests/ica.rs b/tools/integration-test/src/tests/ica.rs index cec5b4b7ef..47237bf578 100644 --- a/tools/integration-test/src/tests/ica.rs +++ b/tools/integration-test/src/tests/ica.rs @@ -3,46 +3,12 @@ use std::{ str::FromStr, }; -use ibc_relayer::{ - chain::{ - handle::ChainHandle, - tracking::TrackedMsgs, - }, - config::{ - filter::{ - ChannelFilters, - ChannelPolicy, - FilterPattern, - }, - ChainConfig, - PacketFilter, - }, - event::IbcEventWithHeight, -}; -use ibc_relayer_types::{ - applications::{ - ics27_ica::{ - cosmos_tx::CosmosTx, - msgs::send_tx::MsgSendTx, - packet_data::InterchainAccountPacketData, - }, - transfer::{ - msgs::send::MsgSend, - Amount, - Coin, - }, - }, - bigint::U256, - core::ics04_channel::channel::State, - signer::Signer, - timestamp::Timestamp, - tx_msg::Msg, -}; use ibc_test_framework::{ chain::ext::ica::register_interchain_account, ibc::denom::Denom, prelude::*, relayer::channel::{ + assert_eventually_channel_closed, assert_eventually_channel_established, query_channel_end, }, @@ -64,6 +30,16 @@ fn test_ica_filter_allow() -> Result<(), Error> { ))) } +#[test] +fn test_ica_filter_deny() -> Result<(), Error> { + run_binary_connection_test(&IcaFilterTestDeny) +} + +#[test] +fn test_ica_close_channel() -> Result<(), Error> { + run_binary_connection_test(&ICACloseChannelTest) +} + pub struct IcaFilterTestAllow { packet_filter: PacketFilter, } @@ -198,35 +174,6 @@ impl BinaryConnectionTest for IcaFilterTestAllow { Ok(()) } } - -fn interchain_send_tx( - chain: &ChainA, - from: &Signer, - connection: &ConnectionId, - msg: InterchainAccountPacketData, - relative_timeout: Timestamp, -) -> 
Result, Error> { - let msg = MsgSendTx { - owner: from.clone(), - connection_id: connection.clone(), - packet_data: msg, - relative_timeout, - }; - - let msg_any = msg.to_any(); - - let tm = TrackedMsgs::new_static(vec![msg_any], "SendTx"); - - chain - .send_messages_and_wait_commit(tm) - .map_err(Error::relayer) -} - -#[test] -fn test_ica_filter_deny() -> Result<(), Error> { - run_binary_connection_test(&IcaFilterTestDeny) -} - pub struct IcaFilterTestDeny; impl TestOverrides for IcaFilterTestDeny { @@ -276,3 +223,163 @@ impl BinaryConnectionTest for IcaFilterTestDeny { ) } } + +pub struct ICACloseChannelTest; + +impl TestOverrides for ICACloseChannelTest { + fn modify_relayer_config(&self, config: &mut Config) { + config.mode.channels.enabled = true; + + config.mode.clients.misbehaviour = false; + } + + fn should_spawn_supervisor(&self) -> bool { + false + } +} + +impl BinaryConnectionTest for ICACloseChannelTest { + fn run( + &self, + _config: &TestConfig, + relayer: RelayerDriver, + chains: ConnectedChains, + connection: ConnectedConnection, + ) -> Result<(), Error> { + let stake_denom: MonoTagged = MonoTagged::new(Denom::base("stake")); + let (wallet, ica_address, controller_channel_id, controller_port_id) = relayer + .with_supervisor(|| { + // Register an interchain account on behalf of + // controller wallet `user1` where the counterparty chain is the interchain accounts host. + let (wallet, controller_channel_id, controller_port_id) = + register_interchain_account(&chains.node_a, chains.handle_a(), &connection)?; + + // Check that the corresponding ICA channel is eventually established. + let _counterparty_channel_id = assert_eventually_channel_established( + chains.handle_a(), + chains.handle_b(), + &controller_channel_id.as_ref(), + &controller_port_id.as_ref(), + )?; + + // Query the controller chain for the address of the ICA wallet on the host chain. 
+ let ica_address = chains.node_a.chain_driver().query_interchain_account( + &wallet.address(), + &connection.connection_id_a.as_ref(), + )?; + + chains.node_b.chain_driver().assert_eventual_wallet_amount( + &ica_address.as_ref(), + &stake_denom.with_amount(0u64).as_ref(), + )?; + + Ok(( + wallet, + ica_address, + controller_channel_id, + controller_port_id, + )) + })?; + + // Send funds to the interchain account. + let ica_fund = 42000u64; + + chains.node_b.chain_driver().local_transfer_token( + &chains.node_b.wallets().user1(), + &ica_address.as_ref(), + &stake_denom.with_amount(ica_fund).as_ref(), + )?; + + chains.node_b.chain_driver().assert_eventual_wallet_amount( + &ica_address.as_ref(), + &stake_denom.with_amount(ica_fund).as_ref(), + )?; + + let amount = 12345u64; + + let msg = MsgSend { + from_address: ica_address.to_string(), + to_address: chains.node_b.wallets().user2().address().to_string(), + amount: vec![Coin { + denom: stake_denom.to_string(), + amount: Amount(U256::from(amount)), + }], + }; + + let raw_msg = msg.to_any(); + + let cosmos_tx = CosmosTx { + messages: vec![raw_msg], + }; + + let raw_cosmos_tx = cosmos_tx.to_any(); + + let interchain_account_packet_data = InterchainAccountPacketData::new(raw_cosmos_tx.value); + + let signer = Signer::from_str(&wallet.address().to_string()).unwrap(); + + let balance_user2 = chains.node_b.chain_driver().query_balance( + &chains.node_b.wallets().user2().address(), + &stake_denom.as_ref(), + )?; + + interchain_send_tx( + chains.handle_a(), + &signer, + &connection.connection_id_a.0, + interchain_account_packet_data, + Timestamp::from_nanoseconds(1000000000).unwrap(), + )?; + + sleep(Duration::from_nanos(3000000000)); + + relayer.with_supervisor(|| { + // Check that user2 has not received the sent amount. 
+ chains.node_b.chain_driver().assert_eventual_wallet_amount( + &chains.node_b.wallets().user2().address(), + &(balance_user2).as_ref(), + )?; + sleep(Duration::from_secs(5)); + + // Check that the ICA account's balance has not been debited the sent amount. + chains.node_b.chain_driver().assert_eventual_wallet_amount( + &ica_address.as_ref(), + &stake_denom.with_amount(ica_fund).as_ref(), + )?; + + info!("Check that the channel closed after packet timeout..."); + + assert_eventually_channel_closed( + &chains.handle_a, + &chains.handle_b, + &controller_channel_id.as_ref(), + &controller_port_id.as_ref(), + )?; + + Ok(()) + }) + } +} + +fn interchain_send_tx( + chain: &ChainA, + from: &Signer, + connection: &ConnectionId, + msg: InterchainAccountPacketData, + relative_timeout: Timestamp, +) -> Result, Error> { + let msg = MsgSendTx { + owner: from.clone(), + connection_id: connection.clone(), + packet_data: msg, + relative_timeout, + }; + + let msg_any = msg.to_any(); + + let tm = TrackedMsgs::new_static(vec![msg_any], "SendTx"); + + chain + .send_messages_and_wait_commit(tm) + .map_err(Error::relayer) +} diff --git a/tools/integration-test/src/tests/ics20_filter/memo.rs b/tools/integration-test/src/tests/ics20_filter/memo.rs new file mode 100644 index 0000000000..c8e2b30b0e --- /dev/null +++ b/tools/integration-test/src/tests/ics20_filter/memo.rs @@ -0,0 +1,130 @@ +use byte_unit::Byte; +use ibc_relayer::config::types::ics20_field_size_limit::Ics20FieldSizeLimit; +use ibc_test_framework::prelude::*; + +#[test] +fn test_memo_filter() -> Result<(), Error> { + run_binary_channel_test(&IbcMemoFilterTest) +} + +const MEMO_SIZE_LIMIT: usize = 2000; + +pub struct IbcMemoFilterTest; + +impl TestOverrides for IbcMemoFilterTest { + fn modify_relayer_config(&self, config: &mut Config) { + config.mode.packets.ics20_max_memo_size = + Ics20FieldSizeLimit::new(true, Byte::from_bytes(MEMO_SIZE_LIMIT as u64)); + + config.mode.clients.misbehaviour = false; + } +} + +impl 
BinaryChannelTest for IbcMemoFilterTest { + fn run( + &self, + _config: &TestConfig, + _relayer: RelayerDriver, + chains: ConnectedChains, + channel: ConnectedChannel, + ) -> Result<(), Error> { + let denom_a = chains.node_a.denom(); + + let wallet_a = chains.node_a.wallets().user1().cloned(); + let wallet_b = chains.node_b.wallets().user1().cloned(); + + let balance_a = chains + .node_a + .chain_driver() + .query_balance(&wallet_a.address(), &denom_a)?; + + let a_to_b_amount = 23456u128; + + info!( + "Sending invalid IBC transfer from chain {} to chain {} with amount of {} {}", + chains.chain_id_a(), + chains.chain_id_b(), + a_to_b_amount, + denom_a + ); + + // Create a memo bigger than the allowed limit + let memo = "a".repeat(MEMO_SIZE_LIMIT + 1); + + chains + .node_a + .chain_driver() + .ibc_transfer_token_with_memo_and_timeout( + &channel.port_a.as_ref(), + &channel.channel_id_a.as_ref(), + &wallet_a.as_ref(), + &wallet_b.address(), + &denom_a.with_amount(a_to_b_amount).as_ref(), + Some(memo), + None, + )?; + + // Wait a bit before asserting that the transaction has not been relayed + sleep(Duration::from_secs(10)); + + info!("Assert that the IBC transfer was filtered"); + + let denom_b = derive_ibc_denom( + &channel.port_b.as_ref(), + &channel.channel_id_b.as_ref(), + &denom_a, + )?; + + // The sender tokens will be escrowed since the packet will not have timed out + chains.node_a.chain_driver().assert_eventual_wallet_amount( + &wallet_a.address(), + &(balance_a.clone() - a_to_b_amount).as_ref(), + )?; + + // The receiver will not have received the tokens since the packet should be + // filtered + chains.node_b.chain_driver().assert_eventual_wallet_amount( + &wallet_b.address(), + &denom_b.with_amount(0u64).as_ref(), + )?; + + // Retry the IBC transfer without the memo field + chains + .node_a + .chain_driver() + .ibc_transfer_token_with_memo_and_timeout( + &channel.port_a.as_ref(), + &channel.channel_id_a.as_ref(), + &wallet_a.as_ref(), + 
&wallet_b.address(), + &denom_a.with_amount(a_to_b_amount).as_ref(), + None, + None, + )?; + + info!( + "Waiting for user on chain B to receive IBC transferred amount of {}", + a_to_b_amount + ); + + // The sender tokens from the first transaction will still be + // escrowed since the packet will not have timed out + chains.node_a.chain_driver().assert_eventual_wallet_amount( + &wallet_a.address(), + &(balance_a - a_to_b_amount - a_to_b_amount).as_ref(), + )?; + + chains.node_b.chain_driver().assert_eventual_wallet_amount( + &wallet_b.address(), + &denom_b.with_amount(a_to_b_amount).as_ref(), + )?; + + info!( + "successfully performed IBC transfer from chain {} to chain {}", + chains.chain_id_a(), + chains.chain_id_b(), + ); + + Ok(()) + } +} diff --git a/tools/integration-test/src/tests/ics20_filter/mod.rs b/tools/integration-test/src/tests/ics20_filter/mod.rs new file mode 100644 index 0000000000..80d0261cba --- /dev/null +++ b/tools/integration-test/src/tests/ics20_filter/mod.rs @@ -0,0 +1 @@ +pub mod memo; diff --git a/tools/integration-test/src/tests/mod.rs b/tools/integration-test/src/tests/mod.rs index 2badc7caae..b56c22b49d 100644 --- a/tools/integration-test/src/tests/mod.rs +++ b/tools/integration-test/src/tests/mod.rs @@ -10,7 +10,7 @@ pub mod client_expiration; pub mod client_filter; pub mod client_refresh; pub mod client_settings; -#[cfg(not(feature = "celestia"))] +#[cfg(not(any(feature = "celestia", feature = "juno")))] pub mod client_upgrade; pub mod connection_delay; pub mod consensus_states; @@ -18,6 +18,7 @@ pub mod denom_trace; pub mod error_events; pub mod execute_schedule; pub mod handshake_on_start; +pub mod ics20_filter; pub mod memo; pub mod python; pub mod query_packet; @@ -27,6 +28,9 @@ pub mod tendermint; pub mod ternary_transfer; pub mod transfer; +#[cfg(any(doc, feature = "async-icq"))] +pub mod async_icq; + #[cfg(any(doc, feature = "ics29-fee"))] pub mod fee; diff --git a/tools/integration-test/src/tests/ordered_channel_clear.rs 
b/tools/integration-test/src/tests/ordered_channel_clear.rs index 83a387c603..0774bbeb60 100644 --- a/tools/integration-test/src/tests/ordered_channel_clear.rs +++ b/tools/integration-test/src/tests/ordered_channel_clear.rs @@ -91,11 +91,12 @@ impl BinaryChannelTest for OrderedChannelClearTest { fn run( &self, _config: &TestConfig, - _relayer: RelayerDriver, + relayer: RelayerDriver, chains: ConnectedChains, channel: ConnectedChannel, ) -> Result<(), Error> { let denom_a = chains.node_a.denom(); + let packet_config = relayer.config.mode.packets; let wallet_a = chains.node_a.wallets().user1().cloned(); let wallet_b = chains.node_b.wallets().user1().cloned(); @@ -131,6 +132,8 @@ impl BinaryChannelTest for OrderedChannelClearTest { let chain_a_link_opts = LinkParameters { src_port_id: channel.port_a.clone().into_value(), src_channel_id: channel.channel_id_a.clone().into_value(), + max_memo_size: packet_config.ics20_max_memo_size, + max_receiver_size: packet_config.ics20_max_receiver_size, }; let chain_a_link = Link::new_from_opts( @@ -144,6 +147,8 @@ impl BinaryChannelTest for OrderedChannelClearTest { let chain_b_link_opts = LinkParameters { src_port_id: channel.port_b.clone().into_value(), src_channel_id: channel.channel_id_b.clone().into_value(), + max_memo_size: packet_config.ics20_max_memo_size, + max_receiver_size: packet_config.ics20_max_receiver_size, }; let chain_b_link = Link::new_from_opts( @@ -237,11 +242,12 @@ impl BinaryChannelTest for OrderedChannelClearEqualCLITest { fn run( &self, _config: &TestConfig, - _relayer: RelayerDriver, + relayer: RelayerDriver, chains: ConnectedChains, channel: ConnectedChannel, ) -> Result<(), Error> { let num_msgs = 5_usize; + let packet_config = relayer.config.mode.packets; info!( "Performing {} IBC transfers on an ordered channel", @@ -277,6 +283,8 @@ impl BinaryChannelTest for OrderedChannelClearEqualCLITest { let chain_a_link_opts = LinkParameters { src_port_id: channel.port_a.clone().into_value(), src_channel_id: 
channel.channel_id_a.into_value(), + max_memo_size: packet_config.ics20_max_memo_size, + max_receiver_size: packet_config.ics20_max_receiver_size, }; let chain_a_link = Link::new_from_opts( @@ -288,9 +296,10 @@ impl BinaryChannelTest for OrderedChannelClearEqualCLITest { )?; let events_returned: Vec = chain_a_link - .relay_recv_packet_and_timeout_messages_with_packet_data_query_height(Some( - clear_height, - )) + .relay_recv_packet_and_timeout_messages_with_packet_data_query_height( + vec![], + Some(clear_height), + ) .unwrap(); info!("recv packets sent, chain events: {:?}", events_returned); diff --git a/tools/integration-test/src/tests/query_packet.rs b/tools/integration-test/src/tests/query_packet.rs index 6de2842c2c..6567c0bfab 100644 --- a/tools/integration-test/src/tests/query_packet.rs +++ b/tools/integration-test/src/tests/query_packet.rs @@ -41,11 +41,12 @@ impl BinaryChannelTest for QueryPacketPendingTest { fn run( &self, _config: &TestConfig, - _relayer: RelayerDriver, + relayer: RelayerDriver, chains: ConnectedChains, channel: ConnectedChannel, ) -> Result<(), Error> { let denom_a = chains.node_a.denom(); + let packet_config = relayer.config.mode.packets; let wallet_a = chains.node_a.wallets().user1().cloned(); let wallet_b = chains.node_b.wallets().user1().cloned(); @@ -70,6 +71,8 @@ impl BinaryChannelTest for QueryPacketPendingTest { let opts = LinkParameters { src_port_id: channel.port_a.clone().into_value(), src_channel_id: channel.channel_id_a.clone().into_value(), + max_memo_size: packet_config.ics20_max_memo_size, + max_receiver_size: packet_config.ics20_max_receiver_size, }; let link = Link::new_from_opts( chains.handle_a().clone(), @@ -92,7 +95,7 @@ impl BinaryChannelTest for QueryPacketPendingTest { assert!(summary.unreceived_acks.is_empty()); // Receive the packet on the destination chain - link.relay_recv_packet_and_timeout_messages()?; + link.relay_recv_packet_and_timeout_messages(vec![])?; let summary = 
pending_packet_summary(chains.handle_a(), chains.handle_b(), channel_end.value())?; @@ -102,7 +105,7 @@ impl BinaryChannelTest for QueryPacketPendingTest { // Acknowledge the packet on the source chain let link = link.reverse(false, false)?; - link.relay_ack_packet_messages()?; + link.relay_ack_packet_messages(vec![])?; let summary = pending_packet_summary(chains.handle_a(), chains.handle_b(), channel_end.value())?; diff --git a/tools/integration-test/src/tests/supervisor.rs b/tools/integration-test/src/tests/supervisor.rs index 294b5c57c8..5ea032b14b 100644 --- a/tools/integration-test/src/tests/supervisor.rs +++ b/tools/integration-test/src/tests/supervisor.rs @@ -241,7 +241,7 @@ impl BinaryChannelTest for SupervisorScanTest { &channels.port_a.0, &channels.channel_id_a.0, &denom_a.with_amount(1000u64).as_ref(), - &fee_denom_a.with_amount(1200u64).as_ref(), + &fee_denom_a.with_amount(381000000u64).as_ref(), &dst_height, )?; diff --git a/tools/integration-test/src/tests/tendermint/mod.rs b/tools/integration-test/src/tests/tendermint/mod.rs index 7b0ee374b1..7a53c41416 100644 --- a/tools/integration-test/src/tests/tendermint/mod.rs +++ b/tools/integration-test/src/tests/tendermint/mod.rs @@ -1 +1,5 @@ +/// Juno v17.1.1 forces a 2 second block time, which causes this test +/// to fail. 
+/// https://github.com/CosmosContracts/juno/blob/v17.1.1/cmd/junod/cmd/root.go#L93 +#[cfg(not(feature = "juno"))] pub mod sequential; diff --git a/tools/test-framework/Cargo.toml b/tools/test-framework/Cargo.toml index 7c8e0976ed..8a380f5f7d 100644 --- a/tools/test-framework/Cargo.toml +++ b/tools/test-framework/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ibc-test-framework" -version = "0.26.3" +version = "0.26.4" edition = "2021" license = "Apache-2.0" readme = "README.md" @@ -14,9 +14,9 @@ description = """ """ [dependencies] -ibc-relayer-types = { version = "=0.26.3", path = "../../crates/relayer-types" } -ibc-relayer = { version = "=0.26.3", path = "../../crates/relayer" } -ibc-relayer-cli = { version = "=1.7.3", path = "../../crates/relayer-cli" } +ibc-relayer-types = { version = "=0.26.4", path = "../../crates/relayer-types" } +ibc-relayer = { version = "=0.26.4", path = "../../crates/relayer" } +ibc-relayer-cli = { version = "=1.7.4", path = "../../crates/relayer-cli" } ibc-proto = { version = "0.39.0", features = ["serde"] } tendermint-rpc = { version = "0.34.0", features = ["http-client", "websocket-client"] } @@ -32,13 +32,13 @@ serde = "1.0" serde_json = "1" serde_yaml = "0.9.16" itertools = "0.10" -toml = "0.7" +toml = "0.8" subtle-encoding = "0.5.1" sha2 = "0.10.6" -crossbeam-channel = "0.5.8" +crossbeam-channel = "0.5.11" semver = "1.0.16" flex-error = "0.4.4" prost = { version = "0.12" } tonic = { version = "0.10", features = ["tls", "tls-roots"] } hdpath = "0.6.3" -once_cell = "1.18.0" +once_cell = "1.19.0" diff --git a/tools/test-framework/src/bootstrap/binary/chain.rs b/tools/test-framework/src/bootstrap/binary/chain.rs index 6d791604fd..788f203895 100644 --- a/tools/test-framework/src/bootstrap/binary/chain.rs +++ b/tools/test-framework/src/bootstrap/binary/chain.rs @@ -188,7 +188,7 @@ pub fn pad_client_ids( [`FullNode`]. 
The function accepts a proxy type `Seed` that should be unique - accross multiple calls so that the returned [`ChainHandle`] + across multiple calls so that the returned [`ChainHandle`] have a unique type. For example, the following test should fail to compile: diff --git a/tools/test-framework/src/bootstrap/consumer.rs b/tools/test-framework/src/bootstrap/consumer.rs index b62fa493ec..72d8227a08 100644 --- a/tools/test-framework/src/bootstrap/consumer.rs +++ b/tools/test-framework/src/bootstrap/consumer.rs @@ -81,8 +81,16 @@ pub fn bootstrap_consumer_node( chain_driver.update_genesis_file("genesis.json", genesis_modifier)?; // The configuration `soft_opt_out_threshold` might be missing and is required // for chains such as Neutron + let globalfee_minimum_gas = serde_json::json!([ + { + "denom": "stake", + "amount": "0", + } + ]); chain_driver.update_genesis_file("genesis.json", |genesis| { config::set_soft_opt_out_threshold(genesis, "0.05")?; + config::consensus_params_max_gas(genesis, "3000000")?; + config::globalfee_minimum_gas_prices(genesis, globalfee_minimum_gas)?; Ok(()) })?; diff --git a/tools/test-framework/src/bootstrap/nary/channel.rs b/tools/test-framework/src/bootstrap/nary/channel.rs index c0ae7f6a1c..b96b123bf8 100644 --- a/tools/test-framework/src/bootstrap/nary/channel.rs +++ b/tools/test-framework/src/bootstrap/nary/channel.rs @@ -1,5 +1,5 @@ /*! - Functions for bootstrapping N-ary number of chanels. + Functions for bootstrapping N-ary number of channels. 
*/ use core::{ @@ -129,7 +129,7 @@ pub fn bootstrap_channels_with_connections COSMOS_HD_PATH, Self::Evmos => EVMOS_HD_PATH, Self::Astria => todo!("Astria HD path not yet implemented"), + Self::Provenance => PROVENANCE_HD_PATH, } } @@ -41,6 +44,7 @@ impl ChainType { } Self::Evmos => ChainId::from_string(&format!("evmos_9000-{prefix}")), Self::Astria => todo!("Astria chain id not yet implemented"), + Self::Provenance => ChainId::from_string(&format!("pio-mainnet-{prefix}")), } } @@ -55,6 +59,7 @@ impl ChainType { res.push(format!("localhost:{json_rpc_port}")); } Self::Astria => todo!("Astria extra start args not yet implemented"), + Self::Provenance => {} } res } @@ -66,6 +71,7 @@ impl ChainType { pk_type: "/ethermint.crypto.v1.ethsecp256k1.PubKey".to_string(), }, Self::Astria => AddressType::Astria, + Self::Provenance => AddressType::default(), } } } @@ -81,6 +87,7 @@ impl FromStr for ChainType { name if name.contains("icad") => Ok(ChainType::Cosmos), name if name.contains("evmosd") => Ok(ChainType::Evmos), name if name.contains("astria") => Ok(ChainType::Astria), + name if name.contains("provenanced") => Ok(ChainType::Provenance), _ => Ok(ChainType::Cosmos), } } diff --git a/tools/test-framework/src/chain/cli/async_icq.rs b/tools/test-framework/src/chain/cli/async_icq.rs new file mode 100644 index 0000000000..ed97f32893 --- /dev/null +++ b/tools/test-framework/src/chain/cli/async_icq.rs @@ -0,0 +1,107 @@ +use std::str; + +use crate::{ + chain::exec::simple_exec, + error::Error, +}; + +pub fn update_oracle( + chain_id: &str, + command_path: &str, + home_path: &str, + rpc_listen_address: &str, + account: &str, + relayer: &str, +) -> Result<(), Error> { + simple_exec( + chain_id, + command_path, + &[ + "--home", + home_path, + "--chain-id", + chain_id, + "--node", + rpc_listen_address, + "-b", + "block", + "tx", + "oracle", + "update", + account, + "--deposit", + "1000000000000nhash", + "--from", + relayer, + "--fees", + "381000000nhash", + "--yes", + ], + )?; + + 
Ok(()) +} + +pub fn async_icq( + chain_id: &str, + command_path: &str, + home_path: &str, + rpc_listen_address: &str, + channel_id: &str, + query_json: &str, + from: &str, +) -> Result<(), Error> { + simple_exec( + chain_id, + command_path, + &[ + "--home", + home_path, + "--chain-id", + chain_id, + "--node", + rpc_listen_address, + "tx", + "oracle", + "send-query", + channel_id, + query_json, + "-b", + "block", + "--from", + from, + "--fees", + "381000000nhash", + "--yes", + ], + )?; + + Ok(()) +} + +pub fn query_oracle_address( + chain_id: &str, + command_path: &str, + home_path: &str, + rpc_listen_address: &str, +) -> Result { + let exec_output = simple_exec( + chain_id, + command_path, + &[ + "--home", + home_path, + "--chain-id", + chain_id, + "--node", + rpc_listen_address, + "query", + "oracle", + "address", + ], + )?; + let mut address = exec_output.stdout.replace("address: ", ""); + address.pop(); + + Ok(address) +} diff --git a/tools/test-framework/src/chain/cli/fee_grant.rs b/tools/test-framework/src/chain/cli/fee_grant.rs index 176fcdfefb..738f4e991d 100644 --- a/tools/test-framework/src/chain/cli/fee_grant.rs +++ b/tools/test-framework/src/chain/cli/fee_grant.rs @@ -10,6 +10,7 @@ pub fn feegrant_grant( rpc_listen_address: &str, granter: &str, grantee: &str, + fees: &str, ) -> Result<(), Error> { simple_exec( chain_id, @@ -28,6 +29,8 @@ pub fn feegrant_grant( "grant", granter, grantee, + "--fees", + fees, "--yes", ], )?; diff --git a/tools/test-framework/src/chain/cli/mod.rs b/tools/test-framework/src/chain/cli/mod.rs index 88ac42bf9e..5b2415d964 100644 --- a/tools/test-framework/src/chain/cli/mod.rs +++ b/tools/test-framework/src/chain/cli/mod.rs @@ -1,3 +1,4 @@ +pub mod async_icq; pub mod bootstrap; pub mod fee_grant; pub mod host_zone; diff --git a/tools/test-framework/src/chain/config.rs b/tools/test-framework/src/chain/config.rs index d7d96c3e68..29fcc6b956 100644 --- a/tools/test-framework/src/chain/config.rs +++ 
b/tools/test-framework/src/chain/config.rs @@ -27,6 +27,17 @@ pub fn set_rpc_port(config: &mut Value, port: u16) -> Result<(), Error> { Ok(()) } +pub fn enable_grpc(config: &mut Value) -> Result<(), Error> { + config + .get_mut("grpc") + .ok_or_else(|| eyre!("expect grpc section"))? + .as_table_mut() + .ok_or_else(|| eyre!("expect object"))? + .insert("enable".to_string(), true.into()); + + Ok(()) +} + pub fn set_grpc_port(config: &mut Value, port: u16) -> Result<(), Error> { config .get_mut("grpc") @@ -183,6 +194,31 @@ pub fn set_max_deposit_period(genesis: &mut serde_json::Value, period: &str) -> Ok(()) } +pub fn set_min_deposit_amount( + genesis: &mut serde_json::Value, + min_deposit_amount: u64, +) -> Result<(), Error> { + let min_deposit = genesis + .get_mut("app_state") + .and_then(|app_state| app_state.get_mut("gov")) + .and_then(|gov| get_mut_with_fallback(gov, "params", "deposit_params")) + .and_then(|deposit_params| deposit_params.get_mut("min_deposit")) + .and_then(|min_deposit| min_deposit.as_array_mut()) + .ok_or_else(|| eyre!("failed to find min_deposit in genesis file"))? 
+ .get_mut(0) + .and_then(|min_deposit_entry| min_deposit_entry.as_object_mut()) + .ok_or_else(|| eyre!("failed to find first entry of min_deposit in genesis file"))?; + + min_deposit + .insert( + "amount".to_owned(), + serde_json::Value::String(min_deposit_amount.to_string()), + ) + .ok_or_else(|| eyre!("failed to update deposit_params amount in genesis file"))?; + + Ok(()) +} + pub fn set_staking_bond_denom(genesis: &mut serde_json::Value, denom: &str) -> Result<(), Error> { let bond_denom = genesis .get_mut("app_state") @@ -323,6 +359,48 @@ pub fn set_soft_opt_out_threshold( Ok(()) } +pub fn consensus_params_max_gas( + genesis: &mut serde_json::Value, + max_gas: &str, +) -> Result<(), Error> { + let block = genesis + .get_mut("consensus_params") + .and_then(|consensus_params| consensus_params.get_mut("block")) + .and_then(|block| block.as_object_mut()) + .ok_or_else(|| eyre!("failed to get `block` field in genesis file"))?; + + block.insert( + "max_gas".to_owned(), + serde_json::Value::String(max_gas.to_string()), + ); + + Ok(()) +} + +pub fn globalfee_minimum_gas_prices( + genesis: &mut serde_json::Value, + minimum_gas_prices: serde_json::Value, +) -> Result<(), Error> { + let globalfee = genesis + .get_mut("app_state") + .and_then(|app_state| app_state.get_mut("globalfee")); + + // Only update `minimum_gas_prices` if `globalfee` is enabled + match globalfee { + Some(globalfee) => { + let params = globalfee + .get_mut("params") + .and_then(|params| params.as_object_mut()) + .ok_or_else(|| eyre!("failed to get `params` fields in genesis file"))?; + + params.insert("minimum_gas_prices".to_owned(), minimum_gas_prices); + } + None => debug!("chain doesn't have `globalfee`"), + } + + Ok(()) +} + /// Look up a key in a JSON object, falling back to the second key if the first one cannot be found. 
/// /// This lets us support both Tendermint 0.34 and 0.37, which sometimes use different keys for the diff --git a/tools/test-framework/src/chain/ext/async_icq.rs b/tools/test-framework/src/chain/ext/async_icq.rs new file mode 100644 index 0000000000..cbbef12ba8 --- /dev/null +++ b/tools/test-framework/src/chain/ext/async_icq.rs @@ -0,0 +1,45 @@ +use crate::{ + chain::{ + cli::async_icq::{ + async_icq, + update_oracle, + }, + driver::ChainDriver, + }, + error::Error, + prelude::*, + types::tagged::*, +}; + +pub trait AsyncIcqMethodsExt { + fn update_oracle(&self, relayer: &str, account: &str) -> Result<(), Error>; + + fn async_icq(&self, channel_id: &ChannelId, query_json: &str, from: &str) -> Result<(), Error>; +} + +impl<'a, Chain: Send> AsyncIcqMethodsExt for MonoTagged { + fn update_oracle(&self, relayer: &str, account: &str) -> Result<(), Error> { + let driver = *self.value(); + update_oracle( + driver.chain_id.as_str(), + &driver.command_path, + &driver.home_path, + &driver.rpc_listen_address(), + account, + relayer, + ) + } + + fn async_icq(&self, channel_id: &ChannelId, query_json: &str, from: &str) -> Result<(), Error> { + let driver = *self.value(); + async_icq( + driver.chain_id.as_str(), + &driver.command_path, + &driver.home_path, + &driver.rpc_listen_address(), + channel_id.as_str(), + query_json, + from, + ) + } +} diff --git a/tools/test-framework/src/chain/ext/fee_grant.rs b/tools/test-framework/src/chain/ext/fee_grant.rs index 76491667c9..907f0edd11 100644 --- a/tools/test-framework/src/chain/ext/fee_grant.rs +++ b/tools/test-framework/src/chain/ext/fee_grant.rs @@ -1,15 +1,28 @@ use crate::{ chain::cli::fee_grant::feegrant_grant, error::Error, - prelude::ChainDriver, + prelude::{ + ChainDriver, + TaggedTokenRef, + }, types::tagged::MonoTagged, }; pub trait FeeGrantMethodsExt { - fn feegrant_grant(&self, granter: &str, grantee: &str) -> Result<(), Error>; + fn feegrant_grant( + &self, + granter: &str, + grantee: &str, + fees: &TaggedTokenRef, + ) 
-> Result<(), Error>; } impl<'a, Chain: Send> FeeGrantMethodsExt for MonoTagged { - fn feegrant_grant(&self, granter: &str, grantee: &str) -> Result<(), Error> { + fn feegrant_grant( + &self, + granter: &str, + grantee: &str, + fees: &TaggedTokenRef, + ) -> Result<(), Error> { feegrant_grant( self.value().chain_id.as_str(), &self.value().command_path, @@ -17,6 +30,7 @@ impl<'a, Chain: Send> FeeGrantMethodsExt for MonoTagged` becomes a unique chain diff --git a/tools/test-framework/src/relayer/channel.rs b/tools/test-framework/src/relayer/channel.rs index 83d9574bab..61374eea3d 100644 --- a/tools/test-framework/src/relayer/channel.rs +++ b/tools/test-framework/src/relayer/channel.rs @@ -12,6 +12,7 @@ use ibc_relayer::{ }, channel::{ extract_channel_id, + version::Version, Channel, ChannelSide, }, @@ -98,6 +99,45 @@ pub fn init_channel( Ok((DualTagged::new(channel_id), channel2)) } +pub fn init_channel_version( + handle_a: &ChainA, + handle_b: &ChainB, + client_id_a: &TaggedClientIdRef, + client_id_b: &TaggedClientIdRef, + connection_id_a: &TaggedConnectionIdRef, + connection_id_b: &TaggedConnectionIdRef, + src_port_id: &TaggedPortIdRef, + dst_port_id: &TaggedPortIdRef, + version: Version, +) -> Result<(TaggedChannelId, Channel), Error> { + let channel = Channel { + connection_delay: Default::default(), + ordering: Ordering::Unordered, + a_side: ChannelSide::new( + handle_a.clone(), + client_id_a.cloned_value(), + connection_id_a.cloned_value(), + src_port_id.cloned_value(), + None, + Some(version.clone()), + ), + b_side: ChannelSide::new( + handle_b.clone(), + client_id_b.cloned_value(), + connection_id_b.cloned_value(), + dst_port_id.cloned_value(), + None, + Some(version), + ), + }; + + let event = channel.build_chan_open_init_and_send()?; + let channel_id = extract_channel_id(&event)?.clone(); + let channel2 = Channel::restore_from_event(handle_b.clone(), handle_a.clone(), event)?; + + Ok((DualTagged::new(channel_id), channel2)) +} + pub fn 
init_channel_optimistic( handle_a: &ChainA, handle_b: &ChainB, @@ -203,7 +243,7 @@ pub fn assert_eventually_channel_established( + handle_a: &ChainA, + handle_b: &ChainB, + channel_id_a: &TaggedChannelIdRef, + port_id_a: &TaggedPortIdRef, +) -> Result, Error> { + assert_eventually_succeed( + "channel should eventually closed", + 20, + Duration::from_secs(2), + || { + let channel_end_a = query_channel_end(handle_a, channel_id_a, port_id_a)?; + + if !channel_end_a.value().state_matches(&ChannelState::Closed) { + return Err(Error::generic(eyre!( + "expected channel end A to be in closed state, but it is instead `{}", + channel_end_a.value().state() + ))); + } + + let channel_id_b = channel_end_a + .tagged_counterparty_channel_id() + .ok_or_else(|| { + eyre!("expected counterparty channel id to present on closed channel") + })?; + + let port_id_b = channel_end_a.tagged_counterparty_port_id(); + + let channel_end_b = + query_channel_end(handle_b, &channel_id_b.as_ref(), &port_id_b.as_ref())?; + + if !channel_end_b.value().state_matches(&ChannelState::Closed) { + return Err(Error::generic(eyre!( + "expected channel end B to be in closed state" + ))); + } + + Ok(channel_id_b) + }, + ) +} diff --git a/tools/test-framework/src/relayer/transfer.rs b/tools/test-framework/src/relayer/transfer.rs index 686838a97d..8b086aadb1 100644 --- a/tools/test-framework/src/relayer/transfer.rs +++ b/tools/test-framework/src/relayer/transfer.rs @@ -99,7 +99,7 @@ pub fn build_transfer_message( for testing. During test, all chains should have the same local clock. We are also not really interested in setting a timeout for most tests, so we just put an approximate 1 minute timeout as the timeout - field is compulsary, and we want to avoid IBC timeout on CI. + field is compulsory, and we want to avoid IBC timeout on CI. 
The other reason we do not allow precise timeout to be specified is because it requires accessing the counterparty chain to query for diff --git a/tools/test-framework/src/relayer/tx.rs b/tools/test-framework/src/relayer/tx.rs index 4765b39e19..6950c5f88e 100644 --- a/tools/test-framework/src/relayer/tx.rs +++ b/tools/test-framework/src/relayer/tx.rs @@ -28,8 +28,16 @@ use crate::error::{ pub fn gas_config_for_test(native_token: String) -> GasConfig { let max_gas = 3000000; - let gas_multiplier = 1.1; - let gas_price = GasPrice::new(0.003, native_token); + let gas_multiplier = 1.5; + + // Provenance requires a high gas price + let price = if native_token == "nhash" { + 5000.0 + } else { + 0.003 + }; + + let gas_price = GasPrice::new(price, native_token); let default_gas = max_gas; let fee_granter = "".to_string(); diff --git a/tools/test-framework/src/types/binary/chains.rs b/tools/test-framework/src/types/binary/chains.rs index 60ef84ab11..bcf87999c8 100644 --- a/tools/test-framework/src/types/binary/chains.rs +++ b/tools/test-framework/src/types/binary/chains.rs @@ -171,7 +171,7 @@ impl ExportEnv for ConnectedChains config::GasPrice::new( + 5000.0, + test_config.native_tokens[native_token_number].clone(), + ), + _ => config::GasPrice::new( + 0.003, + test_config.native_tokens[native_token_number].clone(), + ), + }; + Ok(config::ChainConfig::CosmosSdk(CosmosSdkConfig { id: self.chain_driver.chain_id.clone(), rpc_addr: Url::from_str(&self.chain_driver.rpc_address())?, @@ -185,20 +197,19 @@ impl FullNode { default_gas: None, max_gas: Some(3000000), gas_adjustment: None, - gas_multiplier: Some(GasMultiplier::unsafe_new(1.2)), + gas_multiplier: Some(GasMultiplier::unsafe_new(1.5)), fee_granter: None, max_msg_num: Default::default(), max_tx_size: Default::default(), max_grpc_decoding_size: config::default::max_grpc_decoding_size(), + query_packets_chunk_size: config::default::query_packets_chunk_size(), max_block_time: Duration::from_secs(30), clock_drift: 
Duration::from_secs(5), trusting_period: Some(Duration::from_secs(14 * 24 * 3600)), + client_refresh_rate: config::default::client_refresh_rate(), ccv_consumer_chain: false, trust_threshold: Default::default(), - gas_price: config::GasPrice::new( - 0.003, - test_config.native_tokens[native_token_number].clone(), - ), + gas_price, packet_filter: Default::default(), address_type: chain_type.address_type(), memo_prefix: Default::default(), diff --git a/tools/test-framework/src/util/suspend.rs b/tools/test-framework/src/util/suspend.rs index 00f021d6b2..afaec475ab 100644 --- a/tools/test-framework/src/util/suspend.rs +++ b/tools/test-framework/src/util/suspend.rs @@ -62,12 +62,12 @@ pub fn hang_on_error( } Ok(Err(e)) => { if hang_on_fail { - error!("test failure occured with HANG_ON_FAIL=1, suspending the test to allow debugging: {:?}", + error!("test failure occurred with HANG_ON_FAIL=1, suspending the test to allow debugging: {:?}", e); suspend() } else { - error!("test failure occured. set HANG_ON_FAIL=1 to suspend the test on failure for debugging: {:?}", + error!("test failure occurred. set HANG_ON_FAIL=1 to suspend the test on failure for debugging: {:?}", e); Err(e)