diff --git a/.dockerignore b/.dockerignore
index aae0c8a6f9d..978a5b26769 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,2 +1,2 @@
 **/target
-Dockerfile
+Dockerfile*
diff --git a/.github/scripts/ci_test/ci_image_scan.py b/.github/scripts/ci_test/ci_image_scan.py
new file mode 100644
index 00000000000..ec51046e61c
--- /dev/null
+++ b/.github/scripts/ci_test/ci_image_scan.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+
+"""
+CI script for locating improperly configured images
+in Docker Compose files.
+
+Scans a list of file masks/names and checks the images they use against an allow list.
+"""
+
+from typing import List
+import sys
+from logging import getLogger, warning, error, info, INFO
+from argparse import ArgumentParser, Namespace
+from pathlib import Path
+from yaml import safe_load
+from yaml.error import YAMLError
+from git_root import git_root
+
+def parse_arguments() -> Namespace:
+    """
+    Returns:
+        Namespace: An object containing two attributes:
+        - masks: A list of file masks and names provided as positional arguments.
+        - allow: A list of Docker images provided
+          to the 'allow' option, or [] if not provided.
+    """
+    parser = ArgumentParser(description='Process some file names.')
+    parser.add_argument(
+        '--allow',
+        nargs='+',
+        help='one or more allowed image names',
+        default=[]
+    )
+    parser.add_argument(
+        'masks',
+        nargs='*',
+        help='list of file masks and exact names to be checked',
+        default=[]
+    )
+    return parser.parse_args()
+
+def get_paths(file_masks: List[str], root: Path):
+    """
+    Generate a list of pathlib.Path instances for given file masks
+    and filenames within a root directory.
+
+    This function searches for files in a specified root directory
+    matching the patterns and filenames provided in `file_masks`.
+    It returns a list of pathlib.Path instances for files that exist.
+    Patterns can include wildcards (e.g., "*.yml").
+    Only files that actually exist in the filesystem are included in the result.
+
+    Args:
+        file_masks (list of str): A list of strings representing file masks
+                                  and filenames.
+                                  File masks can include wildcard characters
+                                  (e.g., "topic.*.yml").
+        root (pathlib.Path):
+            A pathlib.Path instance representing
+            the root directory in which to search for files.
+
+    Returns:
+        list: A list containing pathlib.Path instances for each existing
+        file matching the file masks and filenames
+        in the specified root directory.
+
+    Raises:
+        TypeError: If `root` is not an instance of pathlib.Path.
+
+    Note:
+        The function does not return paths for files that do not exist.
+    """
+    if not isinstance(root, Path):
+        raise TypeError("The root argument must be a pathlib.Path instance")
+    paths = []
+    for mask in file_masks:
+        if '*' in mask:
+            matching_files = root.glob(mask)
+            paths.extend([file for file in matching_files if file.exists()])
+        else:
+            path = root / mask
+            if path.exists():
+                paths.append(path)
+            else:
+                warning(f'File not found: {path.name}')
+    return paths
+
+def validate_docker_config(compose_file: Path, allow: List[str]):
+    """
+    Validates a single Path for a Compose config.
+
+    Returns:
+        (int) 1 if a service uses an image that isn't allowed,
+        0 otherwise
+    """
+    status = 0
+    services = {}
+    try:
+        with open(compose_file, 'r', encoding='utf8') as compose_file_contents:
+            config_inst = safe_load(compose_file_contents.read())
+            if isinstance(config_inst, dict):
+                services = config_inst.get('services', {})
+            else:
+                error(f'Improper configuration at "{compose_file}"')
+                status = 1
+    except YAMLError:
+        error(f'Improper formatting at "{compose_file}"')
+        status = 1
+    for service in services.values():
+        if service.get('image', '') not in allow:
+            status = 1
+            break
+    return status
+
+def main():
+    """
+    Validate the supplied Docker configurations
+    and exit with an error if one of them is using
+    an incorrect image.
+    """
+    getLogger().setLevel(INFO)
+    args = parse_arguments()
+    for current_file in get_paths(args.masks, git_root()):
+        if validate_docker_config(current_file, args.allow):
+            warning(f'Wrong image in "{current_file.name}"')
+            sys.exit(1)
+    info('No incorrect Compose configurations found')
+
+if __name__ == '__main__':
+    main()
diff --git a/.github/scripts/ci_test/git_root.py b/.github/scripts/ci_test/git_root.py
new file mode 100644
index 00000000000..7412b928c56
--- /dev/null
+++ b/.github/scripts/ci_test/git_root.py
@@ -0,0 +1,19 @@
+"""
+This module contains a modified copy of git_root by Jan Tilly.
+It allows finding the repo root on GitHub for CI purposes.
+
+https://github.com/jtilly/git_root/blob/master/git_root/git_root.py
+"""
+
+from subprocess import Popen, PIPE, DEVNULL
+from os.path import abspath
+from pathlib import Path
+
+def git_root():
+    root = '.'
+    with Popen(
+        ['git', 'rev-parse', '--show-toplevel'],
+        stdout=PIPE, stderr=DEVNULL
+    ) as git_proc:
+        root = git_proc.communicate()[0].rstrip().decode('utf-8')
+    return Path(abspath(root))
diff --git a/.github/scripts/ci_test/requirements.txt b/.github/scripts/ci_test/requirements.txt
new file mode 100644
index 00000000000..be2b74db40f
--- /dev/null
+++ b/.github/scripts/ci_test/requirements.txt
@@ -0,0 +1 @@
+PyYAML==6.0.1
diff --git a/.github/workflows/iroha2-ci-image.yml b/.github/workflows/iroha2-ci-image.yml
index d6b03258647..a150d18c6a1 100644
--- a/.github/workflows/iroha2-ci-image.yml
+++ b/.github/workflows/iroha2-ci-image.yml
@@ -1,6 +1,11 @@
 name: I2::CI::Publish
 
-on: workflow_dispatch
+on:
+  workflow_dispatch:
+    inputs:
+      IROHA2_CI_DOCKERFILE:
+        required: true
+        default: Dockerfile.build
 
 jobs:
   dockerhub:
@@ -17,6 +22,6 @@ jobs:
           push: true
           tags: hyperledger/iroha2-ci:nightly-2024-01-12
           labels: commit=${{ github.sha }}
-          file: Dockerfile.build
+          file: ${{ github.event.inputs.IROHA2_CI_DOCKERFILE }}
           # This context specification is required
           context: .
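As a quick local sanity check of `ci_image_scan.py`, its functions can also be driven directly from Python rather than through the CLI. A minimal sketch, assuming it is run from `.github/scripts/ci_test/` with PyYAML installed; the service and image names below are illustrative, not taken from the repository's real Compose files:

```python
# Write a throwaway Compose file that uses a disallowed image, then validate it.
from pathlib import Path
from tempfile import TemporaryDirectory

from ci_image_scan import validate_docker_config

with TemporaryDirectory() as tmp:
    compose = Path(tmp) / 'docker-compose.yml'
    compose.write_text(
        'services:\n'
        '  irohad0:\n'
        '    image: hyperledger/iroha2:unexpected-tag\n',
        encoding='utf8',
    )
    # Prints 1 because the image is not in the allow list.
    print(validate_docker_config(compose, ['hyperledger/iroha2:dev']))
```

The workflow added further below drives the same logic through the CLI, e.g. `python .github/scripts/ci_test/ci_image_scan.py --allow iroha2:dev -- docker-compose*.yml`.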
diff --git a/.github/workflows/iroha2-dev-pr-wasm.yaml b/.github/workflows/iroha2-dev-pr-wasm.yaml
index aad3bfb085c..ff674b5bb9f 100644
--- a/.github/workflows/iroha2-dev-pr-wasm.yaml
+++ b/.github/workflows/iroha2-dev-pr-wasm.yaml
@@ -33,4 +33,4 @@ jobs:
         run: cargo install --path tools/wasm_test_runner
       - name: Run smart contract tests on WebAssembly VM
         working-directory: smart_contract
-        run: mold --run cargo test --tests --target wasm32-unknown-unknown --no-fail-fast --quiet
+        run: mold --run cargo test --release --tests --target wasm32-unknown-unknown --no-fail-fast --quiet
diff --git a/.github/workflows/iroha2-dev-pr.yml b/.github/workflows/iroha2-dev-pr.yml
index 900b5397679..1d5526cff59 100644
--- a/.github/workflows/iroha2-dev-pr.yml
+++ b/.github/workflows/iroha2-dev-pr.yml
@@ -69,6 +69,13 @@ jobs:
           compare-ref: ${{ github.base_ref }}
           compare-sha: ${{ github.event.pull_request.base.sha}}
           github-token: ${{ secrets.GITHUB_TOKEN }}
+      # (Temporarily) Add the parallel coverage upload to Codecov to compare the results with Coveralls
+      # - name: Upload coverage to codecov.io
+      #   uses: codecov/codecov-action@v3.1.4
+      #   with:
+      #     files: lcov.info
+      #     commit_parent: ${{ github.event.pull_request.base.sha }}
+      #     fail_ci_if_error: false
 
   integration:
     runs-on: [self-hosted, Linux, iroha2ci]
diff --git a/.github/workflows/iroha2-dev.yml b/.github/workflows/iroha2-dev.yml
index 311d376632a..f777ead22f8 100644
--- a/.github/workflows/iroha2-dev.yml
+++ b/.github/workflows/iroha2-dev.yml
@@ -10,11 +10,40 @@ env:
 jobs:
   registry:
     runs-on: [self-hosted, Linux, iroha2-dev-push]
-    container:
-      image: hyperledger/iroha2-ci:nightly-2024-01-12
     steps:
       - uses: actions/checkout@v4
-      - uses: docker/login-action@v3
+      - name: Set up Docker Buildx
+        id: buildx
+        if: always()
+        uses: docker/setup-buildx-action@v3
+        with:
+          install: true
+      - name: Build and export to Docker iroha2:dev image
+        uses: docker/build-push-action@v5
+        if: always()
+        with:
+          context: .
+          load: true
+          file: Dockerfile
+          tags: |
+            hyperledger/iroha2:dev
+            docker.soramitsu.co.jp/iroha2/iroha2:dev
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+      - name: Test docker-compose.single.yml before pushing
+        run: |
+          docker compose -f docker-compose.single.yml up --wait || exit 1
+          docker compose -f docker-compose.single.yml down
+      - name: Test docker-compose.local.yml before pushing
+        run: |
+          docker compose -f docker-compose.local.yml up --wait || exit 1
+          docker compose -f docker-compose.local.yml down
+      - name: Test docker-compose.yml before pushing
+        run: |
+          docker compose -f docker-compose.yml up --wait || exit 1
+          docker compose -f docker-compose.yml down
+      - name: Login to DockerHub
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -24,27 +53,15 @@ jobs:
           registry: docker.soramitsu.co.jp
           username: ${{ secrets.HARBOR_USERNAME }}
           password: ${{ secrets.HARBOR_TOKEN }}
-      - name: Set up Docker Buildx
-        id: buildx
-        if: always()
-        uses: docker/setup-buildx-action@v3
-        with:
-          install: true
-      - name: Build and push iroha2:dev image
+      - name: Push iroha2:dev image
         uses: docker/build-push-action@v5
-        if: always()
         with:
+          context: .
           push: true
          tags: |
            hyperledger/iroha2:dev
            docker.soramitsu.co.jp/iroha2/iroha2:dev
          labels: commit=${{ github.sha }}
-          build-args: TAG=dev
-          file: Dockerfile
-          # This context specification is required
-          context: .
- cache-from: type=gha - cache-to: type=gha,mode=max archive_binaries_and_schema: runs-on: ubuntu-latest diff --git a/.github/workflows/iroha2-no-incorrect-image.yml b/.github/workflows/iroha2-no-incorrect-image.yml new file mode 100644 index 00000000000..fbc6aa228de --- /dev/null +++ b/.github/workflows/iroha2-no-incorrect-image.yml @@ -0,0 +1,25 @@ +name: I2::CI::check_for_incorrect_images + +on: + push: + branches: + - iroha2-dev + - iroha2-stable + +jobs: + check: + runs-on: ubuntu-latest + steps: + - name: Set up Python 3.11 + uses: actions/setup-python@v1 + with: + python-version: "3.11" + - uses: actions/checkout@v3 + - name: Install dependencies + run: pip install -r .github/scripts/ci_test/requirements.txt --no-input + - name: Check containers on iroha2-stable branch + if: github.base_ref == 'iroha2-stable' + run: python .github/scripts/ci_test/ci_image_scan.py --allow iroha2:stable -- docker-compose*.yml + - name: Check containers on iroha2-dev branch + if: github.base_ref == 'iroha2-dev' + run: python .github/scripts/ci_test/ci_image_scan.py --allow iroha2:dev -- docker-compose*.yml diff --git a/.github/workflows/iroha2-profiling-image.yml b/.github/workflows/iroha2-profiling-image.yml new file mode 100644 index 00000000000..2f14c841cca --- /dev/null +++ b/.github/workflows/iroha2-profiling-image.yml @@ -0,0 +1,64 @@ +name: I2::Profiling::Publish + +on: + workflow_dispatch: + inputs: + IROHA2_IMAGE_TAG: + required: true + default: stable + IROHA2_IMAGE_RELEASE: + required: true + IROHA2_DOCKERFILE: + required: true + default: Dockerfile.glibc + IROHA2_PROFILE: + required: true + default: profiling + IROHA2_RUSTFLAGS: + required: false + default: -C force-frame-pointers=on + IROHA2_FEATURES: + required: false + default: profiling + IROHA2_CARGOFLAGS: + required: false + default: -Z build-std + +jobs: + registry: + runs-on: [self-hosted, Linux, iroha2-dev-push] + steps: + - uses: actions/checkout@v4 + - uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Login to Soramitsu Harbor + uses: docker/login-action@v3 + with: + registry: docker.soramitsu.co.jp + username: ${{ secrets.HARBOR_USERNAME }} + password: ${{ secrets.HARBOR_TOKEN }} + - name: Set up Docker Buildx + id: buildx + if: always() + uses: docker/setup-buildx-action@v3 + with: + install: true + - name: Build and push iroha2:profiling-image + uses: docker/build-push-action@v5 + if: always() + with: + push: true + tags: | + hyperledger/iroha2:${{ github.event.inputs.IROHA2_IMAGE_TAG }}-${{ github.event.inputs.IROHA2_IMAGE_RELEASE }}-profiling + docker.soramitsu.co.jp/iroha2/iroha2:${{ github.event.inputs.IROHA2_IMAGE_TAG }}-${{ github.event.inputs.IROHA2_IMAGE_RELEASE }}-profiling + labels: commit=${{ github.sha }} + build-args: | + "PROFILE=${{ github.event.inputs.IROHA2_PROFILE }}" + "RUSTFLAGS=${{ github.event.inputs.IROHA2_RUSTFLAGS }}" + "FEATURES=${{ github.event.inputs.IROHA2_FEATURES }}" + "CARGOFLAGS=${{ github.event.inputs.IROHA2_CARGOFLAGS }}" + file: ${{ github.event.inputs.IROHA2_DOCKERFILE }} + # This context specification is required + context: . 
diff --git a/.github/workflows/iroha2-release.yml b/.github/workflows/iroha2-release.yml index f704b75af4e..94c85acbb3c 100644 --- a/.github/workflows/iroha2-release.yml +++ b/.github/workflows/iroha2-release.yml @@ -10,8 +10,6 @@ env: jobs: registry: runs-on: ubuntu-latest - container: - image: hyperledger/iroha2-ci:nightly-2024-01-12 steps: - uses: actions/checkout@v4 - name: Set up Docker Buildx @@ -29,6 +27,31 @@ jobs: run: | RELEASE=$(curl -s https://raw.githubusercontent.com/hyperledger/iroha/${{ github.ref_name }}/Cargo.toml | sed -n '3p' | sed -e 's/version = "//g' -e 's/"$//' | tr -d '\n') echo "RELEASE=$RELEASE" >>$GITHUB_ENV + - name: Build and export to Docker iroha2 image + uses: docker/build-push-action@v5 + if: always() + with: + context: . + load: true + file: Dockerfile + tags: | + hyperledger/iroha2:${{ env.TAG }} + hyperledger/iroha2:${{ env.TAG }}-${{ env.RELEASE }} + docker.soramitsu.co.jp/iroha2/iroha2:${{ env.TAG }}-${{ env.RELEASE }} + cache-from: type=gha + cache-to: type=gha,mode=max + - name: Test docker-compose.single.yml before pushing + run: | + docker compose -f docker-compose.single.yml up --wait || exit 1 + docker compose -f docker-compose.single.yml down + - name: Test docker-compose.local.yml before pushing + run: | + docker compose -f docker-compose.local.yml up --wait || exit 1 + docker compose -f docker-compose.local.yml down + - name: Test docker-compose.yml before pushing + run: | + docker compose -f docker-compose.yml up --wait || exit 1 + docker compose -f docker-compose.yml down - name: Login to DockerHub uses: docker/login-action@v3 with: @@ -43,18 +66,13 @@ jobs: - name: Build and push iroha2 image uses: docker/build-push-action@v5 with: + context: . push: true tags: | hyperledger/iroha2:${{ env.TAG }} hyperledger/iroha2:${{ env.TAG }}-${{ env.RELEASE }} docker.soramitsu.co.jp/iroha2/iroha2:${{ env.TAG }}-${{ env.RELEASE }} labels: commit=${{ github.sha }} - build-args: TAG=${{ env.TAG }} - file: Dockerfile - # This context specification is required - context: . - cache-from: type=gha - cache-to: type=gha,mode=max configs: runs-on: ubuntu-latest diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 74bafed0f92..5ef9c16a5ec 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -266,6 +266,54 @@ tokio-console http://127.0.0.1:5555 +### Profiling + +
Expand to learn how to profile iroha.
+
+To optimize performance, it's useful to profile iroha.
+
+To do that, you should compile iroha with the `profiling` profile and the `profiling` feature:
+
+```bash
+RUSTFLAGS="-C force-frame-pointers=on" cargo +nightly -Z build-std build --target your-desired-target --profile profiling --features profiling
+```
+
+Then start iroha and attach a profiler of your choice to the iroha pid.
+
+Alternatively, it's possible to build iroha inside Docker with profiler support and profile it this way.
+
+```bash
+docker build -f Dockerfile.glibc --build-arg="PROFILE=profiling" --build-arg='RUSTFLAGS=-C force-frame-pointers=on' --build-arg='FEATURES=profiling' --build-arg='CARGOFLAGS=-Z build-std' -t iroha2:profiling .
+```
+
+E.g., using `perf` (available only on Linux):
+
+```bash
+# to capture a profile
+sudo perf record -g -p <iroha pid>
+# to analyze the profile
+sudo perf report
+```
+
+To be able to observe the executor's profile while profiling iroha, the executor should be compiled without stripping symbols.
+It can be done by running:
+
+```bash
+# compile the executor without optimizations
+cargo run --bin iroha_wasm_builder_cli -- build ./path/to/executor --outfile executor.wasm
+```
+
+With the `profiling` feature enabled, iroha exposes an endpoint for scraping pprof profiles:
+
+```bash
+# profile iroha for 30 seconds and get a protobuf profile
+curl host:port/debug/pprof/profile?seconds=30 -o profile.pb
+# analyze the profile in a browser (requires Go to be installed)
+go tool pprof -web profile.pb
+```
+
+ ## Style Guides Please follow these guidelines when you make code contributions to our project: diff --git a/Cargo.lock b/Cargo.lock index 9e686724df6..4b76a0de150 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,7 +30,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ "crypto-common", - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -60,25 +60,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee5cca1ddc8b9dceb55b7f1272a9d1e643d73006f350a20ab4926d24e33f0f0d" -[[package]] -name = "amcl_wrapper" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c7c7c7627444413f6a488bf9e6d352aea6fcfa281123cd92ecac0b3c9ef5ef2" -dependencies = [ - "byteorder", - "lazy_static", - "miracl_core", - "rand 0.7.3", - "rayon", - "serde", - "serde_bytes", - "serde_json", - "sha3", - "subtle-encoding", - "zeroize", -] - [[package]] name = "android-tzdata" version = "0.1.1" @@ -166,6 +147,135 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +[[package]] +name = "ark-bls12-377" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb00293ba84f51ce3bd026bd0de55899c4e68f0a39a5728cebae3a73ffdc0a4f" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-std", +] + +[[package]] +name = "ark-bls12-381" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c775f0d12169cba7aae4caeb547bb6a50781c7449a8aa53793827c9ec4abf488" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", +] + +[[package]] +name = "ark-ec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" +dependencies = [ + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "derivative", + "hashbrown 0.13.2", + "itertools 0.10.5", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm", + "ark-ff-macros", + "ark-serialize", + "ark-std", + "derivative", + "digest", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-poly" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "derivative", + "hashbrown 0.13.2", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-serialize-derive", + "ark-std", + "digest", + "num-bigint", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand", +] + [[package]] name = "arrayref" version = "0.3.7" @@ -390,19 +500,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.7", -] - -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array 0.12.4", + "digest", ] [[package]] @@ -411,16 +509,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.7", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", + "generic-array", ] [[package]] @@ -455,12 +544,6 @@ version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - [[package]] name = "byte-unit" version = "4.0.19" @@ -736,6 +819,12 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +[[package]] +name = "constcat" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd7e35aee659887cbfb97aaf227ac12cad1a9d7c71e55ff3376839ed4e282d08" + [[package]] name = "core-foundation" version = "0.9.4" @@ -770,6 +859,15 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "cpp_demangle" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8227005286ec39567949b33df9896bcadfa6051bccca2488129f108ca23119" +dependencies = [ + "cfg-if", +] + [[package]] name = "cpufeatures" version = "0.2.11" @@ -1030,8 +1128,8 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ - "generic-array 0.14.7", - "rand_core 0.6.4", + "generic-array", + "rand_core", "subtle", "zeroize", ] @@ -1042,8 +1140,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.7", - "rand_core 0.6.4", + "generic-array", 
"typenum", ] @@ -1056,7 +1153,7 @@ dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", - "digest 0.10.7", + "digest", "fiat-crypto", "platforms", "rustc_version", @@ -1207,6 +1304,17 @@ dependencies = [ "powerfmt", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -1229,22 +1337,13 @@ dependencies = [ "thiserror", ] -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array 0.12.4", -] - [[package]] name = "digest" version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.4", + "block-buffer", "const-oid", "crypto-common", "subtle", @@ -1313,7 +1412,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ "der", - "digest 0.10.7", + "digest", "elliptic-curve", "rfc6979", "signature", @@ -1338,7 +1437,7 @@ checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core 0.6.4", + "rand_core", "serde", "sha2", "subtle", @@ -1359,12 +1458,12 @@ checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ "base16ct", "crypto-bigint", - "digest 0.10.7", + "digest", "ff", - "generic-array 0.14.7", + "generic-array", "group", "pkcs8", - "rand_core 0.6.4", + "rand_core", "sec1", "subtle", "zeroize", @@ -1457,7 +1556,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -1479,6 +1578,18 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "findshlibs" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64" +dependencies = [ + "cc", + "lazy_static", + "libc", + "winapi", +] + [[package]] name = "fixedbitset" version = "0.4.2" @@ -1656,15 +1767,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "generic-array" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" -dependencies = [ - "typenum", -] - [[package]] name = "generic-array" version = "0.14.7" @@ -1676,17 +1778,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - [[package]] name = "getrandom" version = "0.2.11" @@ -1695,7 +1786,7 @@ checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" dependencies = [ "cfg-if", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", ] [[package]] @@ -2230,7 +2321,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff", - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -2371,7 +2462,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.7", + "digest", ] [[package]] @@ -2554,7 +2645,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -2633,7 +2724,7 @@ dependencies = [ "iroha_wasm_builder", "once_cell", "parity-scale-codec", - "rand 0.8.5", + "rand", "serde", "serde_json", "tempfile", @@ -2748,7 +2839,7 @@ dependencies = [ "once_cell", "parity-scale-codec", "parking_lot", - "rand 0.8.5", + "rand", "serde", "serde_json", "tempfile", @@ -2776,13 +2867,12 @@ version = "2.0.0-pre-rc.20" dependencies = [ "aead", "amcl", - "amcl_wrapper", "arrayref", "blake2", "chacha20poly1305", "curve25519-dalek", "derive_more", - "digest 0.10.7", + "digest", "displaydoc", "ed25519-dalek", "elliptic-curve", @@ -2798,8 +2888,8 @@ dependencies = [ "libsodium-sys-stable", "openssl", "parity-scale-codec", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "secp256k1", "serde", "serde_json", @@ -2807,6 +2897,7 @@ dependencies = [ "sha2", "signature", "thiserror", + "w3f-bls", "x25519-dalek", "zeroize", ] @@ -2929,7 +3020,7 @@ dependencies = [ "iroha_config", "iroha_futures_derive", "iroha_logger", - "rand 0.8.5", + "rand", "serde", "serde_json", "tokio", @@ -3018,7 +3109,7 @@ dependencies = [ "iroha_logger", "iroha_primitives", "parity-scale-codec", - "rand 0.8.5", + "rand", "test_network", "thiserror", "tokio", @@ -3097,6 +3188,7 @@ name = "iroha_smart_contract" version = "2.0.0-pre-rc.20" dependencies = [ "derive_more", + "getrandom", "iroha_data_model", "iroha_macro", "iroha_smart_contract_derive", @@ -3205,6 +3297,7 @@ dependencies = [ "iroha_torii_derive", "iroha_version", "parity-scale-codec", + "pprof", "serde", "serde_json", "thiserror", @@ -3417,7 +3510,6 @@ dependencies = [ "elliptic-curve", "once_cell", "sha2", - "signature", ] [[package]] @@ -3700,16 +3792,10 @@ checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" dependencies = [ "libc", "log", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.48.0", ] -[[package]] -name = "miracl_core" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4330eca86d39f2b52d0481aa1e90fe21bfa61f11b0bf9b48ab95595013cefe48" - [[package]] name = "multer" version = "2.1.0" @@ -3755,6 +3841,17 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", +] + [[package]] name = "nom" version = "7.1.3" @@ -3775,6 +3872,27 @@ dependencies = [ "winapi", ] +[[package]] +name = "num-bigint" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.17" @@ -3828,12 +3946,6 @@ version = "11.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - [[package]] name = "opaque-debug" version = "0.3.0" @@ -4186,7 +4298,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", - "opaque-debug 0.3.0", + "opaque-debug", "universal-hash", ] @@ -4196,6 +4308,27 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" +[[package]] +name = "pprof" +version = "0.13.0" +source = "git+https://github.com/Erigara/pprof-rs?branch=fix_pointer_align#55fa41916b9bb7f2029643e26168556efda19333" +dependencies = [ + "backtrace", + "cfg-if", + "findshlibs", + "libc", + "log", + "nix", + "once_cell", + "parking_lot", + "protobuf", + "protobuf-codegen-pure", + "smallvec", + "symbolic-demangle", + "tempfile", + "thiserror", +] + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -4287,8 +4420,8 @@ dependencies = [ "bitflags 2.4.1", "lazy_static", "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "rand_xorshift", "regex-syntax 0.8.2", "rusty-fork", @@ -4328,6 +4461,31 @@ dependencies = [ "prost", ] +[[package]] +name = "protobuf" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + +[[package]] +name = "protobuf-codegen" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "033460afb75cf755fcfc16dfaed20b86468082a2ea24e05ac35ab4a099a017d6" +dependencies = [ + "protobuf", +] + +[[package]] +name = "protobuf-codegen-pure" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95a29399fc94bcd3eeaa951c715f7bea69409b2445356b00519740bcd6ddd865" +dependencies = [ + "protobuf", + "protobuf-codegen", +] + [[package]] name = "psm" version = "0.1.21" @@ -4358,19 +4516,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", -] - [[package]] name = "rand" version = "0.8.5" @@ -4378,18 +4523,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" 
-dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -4399,16 +4534,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] @@ -4417,16 +4543,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.11", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", + "getrandom", ] [[package]] @@ -4435,7 +4552,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -4473,7 +4590,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ - "getrandom 0.2.11", + "getrandom", "libredox", "thiserror", ] @@ -4558,7 +4675,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" dependencies = [ "cc", - "getrandom 0.2.11", + "getrandom", "libc", "spin", "untrusted", @@ -4726,7 +4843,7 @@ checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct", "der", - "generic-array 0.14.7", + "generic-array", "pkcs8", "subtle", "zeroize", @@ -4738,7 +4855,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2acea373acb8c21ecb5a23741452acd2593ed44ee3d343e72baaa143bc89d0d5" dependencies = [ - "rand 0.8.5", + "rand", "secp256k1-sys", "serde", ] @@ -4790,15 +4907,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde_bytes" -version = "0.11.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" -dependencies = [ - "serde", -] - [[package]] name = "serde_derive" version = "1.0.193" @@ -4901,7 +5009,7 @@ checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.7", + "digest", ] [[package]] @@ -4918,7 +5026,7 @@ checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.7", + "digest", ] [[package]] @@ -4936,15 +5044,12 @@ dependencies = [ [[package]] name = "sha3" -version = "0.8.2" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd26bc0e7a2e3a7c959bc494caf58b72ee0c71d67704e9520f736ca7e4853ecf" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "block-buffer 0.7.3", - "byte-tools", - "digest 0.8.1", + "digest", "keccak", - "opaque-debug 0.2.3", ] [[package]] @@ -4998,8 +5103,8 @@ version = "2.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ - "digest 0.10.7", - "rand_core 0.6.4", + "digest", + "rand_core", ] [[package]] @@ -5199,15 +5304,6 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" -[[package]] -name = "subtle-encoding" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dcb1ed7b8330c5eed5441052651dd7a12c75e2ed88f2ec024ae1fa3a5e59945" -dependencies = [ - "zeroize", -] - [[package]] name = "supports-color" version = "1.3.1" @@ -5228,6 +5324,29 @@ dependencies = [ "is_ci", ] +[[package]] +name = "symbolic-common" +version = "12.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cccfffbc6bb3bb2d3a26cd2077f4d055f6808d266f9d4d158797a4c60510dfe" +dependencies = [ + "debugid", + "memmap2 0.9.0", + "stable_deref_trait", + "uuid", +] + +[[package]] +name = "symbolic-demangle" +version = "12.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a99812da4020a67e76c4eb41f08c87364c14170495ff780f30dd519c221a68" +dependencies = [ + "cpp_demangle 0.4.3", + "rustc-demangle", + "symbolic-common", +] + [[package]] name = "syn" version = "1.0.109" @@ -5317,7 +5436,7 @@ dependencies = [ "iroha_logger", "iroha_primitives", "parity-scale-codec", - "rand 0.8.5", + "rand", "serde_json", "tempfile", "tokio", @@ -5594,7 +5713,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand 0.8.5", + "rand", "slab", "tokio", "tokio-util", @@ -5743,7 +5862,7 @@ dependencies = [ "httparse", "log", "native-tls", - "rand 0.8.5", + "rand", "rustls", "rustls-native-certs", "sha1", @@ -5907,7 +6026,7 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" dependencies = [ - "getrandom 0.2.11", + "getrandom", ] [[package]] @@ -5940,6 +6059,30 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "w3f-bls" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7335e4c132c28cc43caef6adb339789e599e39adbe78da0c4d547fad48cbc331" +dependencies = [ + "ark-bls12-377", + "ark-bls12-381", + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-serialize-derive", + "arrayref", + "constcat", + "digest", + "rand", + "rand_chacha", + "rand_core", + "sha2", + "sha3", + "thiserror", + "zeroize", +] + [[package]] name = "wait-timeout" version = "0.2.0" @@ -5999,12 +6142,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -6306,7 +6443,7 @@ dependencies = [ "anyhow", "bincode", "cfg-if", - "cpp_demangle", + "cpp_demangle 0.3.5", "gimli", "ittapi", "log", @@ -6362,7 +6499,7 @@ dependencies = [ "memfd", "memoffset", "paste", - "rand 0.8.5", + "rand", "rustix", "sptr", "wasm-encoder 0.36.2", @@ -6762,9 +6899,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" dependencies = [ "curve25519-dalek", - "rand_core 0.6.4", - "serde", - "zeroize", + "rand_core", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index e47154c4890..b961c3d7966 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,7 +116,7 @@ strum = { version = "0.25.0", default-features = false } getset = "0.1.2" hex-literal = "0.4.1" -rand = "0.8.5" +rand = { version = "0.8.5", default-features = false, features = ["getrandom", "alloc"] } warp = { version = "0.3.6", default-features = false } wasmtime = "15.0.0" @@ -253,3 +253,7 @@ members = [ inherits = "release" strip = "symbols" lto = true + +[profile.profiling] +inherits = "release" +debug = "limited" diff --git a/Dockerfile.glibc b/Dockerfile.glibc index 25eb2e0068a..fd4668a6e7e 100644 --- a/Dockerfile.glibc +++ b/Dockerfile.glibc @@ -1,16 +1,13 @@ -#base stage -FROM archlinux:base-devel AS builder +# base stage +FROM debian:bookworm-slim AS builder -# Force-sync packages, install archlinux-keyring, repopulate keys -RUN pacman -Syy -RUN pacman -S archlinux-keyring --noconfirm --disable-download-timeout -RUN rm -rf /etc/pacman.d/gnupg/* && pacman-key --init && pacman-key --populate archlinux +# install required packages +RUN apt-get update -y && \ + apt-get install -y curl build-essential mold -# Install updates -RUN pacman -Syu --noconfirm --disable-download-timeout - -# Set up Rust toolchain -RUN pacman -S rustup mold wget --noconfirm --disable-download-timeout +# set up Rust toolchain +RUN curl https://sh.rustup.rs -sSf | bash -s -- -y +ENV PATH="/root/.cargo/bin:${PATH}" RUN rustup toolchain install nightly-2024-01-12 RUN rustup default nightly-2024-01-12 RUN rustup target add wasm32-unknown-unknown @@ -19,15 +16,18 @@ RUN rustup component add rust-src # builder stage WORKDIR /iroha COPY . . -RUN mold --run cargo build --target x86_64-unknown-linux-gnu --profile deploy +ARG PROFILE="deploy" +ARG RUSTFLAGS="" +ARG FEATURES="" +ARG CARGOFLAGS="" +RUN RUSTFLAGS="${RUSTFLAGS}" mold --run cargo ${CARGOFLAGS} build --target x86_64-unknown-linux-gnu --profile "${PROFILE}" --features "${FEATURES}" # final image -FROM alpine:3.18 +FROM debian:bookworm-slim -ENV GLIBC_REPO=https://github.com/sgerrand/alpine-pkg-glibc -ENV GLIBC_VERSION=2.35-r1 +ARG PROFILE="deploy" ARG STORAGE=/storage -ARG TARGET_DIR=/iroha/target/x86_64-unknown-linux-gnu/deploy +ARG TARGET_DIR=/iroha/target/x86_64-unknown-linux-gnu/${PROFILE} ENV BIN_PATH=/usr/local/bin/ ENV CONFIG_DIR=/config ENV IROHA2_CONFIG_PATH=$CONFIG_DIR/config.json @@ -39,12 +39,9 @@ ENV UID=1001 ENV GID=1001 RUN set -ex && \ - apk --update add libstdc++ curl ca-certificates gcompat && \ - for pkg in glibc-${GLIBC_VERSION} glibc-bin-${GLIBC_VERSION}; \ - do curl -sSL ${GLIBC_REPO}/releases/download/${GLIBC_VERSION}/${pkg}.apk -o /tmp/${pkg}.apk; done && \ - apk add --force-overwrite --allow-untrusted /tmp/*.apk && \ - rm -v /tmp/*.apk && \ - addgroup -g $GID $USER && \ + apt-get update -y && \ + apt-get install -y curl ca-certificates && \ + addgroup --gid $GID $USER && \ adduser \ --disabled-password \ --gecos "" \ diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 82ac7be7002..e8f89706770 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -1,32 +1,34 @@ ## Maintainers -Maintainers of Hyperledger Iroha project -are supposed to help contributors by explain them project details, -such as architecture, process, existing issues. 
+Maintainers of the Hyperledger Iroha project
+are supposed to help contributors by explaining project details to them,
+such as architecture, process, and existing issues.
 
-This is the list of maintainers, including their email address for direct communications:
+This is the list of maintainers, including their email addresses for direct communication:
 
-| Name | GitHub Id | email | Area of expertise |
-|------------------------|--------------------------|--------------------------------|---------------------------------|
-| Makoto Takemiya | @takemiyamakoto | takemiya@soramitsu.co.jp | Product vision |
-| Ryu Okada | @ryuo88 | okada@soramitsu.co.jp | Product vision |
-| Nikolai Iushkevich | @neewy | n.yushkevich@hotmail.com | Development |
-| Fyodor Muratov | @muratovv | fyodor@soramitsu.co.jp | Architecture, Java library, QA |
-| Andrei Lebedev | @lebdron | andrei@soramitsu.co.jp | Research |
-| Sergei Solonets | @Solonets | ssolonets@gmail.com | Development |
-| Yanno Ban | @yannoban | ban.yanno@nbc.org.kh | Development |
-| Dumitru Savva | @x3medima17 | savva@soramitsu.co.jp | Development |
-| Nikita Alekseev | @nickaleks | alekseev@soramitsu.co.jp | Development |
-| Victor Drobny | @victordrobny | drobny@soramitsu.co.jp | Development |
-| Bulat Nasrulin | @grimadas | bulat@soramitsu.co.jp | Development |
-| Kamil Salakhiev | @kamilsa | kamil@soramitsu.co.jp | Development |
-| Igor Egorov | @igor-egorov | igor@soramitsu.co.jp | Development, Android library |
-| Konstantin Munichev | @luckychess | konstantin@soramitsu.co.jp | Security |
-| Evgenii Mininbaev | @l4l | evgenii@soramitsu.co.jp | Security, Python library |
-| Vyacheslav Bikbaev | @laSinteZ | viacheslav@soramitsu.co.jp | Documentation, NodeJS library |
-| Arseniy Fokin | @stinger112 | stinger112@gmail.com | NodeJS library |
-| Alexey Chernyshov | @Alexey-N-Chernyshov | chernyshov@soramitsu.co.jp | Development |
-| Artyom Bakhtin | @bakhtin | a@bakhtin.net | Ansible, Jenkins, artifacts |
-| Anatoly Tyukushin | @tyvision | tyukushin@soramitsu.co.jp | Ansible, Jenkins |
-| Nikita Puzankov | @humb1t | puzankov@soramitsu.co.jp | Development |
-| Egor Ivkov | @eadventurous | ivkov@soramitsu.co.jp | Development |
+| Name | GitHub Id | email | Area of expertise |
+| --------------------- | ---------------------------------------------------- | ------------------------------- | ----------------------------------------------------------------------------- |
+| Makoto Takemiya | [@takemiyamakoto](https://github.com/takemiyamakoto) | takemiya@soramitsu.co.jp | Product vision |
+| Dmitri Venger | [@dmitrivenger](https://github.com/dmitrivenger) | venger@soramitsu.co.jp | Project manager |
+| Bogdan Mingela | [@Mingela](https://github.com/Mingela) | mingela@soramitsu.co.jp | Iroha Team Lead |
+| Aleksandr Petrosyan | [@appetrosyan](https://github.com/appetrosyan) | ap886@cantab.ac.uk | Iroha 2 architect |
+| Daniil Polyakov | [@Arjentix](https://github.com/Arjentix) | daniil.polyakov@soramitsu.co.jp | Development: Rust, C++ |
+| Marin Veršić | [@mversic](https://github.com/mversic) | versic@soramitsu.co.jp | Tech lead, development: Rust, Java |
+| Sam H. 
Smith | [@SamHSmith](https://github.com/SamHSmith) | smith@soramitsu.co.jp | Development: Rust, C | +| Shanin Roman | [@Erigara](https://github.com/Erigara) | shanin@soramitsu.co.jp | Development: Rust | +| Dmitry Balashov | [@0x009922](https://github.com/0x009922) | dbalashov@soramitsu.co.jp | Development: Rust, TypeScript, JavaScript | +| Grzegorz Bazior | [@baziorek](https://github.com/baziorek) | g.bazior@yodiss.pl | Development: C++, Python | +| Shunkichi Sato | [@s8sato](https://github.com/s8sato) | s.sato@soramitsu.co.jp | Development: Rust | +| Andrey Kostiuchenko | [@arndey](https://github.com/arndey) | kostiuchenko@soramitsu.co.jp | Developer: Java, Kotlin | +| Timur Guskov | [@gv-timur](https://github.com/gv-timur) | guskov@soramitsu.co.jp | Development: Java | +| Dmitriy Creed | [@Cre-eD](https://github.com/Cre-eD) | creed@soramitsu.co.jp | DevSecOps | +| Vasily Zyabkin | [@BAStos525](https://github.com/BAStos525) | zyabkin@soramitsu.co.jp | DevOps | +| Ekaterina Mekhnetsova | [@outoftardis](https://github.com/outoftardis) | mekhnetsova@soramitsu.co.jp | Documentation | +| William Richter | [@WRRicht3r](https://github.com/WRRicht3r) | richter@soramitsu.co.jp | Documentation | +| Victor Gridnevsky | [@6r1d](https://github.com/6r1d) | gridnevsky@soramitsu.co.jp | Community manager, documentation, development: JavaScript, TypeScript, Python | +| Alexander Strokov | [@astrokov7](https://github.com/astrokov7) | strokov@soramitsu.co.jp | QA, Python | +| Michael Timofeev | [@timofeevmd](https://github.com/timofeevmd) | timofeev@soramitsu.co.jp | QA | +| Nikita Strygin | [@DCNick3](https://github.com/DCNick3) | moslike6@gmail.com | Development: Rust | +| Bogdan Yamkovoy | [@yamkovoy](https://github.com/yamkovoy) | yamkovoy@soramitsu.co.jp | Documentation | +| Vladislav Amuzinski | [@VAmuzing](https://github.com/VAmuzing) | amuzinski@soramitsu.co.jp | Development: Rust | +| Artem Stukalov | [@Stukalov-A-M](https://github.com/Stukalov-A-M) | stukalov@soramitsu.co.jp | Documentation: examples | diff --git a/cli/src/lib.rs b/cli/src/lib.rs index 6de12024e6a..13c69571e59 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -274,7 +274,7 @@ impl Iroha { &config.block_sync, sumeragi.clone(), Arc::clone(&kura), - PeerId::new(&config.torii.p2p_addr, &config.public_key), + PeerId::new(config.torii.p2p_addr.clone(), config.public_key.clone()), network.clone(), ) .start(); @@ -459,7 +459,8 @@ impl Iroha { // FIXME: don't like neither the message nor inability to throw Result to the outside .expect("Cannot proceed without working subscriptions"); - // NOTE: Triggered by tokio::select + // See https://github.com/tokio-rs/tokio/issues/5616 and + // https://github.com/rust-lang/rust-clippy/issues/10636 #[allow(clippy::redundant_pub_crate)] loop { tokio::select! 
{ diff --git a/cli/src/samples.rs b/cli/src/samples.rs index 05858803f19..94d2495b884 100644 --- a/cli/src/samples.rs +++ b/cli/src/samples.rs @@ -30,16 +30,10 @@ pub fn get_trusted_peers(public_key: Option<&PublicKey>) -> HashSet { ), ] .iter() - .map(|(a, k)| PeerId { - address: a.parse().expect("Valid"), - public_key: PublicKey::from_str(k).unwrap(), - }) + .map(|(a, k)| PeerId::new(a.parse().expect("Valid"), PublicKey::from_str(k).unwrap())) .collect(); if let Some(pubkey) = public_key { - trusted_peers.insert(PeerId { - address: DEFAULT_TORII_P2P_ADDR.clone(), - public_key: pubkey.clone(), - }); + trusted_peers.insert(PeerId::new(DEFAULT_TORII_P2P_ADDR.clone(), pubkey.clone())); } trusted_peers } diff --git a/client/benches/tps/utils.rs b/client/benches/tps/utils.rs index e11e2febe90..a8e2c086c71 100644 --- a/client/benches/tps/utils.rs +++ b/client/benches/tps/utils.rs @@ -225,9 +225,7 @@ impl MeasurerUnit { .with_instructions([instruction]); transaction.set_nonce(nonce); // Use nonce to avoid transaction duplication within the same thread - let transaction = submitter - .sign_transaction(transaction) - .expect("Failed to sign transaction"); + let transaction = submitter.sign_transaction(transaction); if let Err(error) = submitter.submit_transaction(&transaction) { iroha_logger::error!(?error, "Failed to submit transaction"); } diff --git a/client/examples/tutorial.rs b/client/examples/tutorial.rs index cead2516b4a..40c56a1a3aa 100644 --- a/client/examples/tutorial.rs +++ b/client/examples/tutorial.rs @@ -74,9 +74,7 @@ fn domain_registration_test(config: &Configuration) -> Result<(), Error> { // Prepare a transaction let metadata = UnlimitedMetadata::default(); let instructions: Vec = vec![create_looking_glass.into()]; - let tx = iroha_client - .build_transaction(instructions, metadata) - .wrap_err("Error building a domain registration transaction")?; + let tx = iroha_client.build_transaction(instructions, metadata); // #endregion domain_register_example_prepare_tx // #region domain_register_example_submit_tx @@ -148,7 +146,7 @@ fn account_registration_test(config: &Configuration) -> Result<(), Error> { // Account's RegisterBox let metadata = UnlimitedMetadata::new(); let instructions: Vec = vec![create_account.into()]; - let tx = iroha_client.build_transaction(instructions, metadata)?; + let tx = iroha_client.build_transaction(instructions, metadata); // #endregion register_account_prepare_tx // #region register_account_submit_tx diff --git a/client/src/client.rs b/client/src/client.rs index 427f22effc1..638c19a5305 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -63,30 +63,17 @@ pub type QueryResult = core::result::Result; /// Trait for signing transactions pub trait Sign { /// Sign transaction with provided key pair. 
- /// - /// # Errors - /// - /// Fails if signature creation fails - fn sign( - self, - key_pair: crate::crypto::KeyPair, - ) -> Result; + fn sign(self, key_pair: &crate::crypto::KeyPair) -> SignedTransaction; } impl Sign for TransactionBuilder { - fn sign( - self, - key_pair: crate::crypto::KeyPair, - ) -> Result { + fn sign(self, key_pair: &crate::crypto::KeyPair) -> SignedTransaction { self.sign(key_pair) } } impl Sign for SignedTransaction { - fn sign( - self, - key_pair: crate::crypto::KeyPair, - ) -> Result { + fn sign(self, key_pair: &crate::crypto::KeyPair) -> SignedTransaction { self.sign(key_pair) } } @@ -468,7 +455,7 @@ impl Client { &self, instructions: impl Into, metadata: UnlimitedMetadata, - ) -> Result { + ) -> SignedTransaction { let tx_builder = TransactionBuilder::new(self.chain_id.clone(), self.account_id.clone()); let mut tx_builder = match instructions.into() { @@ -484,30 +471,23 @@ impl Client { tx_builder.set_nonce(nonce); }; - tx_builder - .with_metadata(metadata) - .sign(self.key_pair.clone()) - .wrap_err("Failed to sign transaction") + tx_builder.with_metadata(metadata).sign(&self.key_pair) } /// Signs transaction /// /// # Errors /// Fails if signature generation fails - pub fn sign_transaction(&self, transaction: Tx) -> Result { - transaction - .sign(self.key_pair.clone()) - .wrap_err("Failed to sign transaction") + pub fn sign_transaction(&self, transaction: Tx) -> SignedTransaction { + transaction.sign(&self.key_pair) } /// Signs query /// /// # Errors /// Fails if signature generation fails - pub fn sign_query(&self, query: QueryBuilder) -> Result { - query - .sign(self.key_pair.clone()) - .wrap_err("Failed to sign query") + pub fn sign_query(&self, query: QueryBuilder) -> SignedQuery { + query.sign(&self.key_pair) } /// Instructions API entry point. Submits one Iroha Special Instruction to `Iroha` peers. @@ -557,7 +537,7 @@ impl Client { instructions: impl IntoIterator, metadata: UnlimitedMetadata, ) -> Result> { - self.submit_transaction(&self.build_transaction(instructions, metadata)?) + self.submit_transaction(&self.build_transaction(instructions, metadata)) } /// Submit a prebuilt transaction. @@ -746,15 +726,12 @@ impl Client { instructions: impl IntoIterator, metadata: UnlimitedMetadata, ) -> Result> { - let transaction = self.build_transaction(instructions, metadata)?; + let transaction = self.build_transaction(instructions, metadata); self.submit_transaction_blocking(&transaction) } /// Lower-level Query API entry point. Prepares an http-request and returns it with an http-response handler. /// - /// # Errors - /// Fails if query signing fails. - /// /// # Examples /// /// ```ignore @@ -817,12 +794,12 @@ impl Client { pagination: Pagination, sorting: Sorting, fetch_size: FetchSize, - ) -> Result<(DefaultRequestBuilder, QueryResponseHandler)> + ) -> (DefaultRequestBuilder, QueryResponseHandler) where >::Error: Into, { let query_builder = QueryBuilder::new(request, self.account_id.clone()).with_filter(filter); - let request = self.sign_query(query_builder)?.encode_versioned(); + let request = self.sign_query(query_builder).encode_versioned(); let query_request = QueryRequest { torii_url: self.torii_url.clone(), @@ -834,10 +811,10 @@ impl Client { ), }; - Ok(( + ( query_request.clone().assemble(), QueryResponseHandler::new(query_request), - )) + ) } /// Create a request with pagination, sorting and add the filter. 
@@ -858,7 +835,7 @@ impl Client { { iroha_logger::trace!(?request, %pagination, ?sorting, ?filter); let (req, mut resp_handler) = - self.prepare_query_request::(request, filter, pagination, sorting, fetch_size)?; + self.prepare_query_request::(request, filter, pagination, sorting, fetch_size); let response = req.build()?.send()?; let value = resp_handler.handle(&response)?; @@ -1000,57 +977,37 @@ impl Client { ) } - /// Check if two transactions are the same. Compare their contents excluding the creation time. - fn equals_excluding_creation_time( - first: &TransactionPayload, - second: &TransactionPayload, - ) -> bool { - first.authority() == second.authority() - && first.instructions() == second.instructions() - && first.time_to_live() == second.time_to_live() - && first.metadata().eq(second.metadata()) - } - - /// Find the original transaction in the pending local tx - /// queue. Should be used for an MST case. Takes pagination as - /// parameter. + /// Find the original transaction in the local pending tx queue. + /// Should be used for an MST case. /// /// # Errors - /// - if subscribing to websocket fails - pub fn get_original_transaction_with_pagination( + /// - if sending request fails + pub fn get_original_matching_transactions( &self, transaction: &SignedTransaction, retry_count: u32, retry_in: Duration, - pagination: Pagination, - ) -> Result> { - let pagination = pagination.into_query_parameters(); + ) -> Result> { + let url = self + .torii_url + .join(crate::config::torii::MATCHING_PENDING_TRANSACTIONS) + .expect("Valid URI"); + let body = transaction.encode(); + for _ in 0..retry_count { - let response = DefaultRequestBuilder::new( - HttpMethod::GET, - self.torii_url - .join(crate::config::torii::PENDING_TRANSACTIONS) - .expect("Valid URI"), - ) - .params(pagination.clone()) - .headers(self.headers.clone()) - .build()? - .send()?; + let response = DefaultRequestBuilder::new(HttpMethod::POST, url.clone()) + .headers(self.headers.clone()) + .header(http::header::CONTENT_TYPE, APPLICATION_JSON) + .body(body.clone()) + .build()? + .send()?; if response.status() == StatusCode::OK { let pending_transactions: Vec = DecodeAll::decode_all(&mut response.body().as_slice())?; - let transaction = pending_transactions - .into_iter() - .find(|pending_transaction| { - Self::equals_excluding_creation_time( - pending_transaction.payload(), - transaction.payload(), - ) - }); - if transaction.is_some() { - return Ok(transaction); + if !pending_transactions.is_empty() { + return Ok(pending_transactions); } thread::sleep(retry_in); } else { @@ -1061,26 +1018,7 @@ impl Client { )); } } - Ok(None) - } - - /// Find the original transaction in the local pending tx queue. - /// Should be used for an MST case. 
- /// - /// # Errors - /// - if sending request fails - pub fn get_original_transaction( - &self, - transaction: &SignedTransaction, - retry_count: u32, - retry_in: Duration, - ) -> Result> { - self.get_original_transaction_with_pagination( - transaction, - retry_count, - retry_in, - Pagination::default(), - ) + Ok(Vec::new()) } /// Get value of config on peer @@ -1685,11 +1623,8 @@ mod tests { .expect("Client config should build as all required fields were provided"); let client = Client::new(&cfg).expect("Invalid client configuration"); - let build_transaction = || { - client - .build_transaction(Vec::::new(), UnlimitedMetadata::new()) - .unwrap() - }; + let build_transaction = + || client.build_transaction(Vec::::new(), UnlimitedMetadata::new()); let tx1 = build_transaction(); let tx2 = build_transaction(); assert_ne!(tx1.payload().hash(), tx2.payload().hash()); @@ -1708,7 +1643,7 @@ mod tests { tx.set_ttl(transaction_ttl); } - client.sign_transaction(tx).unwrap() + client.sign_transaction(tx) }; assert_eq!(tx1.payload().hash(), tx2.payload().hash()); } diff --git a/client/tests/integration/asset.rs b/client/tests/integration/asset.rs index e1f7723a806..2758c751962 100644 --- a/client/tests/integration/asset.rs +++ b/client/tests/integration/asset.rs @@ -106,7 +106,7 @@ fn client_add_asset_quantity_to_existing_asset_should_increase_asset_amount() -> AssetId::new(asset_definition_id.clone(), account_id.clone()), ); let instructions: [InstructionBox; 2] = [create_asset.into(), mint.into()]; - let tx = test_client.build_transaction(instructions, metadata)?; + let tx = test_client.build_transaction(instructions, metadata); test_client.submit_transaction(&tx)?; test_client.poll_request(client::asset::by_account_id(account_id), |result| { let assets = result.collect::>>().expect("Valid"); @@ -137,7 +137,7 @@ fn client_add_big_asset_quantity_to_existing_asset_should_increase_asset_amount( AssetId::new(asset_definition_id.clone(), account_id.clone()), ); let instructions: [InstructionBox; 2] = [create_asset.into(), mint.into()]; - let tx = test_client.build_transaction(instructions, metadata)?; + let tx = test_client.build_transaction(instructions, metadata); test_client.submit_transaction(&tx)?; test_client.poll_request(client::asset::by_account_id(account_id), |result| { let assets = result.collect::>>().expect("Valid"); @@ -169,7 +169,7 @@ fn client_add_asset_with_decimal_should_increase_asset_amount() -> Result<()> { AssetId::new(asset_definition_id.clone(), account_id.clone()), ); let instructions: [InstructionBox; 2] = [create_asset.into(), mint.into()]; - let tx = test_client.build_transaction(instructions, metadata)?; + let tx = test_client.build_transaction(instructions, metadata); test_client.submit_transaction(&tx)?; test_client.poll_request(client::asset::by_account_id(account_id.clone()), |result| { let assets = result.collect::>>().expect("Valid"); @@ -281,8 +281,7 @@ fn find_rate_and_make_exchange_isi_should_succeed() { let grant_asset_transfer_tx = TransactionBuilder::new(chain_id, asset_id.account_id().clone()) .with_instructions([allow_alice_to_transfer_asset]) - .sign(owner_keypair) - .expect("Failed to sign seller transaction"); + .sign(&owner_keypair); test_client .submit_transaction_blocking(&grant_asset_transfer_tx) diff --git a/client/tests/integration/burn_public_keys.rs b/client/tests/integration/burn_public_keys.rs index 9b73edb1b34..83552134f18 100644 --- a/client/tests/integration/burn_public_keys.rs +++ b/client/tests/integration/burn_public_keys.rs @@ -18,13 
+18,10 @@ fn submit( let tx = if let Some((account_id, keypair)) = submitter { TransactionBuilder::new(chain_id, account_id) .with_instructions(instructions) - .sign(keypair) - .unwrap() + .sign(&keypair) } else { - let tx = client - .build_transaction(instructions, UnlimitedMetadata::default()) - .unwrap(); - client.sign_transaction(tx).unwrap() + let tx = client.build_transaction(instructions, UnlimitedMetadata::default()); + client.sign_transaction(tx) }; (tx.hash(), client.submit_transaction_blocking(&tx)) diff --git a/client/tests/integration/domain_owner.rs b/client/tests/integration/domain_owner.rs index 2ffc467ac42..da184e675b7 100644 --- a/client/tests/integration/domain_owner.rs +++ b/client/tests/integration/domain_owner.rs @@ -29,7 +29,7 @@ fn domain_owner_domain_permissions() -> Result<()> { // Asset definitions can't be registered by "bob@kingdom" by default let transaction = TransactionBuilder::new(chain_id.clone(), bob_id.clone()) .with_instructions([Register::asset_definition(coin.clone())]) - .sign(bob_keypair.clone())?; + .sign(&bob_keypair); let err = test_client .submit_transaction_blocking(&transaction) .expect_err("Tx should fail due to permissions"); @@ -57,7 +57,7 @@ fn domain_owner_domain_permissions() -> Result<()> { test_client.submit_blocking(Grant::permission(token.clone(), bob_id.clone()))?; let transaction = TransactionBuilder::new(chain_id, bob_id.clone()) .with_instructions([Register::asset_definition(coin)]) - .sign(bob_keypair)?; + .sign(&bob_keypair); test_client.submit_transaction_blocking(&transaction)?; test_client.submit_blocking(Revoke::permission(token, bob_id.clone()))?; @@ -175,7 +175,7 @@ fn domain_owner_asset_definition_permissions() -> Result<()> { let coin = AssetDefinition::quantity(coin_id.clone()); let transaction = TransactionBuilder::new(chain_id, bob_id.clone()) .with_instructions([Register::asset_definition(coin)]) - .sign(bob_keypair)?; + .sign(&bob_keypair); test_client.submit_transaction_blocking(&transaction)?; // check that "alice@wonderland" as owner of domain can transfer asset definitions in her domain @@ -246,7 +246,7 @@ fn domain_owner_asset_permissions() -> Result<()> { Register::asset_definition(coin), Register::asset_definition(store), ]) - .sign(bob_keypair)?; + .sign(&bob_keypair); test_client.submit_transaction_blocking(&transaction)?; // check that "alice@wonderland" as owner of domain can register and unregister assets in her domain diff --git a/client/tests/integration/events/data.rs b/client/tests/integration/events/data.rs index 7c6547cff9a..065ac37aa14 100644 --- a/client/tests/integration/events/data.rs +++ b/client/tests/integration/events/data.rs @@ -150,9 +150,7 @@ fn transaction_execution_should_produce_events( // submit transaction to produce events init_receiver.recv()?; - let transaction = client - .build_transaction(executable, UnlimitedMetadata::new()) - .unwrap(); + let transaction = client.build_transaction(executable, UnlimitedMetadata::new()); client.submit_transaction_blocking(&transaction)?; // assertion diff --git a/client/tests/integration/events/pipeline.rs b/client/tests/integration/events/pipeline.rs index ba4574d46e3..b4ee1520439 100644 --- a/client/tests/integration/events/pipeline.rs +++ b/client/tests/integration/events/pipeline.rs @@ -51,7 +51,7 @@ fn test_with_instruction_and_status_and_port( // Given let submitter = client; - let transaction = submitter.build_transaction(instruction, UnlimitedMetadata::new())?; + let transaction = submitter.build_transaction(instruction, 
UnlimitedMetadata::new()); let hash = transaction.payload().hash(); let mut handles = Vec::new(); for listener in clients { diff --git a/client/tests/integration/multisignature_transaction.rs b/client/tests/integration/multisignature_transaction.rs index 4cf5739788b..3095a9d9a5e 100644 --- a/client/tests/integration/multisignature_transaction.rs +++ b/client/tests/integration/multisignature_transaction.rs @@ -55,8 +55,8 @@ fn multisignature_transactions_should_wait_for_all_signatures() -> Result<()> { client_configuration.private_key = private_key1; let client = Client::new(&client_configuration)?; let instructions = [mint_asset.clone()]; - let transaction = client.build_transaction(instructions, UnlimitedMetadata::new())?; - client.submit_transaction(&client.sign_transaction(transaction)?)?; + let transaction = client.build_transaction(instructions, UnlimitedMetadata::new()); + client.submit_transaction(&client.sign_transaction(transaction))?; thread::sleep(pipeline_time); //Then @@ -81,11 +81,12 @@ fn multisignature_transactions_should_wait_for_all_signatures() -> Result<()> { client_configuration.private_key = private_key2; let client_2 = Client::new(&client_configuration)?; let instructions = [mint_asset]; - let transaction = client_2.build_transaction(instructions, UnlimitedMetadata::new())?; + let transaction = client_2.build_transaction(instructions, UnlimitedMetadata::new()); let transaction = client_2 - .get_original_transaction(&transaction, 3, Duration::from_millis(100))? + .get_original_matching_transactions(&transaction, 3, Duration::from_millis(100))? + .pop() .expect("Found no pending transaction for this account."); - client_2.submit_transaction(&client_2.sign_transaction(transaction)?)?; + client_2.submit_transaction(&client_2.sign_transaction(transaction))?; thread::sleep(pipeline_time); let assets = client_1 .request(request)? 
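The updated multisignature flow in the test above no longer compares payloads client-side: `get_original_matching_transactions` POSTs the encoded transaction to the peer's matching endpoint and returns every pending transaction whose payload matches, which is why the old `get_original_transaction`/`equals_excluding_creation_time` pair could be deleted. A short sketch of the second signer's side, assuming `client_2` and `mint_asset` are set up as in the test above:

```rust
// Build a transaction carrying the same instructions as the pending original.
let transaction = client_2.build_transaction([mint_asset], UnlimitedMetadata::new());

// The peer returns a Vec of matching pending transactions (polled up to
// 3 times, 100 ms apart) instead of an Option with a single transaction.
let original = client_2
    .get_original_matching_transactions(&transaction, 3, Duration::from_millis(100))?
    .pop()
    .expect("Found no pending transaction for this account.");

// Add this signer's signature and resubmit; signing is now infallible.
client_2.submit_transaction(&client_2.sign_transaction(original))?;
```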
diff --git a/client/tests/integration/non_mintable.rs b/client/tests/integration/non_mintable.rs index c80be2ca4d9..b07df3158ff 100644 --- a/client/tests/integration/non_mintable.rs +++ b/client/tests/integration/non_mintable.rs @@ -28,7 +28,7 @@ fn non_mintable_asset_can_be_minted_once_but_not_twice() -> Result<()> { ); let instructions: [InstructionBox; 2] = [create_asset.into(), mint.clone().into()]; - let tx = test_client.build_transaction(instructions, metadata)?; + let tx = test_client.build_transaction(instructions, metadata); // We can register and mint the non-mintable token test_client.submit_transaction(&tx)?; diff --git a/client/tests/integration/offline_peers.rs b/client/tests/integration/offline_peers.rs index fc14502caa3..bc14bce0376 100644 --- a/client/tests/integration/offline_peers.rs +++ b/client/tests/integration/offline_peers.rs @@ -8,6 +8,7 @@ use iroha_client::{ }; use iroha_config::iroha::Configuration; use iroha_crypto::KeyPair; +use iroha_primitives::addr::socket_addr; use test_network::*; use tokio::runtime::Runtime; @@ -51,10 +52,10 @@ fn register_offline_peer() -> Result<()> { check_status(&peer_clients, 1); - let address = "128.0.0.2:8085".parse()?; + let address = socket_addr!(128.0.0.2:8085); let key_pair = KeyPair::generate().unwrap(); let public_key = key_pair.public_key().clone(); - let peer_id = PeerId::new(&address, &public_key); + let peer_id = PeerId::new(address, public_key); let register_peer = Register::peer(DataModelPeer::new(peer_id)); // Wait for some time to allow peers to connect diff --git a/client/tests/integration/permissions.rs b/client/tests/integration/permissions.rs index ec2ea6811e6..c6ccc522f29 100644 --- a/client/tests/integration/permissions.rs +++ b/client/tests/integration/permissions.rs @@ -6,6 +6,7 @@ use iroha_client::{ crypto::KeyPair, data_model::prelude::*, }; +use iroha_data_model::permission::PermissionToken; use iroha_genesis::GenesisNetwork; use serde_json::json; use test_network::{PeerBuilder, *}; @@ -98,8 +99,7 @@ fn permissions_disallow_asset_transfer() { ); let transfer_tx = TransactionBuilder::new(chain_id, mouse_id) .with_instructions([transfer_asset]) - .sign(mouse_keypair) - .expect("Failed to sign mouse transaction"); + .sign(&mouse_keypair); let err = iroha_client .submit_transaction_blocking(&transfer_tx) .expect_err("Transaction was not rejected."); @@ -150,8 +150,7 @@ fn permissions_disallow_asset_burn() { ); let burn_tx = TransactionBuilder::new(chain_id, mouse_id) .with_instructions([burn_asset]) - .sign(mouse_keypair) - .expect("Failed to sign mouse transaction"); + .sign(&mouse_keypair); let err = iroha_client .submit_transaction_blocking(&burn_tx) @@ -242,8 +241,7 @@ fn permissions_differ_not_only_by_names() { let grant_hats_access_tx = TransactionBuilder::new(chain_id.clone(), mouse_id.clone()) .with_instructions([allow_alice_to_set_key_value_in_hats]) - .sign(mouse_keypair.clone()) - .expect("Failed to sign mouse transaction"); + .sign(&mouse_keypair); client .submit_transaction_blocking(&grant_hats_access_tx) .expect("Failed grant permission to modify Mouse's hats"); @@ -279,8 +277,7 @@ fn permissions_differ_not_only_by_names() { let grant_shoes_access_tx = TransactionBuilder::new(chain_id, mouse_id) .with_instructions([allow_alice_to_set_key_value_in_shoes]) - .sign(mouse_keypair) - .expect("Failed to sign mouse transaction"); + .sign(&mouse_keypair); client .submit_transaction_blocking(&grant_shoes_access_tx) @@ -331,8 +328,7 @@ fn stored_vs_granted_token_payload() -> Result<()> { let transaction = 
TransactionBuilder::new(chain_id, mouse_id) .with_instructions([allow_alice_to_set_key_value_in_mouse_asset]) - .sign(mouse_keypair) - .expect("Failed to sign mouse transaction"); + .sign(&mouse_keypair); iroha_client .submit_transaction_blocking(&transaction) .expect("Failed to grant permission to alice."); @@ -381,3 +377,50 @@ fn permission_tokens_are_unified() { .submit_blocking(allow_alice_to_transfer_rose_2) .expect_err("permission tokens are not unified"); } + +#[test] +fn associated_permission_tokens_removed_on_unregister() { + let (_rt, _peer, iroha_client) = ::new().with_port(11_240).start_with_runtime(); + wait_for_genesis_committed(&[iroha_client.clone()], 0); + + let bob_id: AccountId = "bob@wonderland".parse().expect("Valid"); + let kingdom_id: DomainId = "kingdom".parse().expect("Valid"); + let kingdom = Domain::new(kingdom_id.clone()); + + // register kingdom and give bob permissions in this domain + let register_domain = Register::domain(kingdom); + let bob_to_set_kv_in_domain_token = PermissionToken::new( + "CanSetKeyValueInDomain".parse().unwrap(), + &json!({ "domain_id": kingdom_id }), + ); + let allow_bob_to_set_kv_in_domain = + Grant::permission(bob_to_set_kv_in_domain_token.clone(), bob_id.clone()); + + iroha_client + .submit_all_blocking([ + InstructionBox::from(register_domain), + allow_bob_to_set_kv_in_domain.into(), + ]) + .expect("failed to register domain and grant permission"); + + // check that bob indeed has the granted permission + assert!(iroha_client + .request(client::permission::by_account_id(bob_id.clone())) + .and_then(std::iter::Iterator::collect::>>) + .expect("failed to get permissions for bob") + .into_iter() + .any(|token| { token == bob_to_set_kv_in_domain_token })); + + // unregister kingdom + iroha_client + .submit_blocking(Unregister::domain(kingdom_id)) + .expect("failed to unregister domain"); + + // check that the permission is removed from bob + assert!(iroha_client + .request(client::permission::by_account_id(bob_id)) + .and_then(std::iter::Iterator::collect::>>) + .expect("failed to get permissions for bob") + .into_iter() + .all(|token| { token != bob_to_set_kv_in_domain_token })); +} diff --git a/client/tests/integration/queries/mod.rs b/client/tests/integration/queries/mod.rs index df0104e07bc..d654c8fc83b 100644 --- a/client/tests/integration/queries/mod.rs +++ b/client/tests/integration/queries/mod.rs @@ -49,7 +49,7 @@ fn live_query_is_dropped_after_smart_contract_end() -> Result<()> { let transaction = client.build_transaction( WasmSmartContract::from_compiled(wasm), UnlimitedMetadata::default(), - )?; + ); client.submit_transaction_blocking(&transaction)?; let metadata_value = client.request(FindAccountKeyValueByIdAndKey::new( diff --git a/client/tests/integration/roles.rs b/client/tests/integration/roles.rs index e91909837ef..a9102dc718d 100644 --- a/client/tests/integration/roles.rs +++ b/client/tests/integration/roles.rs @@ -80,7 +80,7 @@ fn register_and_grant_role_for_metadata_access() -> Result<()> { let grant_role = Grant::role(role_id.clone(), alice_id.clone()); let grant_role_tx = TransactionBuilder::new(chain_id, mouse_id.clone()) .with_instructions([grant_role]) - .sign(mouse_key_pair)?; + .sign(&mouse_key_pair); test_client.submit_transaction_blocking(&grant_role_tx)?; // Alice modifies Mouse's metadata diff --git a/client/tests/integration/smartcontracts/Cargo.toml b/client/tests/integration/smartcontracts/Cargo.toml index 1ab1801377d..eaba631dd4c 100644 --- a/client/tests/integration/smartcontracts/Cargo.toml +++
b/client/tests/integration/smartcontracts/Cargo.toml @@ -37,6 +37,7 @@ parity-scale-codec = { version = "3.2.1", default-features = false } anyhow = { version = "1.0.71", default-features = false } serde = { version = "1.0.151", default-features = false } serde_json = { version = "1.0.91", default-features = false } +getrandom = { version = "0.2", features = ["custom"] } lol_alloc = "0.4.0" panic-halt = "0.2.0" diff --git a/client/tests/integration/smartcontracts/create_nft_for_every_user_trigger/Cargo.toml b/client/tests/integration/smartcontracts/create_nft_for_every_user_trigger/Cargo.toml index a01eeabcb9e..dcb0c06e985 100644 --- a/client/tests/integration/smartcontracts/create_nft_for_every_user_trigger/Cargo.toml +++ b/client/tests/integration/smartcontracts/create_nft_for_every_user_trigger/Cargo.toml @@ -15,3 +15,4 @@ iroha_trigger.workspace = true panic-halt.workspace = true lol_alloc.workspace = true +getrandom.workspace = true diff --git a/client/tests/integration/smartcontracts/create_nft_for_every_user_trigger/src/lib.rs b/client/tests/integration/smartcontracts/create_nft_for_every_user_trigger/src/lib.rs index d9df633498d..94db029b335 100644 --- a/client/tests/integration/smartcontracts/create_nft_for_every_user_trigger/src/lib.rs +++ b/client/tests/integration/smartcontracts/create_nft_for_every_user_trigger/src/lib.rs @@ -13,6 +13,8 @@ use lol_alloc::{FreeListAllocator, LockedAllocator}; #[global_allocator] static ALLOC: LockedAllocator = LockedAllocator::new(FreeListAllocator::new()); +getrandom::register_custom_getrandom!(iroha_trigger::stub_getrandom); + #[iroha_trigger::main] fn main(_owner: AccountId, _event: Event) { iroha_trigger::log::info!("Executing trigger"); diff --git a/client/tests/integration/smartcontracts/executor_with_admin/Cargo.toml b/client/tests/integration/smartcontracts/executor_with_admin/Cargo.toml index c48ea913d35..440bb0ac557 100644 --- a/client/tests/integration/smartcontracts/executor_with_admin/Cargo.toml +++ b/client/tests/integration/smartcontracts/executor_with_admin/Cargo.toml @@ -16,3 +16,4 @@ iroha_schema.workspace = true panic-halt.workspace = true lol_alloc.workspace = true +getrandom.workspace = true diff --git a/client/tests/integration/smartcontracts/executor_with_admin/src/lib.rs b/client/tests/integration/smartcontracts/executor_with_admin/src/lib.rs index 8a950ee38cd..d861b6e13fd 100644 --- a/client/tests/integration/smartcontracts/executor_with_admin/src/lib.rs +++ b/client/tests/integration/smartcontracts/executor_with_admin/src/lib.rs @@ -12,6 +12,8 @@ use lol_alloc::{FreeListAllocator, LockedAllocator}; #[global_allocator] static ALLOC: LockedAllocator = LockedAllocator::new(FreeListAllocator::new()); +getrandom::register_custom_getrandom!(iroha_executor::stub_getrandom); + #[derive(Constructor, ValidateEntrypoints, Validate, Visit)] #[visit(custom(visit_instruction))] struct Executor { diff --git a/client/tests/integration/smartcontracts/executor_with_custom_token/Cargo.toml b/client/tests/integration/smartcontracts/executor_with_custom_token/Cargo.toml index 1b305798354..8d84b43f5f5 100644 --- a/client/tests/integration/smartcontracts/executor_with_custom_token/Cargo.toml +++ b/client/tests/integration/smartcontracts/executor_with_custom_token/Cargo.toml @@ -21,3 +21,4 @@ serde.workspace = true panic-halt.workspace = true lol_alloc.workspace = true +getrandom.workspace = true diff --git a/client/tests/integration/smartcontracts/executor_with_custom_token/src/lib.rs 
b/client/tests/integration/smartcontracts/executor_with_custom_token/src/lib.rs index 65c48e2db59..9cb0166657c 100644 --- a/client/tests/integration/smartcontracts/executor_with_custom_token/src/lib.rs +++ b/client/tests/integration/smartcontracts/executor_with_custom_token/src/lib.rs @@ -31,6 +31,8 @@ use serde_json::json; #[global_allocator] static ALLOC: LockedAllocator = LockedAllocator::new(FreeListAllocator::new()); +getrandom::register_custom_getrandom!(iroha_executor::stub_getrandom); + use alloc::format; mod token { diff --git a/client/tests/integration/smartcontracts/executor_with_migration_fail/Cargo.toml b/client/tests/integration/smartcontracts/executor_with_migration_fail/Cargo.toml index 813a5d74ef2..bee72c68c99 100644 --- a/client/tests/integration/smartcontracts/executor_with_migration_fail/Cargo.toml +++ b/client/tests/integration/smartcontracts/executor_with_migration_fail/Cargo.toml @@ -16,3 +16,4 @@ anyhow.workspace = true panic-halt.workspace = true lol_alloc.workspace = true +getrandom.workspace = true diff --git a/client/tests/integration/smartcontracts/executor_with_migration_fail/src/lib.rs b/client/tests/integration/smartcontracts/executor_with_migration_fail/src/lib.rs index e603758dd1d..1b04091ace8 100644 --- a/client/tests/integration/smartcontracts/executor_with_migration_fail/src/lib.rs +++ b/client/tests/integration/smartcontracts/executor_with_migration_fail/src/lib.rs @@ -15,6 +15,8 @@ use lol_alloc::{FreeListAllocator, LockedAllocator}; #[global_allocator] static ALLOC: LockedAllocator = LockedAllocator::new(FreeListAllocator::new()); +getrandom::register_custom_getrandom!(iroha_executor::stub_getrandom); + #[derive(Constructor, ValidateEntrypoints, Validate, Visit)] struct Executor { verdict: Result, diff --git a/client/tests/integration/smartcontracts/mint_rose_trigger/Cargo.toml b/client/tests/integration/smartcontracts/mint_rose_trigger/Cargo.toml index cf5deba651d..3c7447833a0 100644 --- a/client/tests/integration/smartcontracts/mint_rose_trigger/Cargo.toml +++ b/client/tests/integration/smartcontracts/mint_rose_trigger/Cargo.toml @@ -15,3 +15,4 @@ iroha_trigger.workspace = true panic-halt.workspace = true lol_alloc.workspace = true +getrandom.workspace = true diff --git a/client/tests/integration/smartcontracts/mint_rose_trigger/src/lib.rs b/client/tests/integration/smartcontracts/mint_rose_trigger/src/lib.rs index f794772bebd..a136d8307ed 100644 --- a/client/tests/integration/smartcontracts/mint_rose_trigger/src/lib.rs +++ b/client/tests/integration/smartcontracts/mint_rose_trigger/src/lib.rs @@ -13,6 +13,8 @@ use lol_alloc::{FreeListAllocator, LockedAllocator}; #[global_allocator] static ALLOC: LockedAllocator = LockedAllocator::new(FreeListAllocator::new()); +getrandom::register_custom_getrandom!(iroha_trigger::stub_getrandom); + /// Mint 1 rose for owner #[iroha_trigger::main] fn main(owner: AccountId, _event: Event) { diff --git a/client/tests/integration/smartcontracts/query_assets_and_save_cursor/Cargo.toml b/client/tests/integration/smartcontracts/query_assets_and_save_cursor/Cargo.toml index bc012a36958..313e8e91319 100644 --- a/client/tests/integration/smartcontracts/query_assets_and_save_cursor/Cargo.toml +++ b/client/tests/integration/smartcontracts/query_assets_and_save_cursor/Cargo.toml @@ -15,4 +15,5 @@ iroha_smart_contract.workspace = true panic-halt.workspace = true lol_alloc.workspace = true +getrandom.workspace = true serde_json = { version = "1.0.108", default-features = false } diff --git 
a/client/tests/integration/smartcontracts/query_assets_and_save_cursor/src/lib.rs b/client/tests/integration/smartcontracts/query_assets_and_save_cursor/src/lib.rs index 87137474596..2ec5094f4d1 100644 --- a/client/tests/integration/smartcontracts/query_assets_and_save_cursor/src/lib.rs +++ b/client/tests/integration/smartcontracts/query_assets_and_save_cursor/src/lib.rs @@ -16,6 +16,8 @@ use lol_alloc::{FreeListAllocator, LockedAllocator}; #[global_allocator] static ALLOC: LockedAllocator = LockedAllocator::new(FreeListAllocator::new()); +getrandom::register_custom_getrandom!(iroha_smart_contract::stub_getrandom); + /// Execute [`FindAllAssets`] and save cursor to the owner's metadata. #[iroha_smart_contract::main] fn main(owner: AccountId) { diff --git a/client/tests/integration/tx_history.rs b/client/tests/integration/tx_history.rs index 4d26d32fe19..85d81fbd3f8 100644 --- a/client/tests/integration/tx_history.rs +++ b/client/tests/integration/tx_history.rs @@ -48,7 +48,7 @@ fn client_has_rejected_and_acepted_txs_should_return_tx_history() -> Result<()> &mint_not_existed_asset }; let instructions: Vec = vec![mint_asset.clone().into()]; - let transaction = client.build_transaction(instructions, UnlimitedMetadata::new())?; + let transaction = client.build_transaction(instructions, UnlimitedMetadata::new()); client.submit_transaction(&transaction)?; } thread::sleep(pipeline_time * 5); diff --git a/client/tests/integration/upgrade.rs b/client/tests/integration/upgrade.rs index bc2e075ad7f..42ee2f623de 100644 --- a/client/tests/integration/upgrade.rs +++ b/client/tests/integration/upgrade.rs @@ -34,7 +34,7 @@ fn executor_upgrade_should_work() -> Result<()> { let transfer_alice_rose = Transfer::asset_quantity(alice_rose, 1_u32, admin_rose); let transfer_rose_tx = TransactionBuilder::new(chain_id.clone(), admin_id.clone()) .with_instructions([transfer_alice_rose.clone()]) - .sign(admin_keypair.clone())?; + .sign(&admin_keypair); let _ = client .submit_transaction_blocking(&transfer_rose_tx) .expect_err("Should fail"); @@ -48,7 +48,7 @@ fn executor_upgrade_should_work() -> Result<()> { // Creating new transaction instead of cloning, because we need to update it's creation time let transfer_rose_tx = TransactionBuilder::new(chain_id, admin_id) .with_instructions([transfer_alice_rose]) - .sign(admin_keypair)?; + .sign(&admin_keypair); client .submit_transaction_blocking(&transfer_rose_tx) .expect("Should succeed"); diff --git a/client_cli/pytests/.gitignore b/client_cli/pytests/.gitignore index 781710681e1..7a314dfc205 100644 --- a/client_cli/pytests/.gitignore +++ b/client_cli/pytests/.gitignore @@ -7,7 +7,6 @@ __pycache__/ # Virtual environment venv/ *.venv -.env # Pytest cache .test_cache/ @@ -34,3 +33,5 @@ venv/ ehthumbs.db Thumbs.db /allure-results/ + +.env diff --git a/client_cli/pytests/README.md b/client_cli/pytests/README.md index 610261cf789..ad585372c95 100644 --- a/client_cli/pytests/README.md +++ b/client_cli/pytests/README.md @@ -1,92 +1,191 @@ # Overview -This directory contains the pytest framework with test suites for Iroha 2's Client CLI. +This directory contains the `pytest` framework with test suites for the Iroha 2 Client CLI. 
+ +For quick access to a topic that interests you, select one of the following: + +- [Framework Structure](#framework-structure) +- [Iroha 2 Test Model](#iroha-2-test-model) +- [Using Test Suites](#using-test-suites) + - [Custom Test Environment with Docker Compose](#custom-test-environment-with-docker-compose) + - [Poetry Configuration](#poetry-configuration) + - [Tests Configuration](#tests-configuration) +- [Running Tests](#running-tests) +- [Viewing Test Reports](#viewing-test-reports) + +## Framework Structure + +The framework is organized into the following directories: + +- `common`: Contains common constants and helpers used throughout the framework. +- `models`: Contains the data model classes for accounts, assets, and domains. +- `src`: Contains the source code for the Iroha 2 Client CLI tests, including the `client_cli.py` and related utilities. +- `test`: Contains the test suite for the framework, organized into subdirectories for different test categories (`accounts`, `assets`, `atomicity`, `domains`, and `roles`). + +The framework also includes the following configuration files in its root directory: + +- `poetry.lock` and `pyproject.toml` — configuration files for [Poetry](https://python-poetry.org/), the dependency management and virtual environment tool used in this test framework. + +All tests are written with [Allure Report](https://allurereport.org/) in mind, and therefore require certain configuration prior to being executed.\ +For details, see [Running Tests](#running-tests) and [Viewing Test Reports](#viewing-test-reports). ## Iroha 2 Test Model -The Iroha 2 Test Model consists of several test categories that cover different aspects of the Iroha 2 blockchain platform. The structure of the test model is as follows: -- **Configurations:** Test configurations for the Iroha 2 platform. +The Iroha 2 Test Model consists of several test categories that cover different aspects of the Iroha 2 blockchain platform.\ +The test model has the following structure: -- **Accounts:** Test cases for account-related operations, such as account registration, key management, and metadata manipulation. +- **Accounts**: Test cases for account-related operations. +- **Assets**: Test cases for asset-related operations. +- **Atomicity**: Test cases for transaction atomicity. +- **Domains**: Test cases for domain-related operations. +- **Roles**: Test cases for roles management. -- **Assets:** Test cases for asset-related operations, including asset creation, minting, burning, transferring, and managing asset definitions and metadata. + -- **Domains:** Test cases for domain-related operations, such as registering and unregistering domains. +## Using Test Suites -- **Roles:** Test cases for roles management. +> [!NOTE] +> The following instructions assume that you're using the `test_env.py` script that is being provided for the default test environment. +> However, it is possible to run the tests in a custom environment, e.g., with Docker Compose. +> For instructions on how to do so, see [Custom Test Environment with Docker Compose](#custom-test-environment-with-docker-compose). -- **Atomicity:** Test cases for transaction atomicity, including multiple instructions within a single transaction, paired instructions, and invalid instructions. +1. 
Set up a test environment using the [`test_env.py`](../../scripts/test_env.py) script: -## How to use -At first, you need to installed and running [Iroha 2](https://hyperledger.github.io/iroha-2-docs/guide/install.html), and also need to have built [Client CLI](https://hyperledger.github.io/iroha-2-docs/guide/build.html) + ```shell + # Must be executed from the repo root: + ./scripts/test_env.py setup + ``` -## Configuration + By default, this builds `iroha`, `iroha_client_cli`, and `kagami` binaries, and runs four peers with their APIs exposed on ports `8080`-`8083`.\ + This behavior can be reconfigured. You can run `./scripts/test_env.py --help` to see the list of available commands and options. -To configure the application, you can use a `.env` file in the `client_cli/pytest` directory. The `.env` file should contain the following variables: +2. Install and configure [Poetry](https://python-poetry.org/).\ + For details, see [Poetry Configuration](#poetry-configuration) below. +3. Configure the tests by creating the following `.env` file in _this_ (`/client_cli/pytests/`) directory: -``` -CLIENT_CLI_DIR=/path/to/iroha_client_cli/with/config.json/dir/ -TORII_API_PORT_MIN=8080 -TORII_API_PORT_MAX=8083 -``` -Replace `/path/to/iroha_client_cli/dir` with the actual paths to the respective files on your system. + ```shell + CLIENT_CLI_DIR=/path/to/iroha_client_cli/with/config.json/dir/ + TORII_API_PORT_MIN=8080 + TORII_API_PORT_MAX=8083 + ``` -If the `.env` file is not present or these variables are not defined in it + For details, see [Tests Configuration](#tests-configuration) below. +4. Run the tests: -## Poetry Configuration + ```shell + poetry run pytest + ``` -This test framework uses [Poetry](https://python-poetry.org/) for dependency management and virtual environment setup. To get started with Poetry, follow these steps: +5. Once you are done, clean up the test environment: -1. Install Poetry by following the [official installation guide](https://python-poetry.org/docs/#installation). + ```shell + # Must be executed from the repo root: + ./scripts/test_env.py cleanup + ``` -2. Navigate to the `client_cli/pytests` directory in your terminal. +### Custom Test Environment with Docker Compose + +By default, we provide the [`test_env.py`](../../scripts/test_env.py) script to set up a test environment. This environment is composed of a running network of Iroha peers and an `iroha_client_cli` configuration to interact with it. + +However, if for any reason this approach is inconvenient, it is possible to set up a custom network of Iroha peers using the provided Docker Compose configurations. + +To do so, perform the following steps: + +1. Have a local or remote server with a custom Docker Compose development environment already set up: + + ```bash + docker-compose -f docker-compose.dev.yml up + ``` + +2. Build the `iroha_client_cli` binary: + + ```bash + cargo build --bin iroha_client_cli + ``` + +3. Create a new directory, then copy the `iroha_client_cli` binary and its `config.json` configuration file into it: + + ```shell + # Create a new directory: + mkdir test_client + # Copy the files: + cp configs/client/config.json test_client + cp target/debug/iroha_client_cli test_client + ``` + +4. Proceed with _Step 2_ of the [Using Test Suites](#using-test-suites) instructions.
+> [!NOTE] > Don't forget to specify the path to the directory created for the `iroha_client_cli` binary and its `config.json` configuration file (see Step 3) in the `CLIENT_CLI_DIR` variable of the `.env` file. > For details, see [Tests Configuration](#tests-configuration) below. + +### Poetry Configuration + +This test framework uses [Poetry](https://python-poetry.org/) for dependency management and virtual environment setup. + +To get started with Poetry, follow these steps: + +1. Install Poetry by following the [official installation guide](https://python-poetry.org/docs/#installation). +2. Navigate to the `client_cli/pytests` directory in your terminal. 3. Install the dependencies and set up a virtual environment using Poetry: - ```bash - poetry install - ``` + + ```bash + poetry install + ``` + 4. Activate the virtual environment: - ```bash - poetry shell - ``` - Now, you should be in the virtual environment with all the required dependencies installed. All the subsequent commands (e.g., pytest, allure) should be executed within this virtual environment. - 5. When you're done working in the virtual environment, deactivate it by running: - ```bash - exit - ``` -## Run tests + ```bash + poetry shell + ``` + +Now you are in the virtual environment with all the required dependencies installed. All the subsequent commands (e.g., `pytest`, `allure`) must be executed within this virtual environment. -To run tests and generate a report in the allure-results folder, execute the following command: +Once you're done working with the virtual environment, deactivate it: ```bash -pytest -k "not xfail" --alluredir allure-results +exit ``` -The `--alluredir` option specifies the directory where the report should be stored. ### Tests Configuration -## View the report +Tests are configured via environment variables. These variables can be optionally defined in a `.env` file that must be created in _this_ (`/client_cli/pytests/`) directory. -To launch a web server that serves the Allure report generated, run: +The variables: -```bash -allure serve allure-results +- `CLIENT_CLI_DIR` — Specifies a path to a directory containing the `iroha_client_cli` binary and its `config.json` configuration file.\ + Set to `/client_cli` by default. +- `TORII_API_PORT_MIN`/`TORII_API_PORT_MAX` — This pair specifies the range of local ports through which the Iroha 2 peers are deployed. A randomly selected port from the specified range is used for each test.\ + Set to `8080` and `8083`, respectively, by default. + +**Example**: + +```shell +CLIENT_CLI_DIR=/path/to/iroha_client_cli/with/config.json/dir/ +TORII_API_PORT_MIN=8080 +TORII_API_PORT_MAX=8083 ``` -The `allure-results` argument specifies the directory where the report is stored. After running this command, you should be able to view the report in your web browser by navigating to `http://localhost:port`, where port is the port number displayed in the console output. ## Running Tests -## Structure -The framework is organized into the following directories: +To run tests and generate an [Allure](https://allurereport.org/) report into the `allure-results` folder, execute the following command: + +```bash +pytest -k "not xfail" --alluredir allure-results
+The `-k` option selects the tests whose names match the given string expression (case-insensitive), which can include Python operators that use filenames, class names, and function names as variables.\ +The `"not xfail"` value specifies that only tests that are _not_ marked with [`xfail`](https://docs.pytest.org/en/6.2.x/skipping.html#xfail-mark-test-functions-as-expected-to-fail) will be run.\ +This is because tests marked with `xfail` are currently work-in-progress and expected to fail. -`models`: Contains the data model classes for accounts, assets, and domains. +The `--alluredir` option specifies the directory where the report is stored. -`src`: Contains the source code for the Iroha 2 Client CLI tests, including the client CLI and related utilities. +## Viewing Test Reports -`test`: Contains the test suite for the framework, organized into subdirectories for different test categories (accounts, assets, atomicity, domains, and permissions). +To launch a web server that serves the generated [Allure](https://allurereport.org/) report, execute the following command: -The framework also includes configuration files: +```bash +allure serve allure-results +``` -`poetry.lock` and `pyproject.toml`: Configuration files for Poetry, the dependency management and virtual environment tool used in this framework. -`pytest.ini`: Configuration file for pytest, the testing framework used in this framework. +The `allure-results` argument specifies the directory where the report is stored. After running this command, you will be able to view the report in your web browser by navigating to `http://localhost:port`, where `port` is the port number displayed in the terminal output. diff --git a/client_cli/src/main.rs b/client_cli/src/main.rs index 0238fdc32c6..fca6f194a27 100644 --- a/client_cli/src/main.rs +++ b/client_cli/src/main.rs @@ -253,40 +253,41 @@ fn submit( ) -> Result<()> { let iroha_client = Client::new(context.configuration())?; let instructions = instructions.into(); - #[cfg(debug_assertions)] - let err_msg = format!("Failed to build transaction from instruction {instructions:?}"); - #[cfg(not(debug_assertions))] - let err_msg = "Failed to build transaction."; - let tx = iroha_client - .build_transaction(instructions, metadata) - .wrap_err(err_msg)?; - let tx = if context.skip_mst_check() { - tx + let tx = iroha_client.build_transaction(instructions, metadata); + let transactions = if context.skip_mst_check() { + vec![tx] } else { - match iroha_client.get_original_transaction( + match iroha_client.get_original_matching_transactions( &tx, RETRY_COUNT_MST, RETRY_IN_MST, ) { - Ok(Some(original_transaction)) if Confirm::new() - .with_prompt("There is a similar transaction from your account waiting for more signatures. \ - This could be because it wasn't signed with the right key, \ - or because it's a multi-signature transaction (MST). \ - Do you want to sign this transaction (yes) \ + Ok(original_transactions) if !original_transactions.is_empty() && Confirm::new() + .with_prompt("There are similar transactions from your account waiting for more signatures. \ + This could be because they weren't signed with the right key, \ + or because they're multi-signature transactions (MST). \ + Do you want to sign these transactions (yes) \ instead of submitting a new transaction (no)?") .interact() - .wrap_err("Failed to show interactive prompt.")?
=> iroha_client.sign_transaction(original_transaction).wrap_err("Failed to sign transaction.")?, - _ => tx, + .wrap_err("Failed to show interactive prompt.")? => { + original_transactions.into_iter().map(|transaction| { + iroha_client.sign_transaction(transaction) + }).collect() + } + _ => vec![tx], } }; - #[cfg(debug_assertions)] - let err_msg = format!("Failed to submit transaction {tx:?}"); #[cfg(not(debug_assertions))] let err_msg = "Failed to submit transaction."; - let hash = iroha_client - .submit_transaction_blocking(&tx) - .wrap_err(err_msg)?; - context.print_data(&hash)?; + for tx in transactions { + #[cfg(debug_assertions)] + let err_msg = format!("Failed to submit transaction {tx:?}"); + let hash = iroha_client + .submit_transaction_blocking(&tx) + .wrap_err(err_msg)?; + context.print_data(&hash)?; + } + Ok(()) } @@ -1046,17 +1047,17 @@ mod peer { #[derive(clap::Subcommand, Debug)] pub enum Args { /// Register subcommand of peer - Register(Register), + Register(Box), /// Unregister subcommand of peer - Unregister(Unregister), + Unregister(Box), } impl RunArgs for Args { fn run(self, context: &mut dyn RunContext) -> Result<()> { - match_all!( - (self, context), - { Args::Register, Args::Unregister } - ) + match self { + Args::Register(register) => RunArgs::run(*register, context), + Args::Unregister(unregister) => RunArgs::run(*unregister, context), + } } } @@ -1080,9 +1081,8 @@ mod peer { key, metadata, } = self; - let register_peer = iroha_client::data_model::isi::Register::peer(Peer::new( - PeerId::new(&address, &key), - )); + let register_peer = + iroha_client::data_model::isi::Register::peer(Peer::new(PeerId::new(address, key))); submit([register_peer], metadata.load()?, context).wrap_err("Failed to register peer") } } @@ -1108,7 +1108,7 @@ mod peer { metadata, } = self; let unregister_peer = - iroha_client::data_model::isi::Unregister::peer(PeerId::new(&address, &key)); + iroha_client::data_model::isi::Unregister::peer(PeerId::new(address, key)); submit([unregister_peer], metadata.load()?, context) .wrap_err("Failed to unregister peer") } diff --git a/config/src/client.rs b/config/src/client.rs index 41c5f30240d..bdf559ddcda 100644 --- a/config/src/client.rs +++ b/config/src/client.rs @@ -180,7 +180,7 @@ mod tests { // TODO: make tests to check generated key validity fn arb_keys_from_seed() (seed in prop::collection::vec(any::(), 33..64)) -> (PublicKey, PrivateKey) { - let (public_key, private_key) = KeyPair::generate_with_configuration(KeyGenConfiguration::default().use_seed(seed)).expect("Seed was invalid").into(); + let (public_key, private_key) = KeyPair::generate_with_configuration(KeyGenConfiguration::from_seed(seed)).expect("Seed was invalid").into(); (public_key, private_key) } } diff --git a/config/src/iroha.rs b/config/src/iroha.rs index b4be726e12b..ac33c9a2f32 100644 --- a/config/src/iroha.rs +++ b/config/src/iroha.rs @@ -120,7 +120,7 @@ impl ConfigurationProxy { if let Some(torii_proxy) = &mut self.torii { if sumeragi_proxy.peer_id.is_none() { sumeragi_proxy.peer_id = Some(iroha_data_model::prelude::PeerId::new( - &torii_proxy + torii_proxy .p2p_addr .clone() .ok_or(ConfigError::MissingField { @@ -128,7 +128,7 @@ impl ConfigurationProxy { message: "`p2p_addr` should not be set to `null` or `None` explicitly.", })?, - &self.public_key.clone().expect( + self.public_key.clone().expect( "Iroha `public_key` should have been initialized above at the latest", ), )); diff --git a/config/src/torii.rs b/config/src/torii.rs index 7dea529aa54..d77457f0ddb 100644 --- 
a/config/src/torii.rs +++ b/config/src/torii.rs @@ -62,7 +62,7 @@ pub mod uri { /// The web socket uri used to subscribe to blocks stream. pub const BLOCKS_STREAM: &str = "block/stream"; /// Get pending transactions. - pub const PENDING_TRANSACTIONS: &str = "pending_transactions"; + pub const MATCHING_PENDING_TRANSACTIONS: &str = "matching_pending_transactions"; /// The URI for local config changing inspecting pub const CONFIGURATION: &str = "configuration"; /// URI to report status for administration @@ -74,6 +74,8 @@ pub mod uri { pub const SCHEMA: &str = "schema"; /// URI for getting the API version currently used pub const API_VERSION: &str = "api_version"; + /// URI for getting cpu profile + pub const PROFILE: &str = "debug/pprof/profile"; } #[cfg(test)] diff --git a/config/src/wasm.rs b/config/src/wasm.rs index 9e49f8d9391..6983a7ac095 100644 --- a/config/src/wasm.rs +++ b/config/src/wasm.rs @@ -7,7 +7,7 @@ use self::default::*; /// Module with a set of default values. pub mod default { /// Default amount of fuel provided for execution - pub const DEFAULT_FUEL_LIMIT: u64 = 23_000_000; + pub const DEFAULT_FUEL_LIMIT: u64 = 30_000_000; /// Default amount of memory given for smart contract pub const DEFAULT_MAX_MEMORY: u32 = 500 * 2_u32.pow(20); // 500 MiB } diff --git a/configs/peer/config.json b/configs/peer/config.json index 2695398702f..d5533baf83e 100644 --- a/configs/peer/config.json +++ b/configs/peer/config.json @@ -70,7 +70,7 @@ "max_wasm_size_bytes": 4194304 }, "WASM_RUNTIME_CONFIG": { - "FUEL_LIMIT": 23000000, + "FUEL_LIMIT": 30000000, "MAX_MEMORY": 524288000 } }, diff --git a/configs/peer/executor.wasm b/configs/peer/executor.wasm index a7abc19985d..edbf46b4cfb 100644 Binary files a/configs/peer/executor.wasm and b/configs/peer/executor.wasm differ diff --git a/configs/peer/genesis.json b/configs/peer/genesis.json index 22614a1ceb3..02ec4b1fa18 100644 --- a/configs/peer/genesis.json +++ b/configs/peer/genesis.json @@ -154,7 +154,7 @@ "NewParameter": "?WSVIdentLengthLimits=1,128_LL" }, { - "NewParameter": "?WASMFuelLimit=23000000" + "NewParameter": "?WASMFuelLimit=30000000" }, { "NewParameter": "?WASMMaxMemory=524288000" diff --git a/core/Cargo.toml b/core/Cargo.toml index dc040225591..e2f86e46f47 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -29,6 +29,8 @@ cli = [] dev-telemetry = ["telemetry", "iroha_telemetry/dev-telemetry"] # Support Prometheus metrics. See https://prometheus.io/. 
expensive-telemetry = ["iroha_telemetry/metric-instrumentation"] +# Profiler integration for wasmtime +profiling = [] [badges] is-it-maintained-issue-resolution = { repository = "https://github.com/hyperledger/iroha" } diff --git a/core/benches/blocks/apply_blocks.rs b/core/benches/blocks/apply_blocks.rs index f255922105c..5a2e48b7e82 100644 --- a/core/benches/blocks/apply_blocks.rs +++ b/core/benches/blocks/apply_blocks.rs @@ -19,13 +19,13 @@ impl WsvApplyBlocks { /// - Failed to parse [`AccountId`] /// - Failed to generate [`KeyPair`] /// - Failed to create instructions for block - pub fn setup() -> Result { + pub fn setup(rt: &tokio::runtime::Handle) -> Result { let domains = 100; let accounts_per_domain = 1000; let assets_per_domain = 1000; let account_id: AccountId = "alice@wonderland".parse()?; let key_pair = KeyPair::generate()?; - let wsv = build_wsv(&account_id, &key_pair); + let wsv = build_wsv(rt, &account_id, &key_pair); let nth = 100; let instructions = [ @@ -40,8 +40,7 @@ impl WsvApplyBlocks { instructions .into_iter() .map(|instructions| { - let block = - create_block(&mut wsv, instructions, account_id.clone(), key_pair.clone()); + let block = create_block(&mut wsv, instructions, account_id.clone(), &key_pair); wsv.apply_without_execution(&block).map(|()| block) }) .collect::, _>>()? diff --git a/core/benches/blocks/apply_blocks_benchmark.rs b/core/benches/blocks/apply_blocks_benchmark.rs index 730d6f13037..2c9c496f0fb 100644 --- a/core/benches/blocks/apply_blocks_benchmark.rs +++ b/core/benches/blocks/apply_blocks_benchmark.rs @@ -6,17 +6,19 @@ use apply_blocks::WsvApplyBlocks; use criterion::{black_box, criterion_group, criterion_main, Criterion}; fn apply_blocks(c: &mut Criterion) { - tokio::runtime::Runtime::new().unwrap().block_on(async { - let bench = WsvApplyBlocks::setup().expect("Failed to setup benchmark"); - let mut group = c.benchmark_group("apply_blocks"); - group.significance_level(0.1).sample_size(10); - group.bench_function("apply_blocks", |b| { - b.iter(|| { - WsvApplyBlocks::measure(black_box(&bench)).expect("Failed to execute benchmark"); - }); + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .expect("Failed building the Runtime"); + let bench = WsvApplyBlocks::setup(rt.handle()).expect("Failed to setup benchmark"); + let mut group = c.benchmark_group("apply_blocks"); + group.significance_level(0.1).sample_size(10); + group.bench_function("apply_blocks", |b| { + b.iter(|| { + WsvApplyBlocks::measure(black_box(&bench)).expect("Failed to execute benchmark"); }); - group.finish(); }); + group.finish(); } criterion_group!(wsv, apply_blocks); diff --git a/core/benches/blocks/apply_blocks_oneshot.rs b/core/benches/blocks/apply_blocks_oneshot.rs index f16a5bf5e57..da6ce8527dd 100644 --- a/core/benches/blocks/apply_blocks_oneshot.rs +++ b/core/benches/blocks/apply_blocks_oneshot.rs @@ -9,10 +9,16 @@ mod apply_blocks; use apply_blocks::WsvApplyBlocks; -#[tokio::main] -async fn main() { - iroha_logger::test_logger(); +fn main() { + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .expect("Failed building the Runtime"); + { + let _guard = rt.enter(); + iroha_logger::test_logger(); + } iroha_logger::info!("Starting..."); - let bench = WsvApplyBlocks::setup().expect("Failed to setup benchmark"); + let bench = WsvApplyBlocks::setup(rt.handle()).expect("Failed to setup benchmark"); WsvApplyBlocks::measure(&bench).expect("Failed to execute benchmark"); } diff --git a/core/benches/blocks/common.rs 
b/core/benches/blocks/common.rs index 2905c796435..0c2ec273287 100644 --- a/core/benches/blocks/common.rs +++ b/core/benches/blocks/common.rs @@ -25,14 +25,13 @@ pub fn create_block( wsv: &mut WorldStateView, instructions: Vec, account_id: AccountId, - key_pair: KeyPair, + key_pair: &KeyPair, ) -> CommittedBlock { let chain_id = ChainId::new("0"); let transaction = TransactionBuilder::new(chain_id.clone(), account_id) .with_instructions(instructions) - .sign(key_pair.clone()) - .unwrap(); + .sign(key_pair); let limits = wsv.transaction_executor().transaction_limits; let topology = Topology::new(UniqueVec::new()); @@ -43,7 +42,6 @@ pub fn create_block( ) .chain(0, wsv) .sign(key_pair) - .unwrap() .commit(&topology) .unwrap(); @@ -170,9 +168,16 @@ pub fn restore_every_nth( instructions } -pub fn build_wsv(account_id: &AccountId, key_pair: &KeyPair) -> WorldStateView { +pub fn build_wsv( + rt: &tokio::runtime::Handle, + account_id: &AccountId, + key_pair: &KeyPair, +) -> WorldStateView { let kura = iroha_core::kura::Kura::blank_kura_for_testing(); - let query_handle = LiveQueryStore::test().start(); + let query_handle = { + let _guard = rt.enter(); + LiveQueryStore::test().start() + }; let mut domain = Domain::new(account_id.domain_id.clone()).build(account_id); domain.accounts.insert( account_id.clone(), diff --git a/core/benches/blocks/validate_blocks.rs b/core/benches/blocks/validate_blocks.rs index f39e7eb288e..9b59b9f7ed7 100644 --- a/core/benches/blocks/validate_blocks.rs +++ b/core/benches/blocks/validate_blocks.rs @@ -22,13 +22,13 @@ impl WsvValidateBlocks { /// - Failed to parse [`AccountId`] /// - Failed to generate [`KeyPair`] /// - Failed to create instructions for block - pub fn setup() -> Result { + pub fn setup(rt: &tokio::runtime::Handle) -> Result { let domains = 100; let accounts_per_domain = 1000; let assets_per_domain = 1000; let account_id: AccountId = "alice@wonderland".parse()?; let key_pair = KeyPair::generate()?; - let wsv = build_wsv(&account_id, &key_pair); + let wsv = build_wsv(rt, &account_id, &key_pair); let nth = 100; let instructions = [ @@ -69,7 +69,7 @@ impl WsvValidateBlocks { assert_eq!(wsv.height(), 0); for (instructions, i) in instructions.into_iter().zip(1..) 
{ finalized_wsv = wsv.clone(); - let block = create_block(&mut wsv, instructions, account_id.clone(), key_pair.clone()); + let block = create_block(&mut wsv, instructions, account_id.clone(), &key_pair); wsv.apply_without_execution(&block)?; assert_eq!(wsv.height(), i); assert_eq!(wsv.height(), finalized_wsv.height() + 1); diff --git a/core/benches/blocks/validate_blocks_benchmark.rs b/core/benches/blocks/validate_blocks_benchmark.rs index 1417a1a426f..548c1bbb0eb 100644 --- a/core/benches/blocks/validate_blocks_benchmark.rs +++ b/core/benches/blocks/validate_blocks_benchmark.rs @@ -6,7 +6,11 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion}; use validate_blocks::WsvValidateBlocks; fn validate_blocks(c: &mut Criterion) { - let bench = WsvValidateBlocks::setup().expect("Failed to setup benchmark"); + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .expect("Failed building the Runtime"); + let bench = WsvValidateBlocks::setup(rt.handle()).expect("Failed to setup benchmark"); let mut group = c.benchmark_group("validate_blocks"); group.significance_level(0.1).sample_size(10); diff --git a/core/benches/blocks/validate_blocks_oneshot.rs b/core/benches/blocks/validate_blocks_oneshot.rs index 403adbd0a22..abfc09f7e7b 100644 --- a/core/benches/blocks/validate_blocks_oneshot.rs +++ b/core/benches/blocks/validate_blocks_oneshot.rs @@ -10,8 +10,15 @@ mod validate_blocks; use validate_blocks::WsvValidateBlocks; fn main() { + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .expect("Failed building the Runtime"); + { + let _guard = rt.enter(); + iroha_logger::test_logger(); + } - iroha_logger::test_logger(); iroha_logger::info!("Starting..."); - let bench = WsvValidateBlocks::setup().expect("Failed to setup benchmark"); + let bench = WsvValidateBlocks::setup(rt.handle()).expect("Failed to setup benchmark"); WsvValidateBlocks::measure(bench).expect("Failed to execute benchmark"); } diff --git a/core/benches/kura.rs b/core/benches/kura.rs index a7508e9567c..3c9e9b103e2 100644 --- a/core/benches/kura.rs +++ b/core/benches/kura.rs @@ -32,8 +32,7 @@ async fn measure_block_size_for_n_executors(n_executors: u32) { AccountId::from_str("alice@wonderland").expect("checked"), ) .with_instructions([transfer]) - .sign(keypair.clone()) - .expect("Failed to sign."); + .sign(&keypair); let transaction_limits = TransactionLimits { max_instruction_number: 4096, max_wasm_size_bytes: 0, @@ -54,13 +53,10 @@ async fn measure_block_size_for_n_executors(n_executors: u32) { let topology = Topology::new(UniqueVec::new()); let mut block = BlockBuilder::new(vec![tx], topology, Vec::new()) .chain(0, &mut wsv) - .sign(KeyPair::generate().unwrap()) - .unwrap(); + .sign(&KeyPair::generate().unwrap()); for _ in 1..n_executors { - block = block - .sign(KeyPair::generate().expect("Failed to generate KeyPair.")) - .unwrap(); + block = block.sign(&KeyPair::generate().unwrap()); } let mut block_store = BlockStore::new(dir.path(), LockStatus::Unlocked); block_store.create_files_if_they_do_not_exist().unwrap(); diff --git a/core/benches/validation.rs b/core/benches/validation.rs index 93d1b8b5356..970bc80571c 100644 --- a/core/benches/validation.rs +++ b/core/benches/validation.rs @@ -2,7 +2,7 @@ use std::str::FromStr as _; -use criterion::{criterion_group, criterion_main, Criterion}; +use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use iroha_core::{ block::*, prelude::*, @@ -23,7 +23,7 @@ const TRANSACTION_LIMITS:
TransactionLimits = TransactionLimits { max_wasm_size_bytes: 0, }; -fn build_test_transaction(keys: KeyPair, chain_id: ChainId) -> SignedTransaction { +fn build_test_transaction(keys: &KeyPair, chain_id: ChainId) -> SignedTransaction { let domain_name = "domain"; let domain_id = DomainId::from_str(domain_name).expect("does not panic"); let create_domain: InstructionBox = Register::domain(Domain::new(domain_id)).into(); @@ -56,7 +56,6 @@ fn build_test_transaction(keys: KeyPair, chain_id: ChainId) -> SignedTransaction ) .with_instructions(instructions) .sign(keys) - .expect("Failed to sign.") } fn build_test_and_transient_wsv(keys: KeyPair) -> WorldStateView { @@ -99,7 +98,7 @@ fn accept_transaction(criterion: &mut Criterion) { let chain_id = ChainId::new("0"); let keys = KeyPair::generate().expect("Failed to generate keys"); - let transaction = build_test_transaction(keys, chain_id.clone()); + let transaction = build_test_transaction(&keys, chain_id.clone()); let mut success_count = 0; let mut failures_count = 0; let _ = criterion.bench_function("accept", |b| { @@ -117,17 +116,20 @@ fn sign_transaction(criterion: &mut Criterion) { let chain_id = ChainId::new("0"); let keys = KeyPair::generate().expect("Failed to generate keys"); - let transaction = build_test_transaction(keys, chain_id); + let transaction = build_test_transaction(&keys, chain_id); let key_pair = KeyPair::generate().expect("Failed to generate KeyPair."); - let mut success_count = 0; - let mut failures_count = 0; + let mut count = 0; let _ = criterion.bench_function("sign", |b| { - b.iter(|| match transaction.clone().sign(key_pair.clone()) { - Ok(_) => success_count += 1, - Err(_) => failures_count += 1, - }); + b.iter_batched( + || transaction.clone(), + |transaction| { + let _: SignedTransaction = transaction.sign(&key_pair); + count += 1; + }, + BatchSize::SmallInput, + ); }); - println!("Success count: {success_count}, Failures count: {failures_count}"); + println!("Count: {count}"); } fn validate_transaction(criterion: &mut Criterion) { @@ -135,7 +137,7 @@ fn validate_transaction(criterion: &mut Criterion) { let keys = KeyPair::generate().expect("Failed to generate keys"); let transaction = AcceptedTransaction::accept( - build_test_transaction(keys.clone(), chain_id.clone()), + build_test_transaction(&keys, chain_id.clone()), &chain_id, &TRANSACTION_LIMITS, ) @@ -161,7 +163,7 @@ fn sign_blocks(criterion: &mut Criterion) { let keys = KeyPair::generate().expect("Failed to generate keys"); let transaction = AcceptedTransaction::accept( - build_test_transaction(keys, chain_id.clone()), + build_test_transaction(&keys, chain_id.clone()), &chain_id, &TRANSACTION_LIMITS, ) @@ -172,18 +174,21 @@ fn sign_blocks(criterion: &mut Criterion) { let mut wsv = WorldStateView::new(World::new(), kura, query_handle); let topology = Topology::new(UniqueVec::new()); - let mut success_count = 0; - let mut failures_count = 0; + let mut count = 0; let block = BlockBuilder::new(vec![transaction], topology, Vec::new()).chain(0, &mut wsv); let _ = criterion.bench_function("sign_block", |b| { - b.iter(|| match block.clone().sign(key_pair.clone()) { - Ok(_) => success_count += 1, - Err(_) => failures_count += 1, - }); + b.iter_batched( + || block.clone(), + |block| { + let _: ValidBlock = block.sign(&key_pair); + count += 1; + }, + BatchSize::SmallInput, + ); }); - println!("Success count: {success_count}, Failures count: {failures_count}"); + println!("Count: {count}"); } criterion_group!( diff --git a/core/src/block.rs b/core/src/block.rs index 
3c79e5dd8f4..ea695adfb1e 100644 --- a/core/src/block.rs +++ b/core/src/block.rs @@ -216,20 +216,16 @@ mod chained { impl BlockBuilder { /// Sign this block and get [`SignedBlock`]. - /// - /// # Errors - /// - /// Fails if signature generation fails - pub fn sign(self, key_pair: KeyPair) -> Result { - let signature = SignatureOf::new(key_pair, &self.0 .0)?; + pub fn sign(self, key_pair: &KeyPair) -> ValidBlock { + let signature = SignatureOf::new(key_pair, &self.0 .0); - Ok(ValidBlock( + ValidBlock( SignedBlockV1 { payload: self.0 .0, signatures: SignaturesOf::from(signature), } .into(), - )) + ) } } } @@ -430,12 +426,9 @@ mod valid { } /// Add additional signatures for [`Self`]. - /// - /// # Errors - /// - /// If signature generation fails - pub fn sign(self, key_pair: KeyPair) -> Result { - self.0.sign(key_pair).map(ValidBlock) + #[must_use] + pub fn sign(self, key_pair: &KeyPair) -> Self { + ValidBlock(self.0.sign(key_pair)) } /// Add additional signature for [`Self`] @@ -465,8 +458,7 @@ mod valid { commit_topology: UniqueVec::new(), event_recommendations: Vec::new(), })) - .sign(KeyPair::generate().unwrap()) - .unwrap() + .sign(&KeyPair::generate().unwrap()) } /// Check if block's signatures meet requirements for given topology. @@ -540,9 +532,7 @@ mod valid { let payload = block.payload().clone(); key_pairs .iter() - .map(|key_pair| { - SignatureOf::new(key_pair.clone(), &payload).expect("Failed to sign") - }) + .map(|key_pair| SignatureOf::new(key_pair, &payload)) .try_for_each(|signature| block.add_signature(signature)) .expect("Failed to add signatures"); @@ -565,9 +555,7 @@ mod valid { key_pairs .iter() .enumerate() - .map(|(_, key_pair)| { - SignatureOf::new(key_pair.clone(), &payload).expect("Failed to sign") - }) + .map(|(_, key_pair)| SignatureOf::new(key_pair, &payload)) .try_for_each(|signature| block.add_signature(signature)) .expect("Failed to add signatures"); @@ -588,8 +576,7 @@ mod valid { let mut block = ValidBlock::new_dummy(); let payload = block.payload().clone(); - let proxy_tail_signature = - SignatureOf::new(key_pairs[4].clone(), &payload).expect("Failed to sign"); + let proxy_tail_signature = SignatureOf::new(&key_pairs[4], &payload); block .add_signature(proxy_tail_signature) .expect("Failed to add signature"); @@ -621,7 +608,7 @@ mod valid { .iter() .enumerate() .filter(|(i, _)| *i != 4) // Skip proxy tail - .map(|(_, key_pair)| SignatureOf::new(key_pair.clone(), &payload).expect("Failed to sign")) + .map(|(_, key_pair)| SignatureOf::new(key_pair, &payload)) .try_for_each(|signature| block.add_signature(signature)) .expect("Failed to add signatures"); @@ -748,8 +735,7 @@ mod tests { let transaction_limits = &wsv.transaction_executor().transaction_limits; let tx = TransactionBuilder::new(chain_id.clone(), alice_id) .with_instructions([create_asset_definition]) - .sign(alice_keys.clone()) - .expect("Valid"); + .sign(&alice_keys); let tx = AcceptedTransaction::accept(tx, &chain_id, transaction_limits).expect("Valid"); // Creating a block of two identical transactions and validating it @@ -757,8 +743,7 @@ mod tests { let topology = Topology::new(UniqueVec::new()); let valid_block = BlockBuilder::new(transactions, topology, Vec::new()) .chain(0, &mut wsv) - .sign(alice_keys) - .expect("Valid"); + .sign(&alice_keys); // The first transaction should be confirmed assert!(valid_block.payload().transactions[0].error.is_none()); @@ -793,8 +778,7 @@ mod tests { let transaction_limits = &wsv.transaction_executor().transaction_limits; let tx = 
TransactionBuilder::new(chain_id.clone(), alice_id.clone()) .with_instructions([create_asset_definition]) - .sign(alice_keys.clone()) - .expect("Valid"); + .sign(&alice_keys); let tx = AcceptedTransaction::accept(tx, &chain_id, transaction_limits).expect("Valid"); let quantity: u32 = 200; @@ -812,14 +796,12 @@ mod tests { let tx0 = TransactionBuilder::new(chain_id.clone(), alice_id.clone()) .with_instructions([fail_mint]) - .sign(alice_keys.clone()) - .expect("Valid"); + .sign(&alice_keys); let tx0 = AcceptedTransaction::accept(tx0, &chain_id, transaction_limits).expect("Valid"); let tx2 = TransactionBuilder::new(chain_id.clone(), alice_id) .with_instructions([succeed_mint]) - .sign(alice_keys.clone()) - .expect("Valid"); + .sign(&alice_keys); let tx2 = AcceptedTransaction::accept(tx2, &chain_id, transaction_limits).expect("Valid"); // Creating a block of two identical transactions and validating it @@ -827,8 +809,7 @@ mod tests { let topology = Topology::new(UniqueVec::new()); let valid_block = BlockBuilder::new(transactions, topology, Vec::new()) .chain(0, &mut wsv) - .sign(alice_keys) - .expect("Valid"); + .sign(&alice_keys); // The first transaction should fail assert!(valid_block.payload().transactions[0].error.is_some()); @@ -870,14 +851,12 @@ mod tests { let instructions_accept: [InstructionBox; 2] = [create_domain.into(), create_asset.into()]; let tx_fail = TransactionBuilder::new(chain_id.clone(), alice_id.clone()) .with_instructions(instructions_fail) - .sign(alice_keys.clone()) - .expect("Valid"); + .sign(&alice_keys); let tx_fail = AcceptedTransaction::accept(tx_fail, &chain_id, transaction_limits).expect("Valid"); let tx_accept = TransactionBuilder::new(chain_id.clone(), alice_id) .with_instructions(instructions_accept) - .sign(alice_keys.clone()) - .expect("Valid"); + .sign(&alice_keys); let tx_accept = AcceptedTransaction::accept(tx_accept, &chain_id, transaction_limits).expect("Valid"); @@ -886,8 +865,7 @@ mod tests { let topology = Topology::new(UniqueVec::new()); let valid_block = BlockBuilder::new(transactions, topology, Vec::new()) .chain(0, &mut wsv) - .sign(alice_keys) - .expect("Valid"); + .sign(&alice_keys); // The first transaction should be rejected assert!( diff --git a/core/src/queue.rs b/core/src/queue.rs index 759d8e0ab7a..d67abd31972 100644 --- a/core/src/queue.rs +++ b/core/src/queue.rs @@ -391,7 +391,7 @@ mod tests { wsv::World, PeersIds, }; - fn accepted_tx(account_id: &str, key: KeyPair) -> AcceptedTransaction { + fn accepted_tx(account_id: &str, key: &KeyPair) -> AcceptedTransaction { let chain_id = ChainId::new("0"); let message = std::iter::repeat_with(rand::random::) @@ -403,8 +403,7 @@ mod tests { AccountId::from_str(account_id).expect("Valid"), ) .with_instructions(instructions) - .sign(key) - .expect("Failed to sign."); + .sign(key); let limits = TransactionLimits { max_instruction_number: 4096, max_wasm_size_bytes: 0, @@ -443,7 +442,7 @@ mod tests { }); queue - .push(accepted_tx("alice@wonderland", key_pair), &wsv) + .push(accepted_tx("alice@wonderland", &key_pair), &wsv) .expect("Failed to push tx into queue"); } @@ -470,13 +469,13 @@ mod tests { for _ in 0..max_txs_in_queue { queue - .push(accepted_tx("alice@wonderland", key_pair.clone()), &wsv) + .push(accepted_tx("alice@wonderland", &key_pair), &wsv) .expect("Failed to push tx into queue"); thread::sleep(Duration::from_millis(10)); } assert!(matches!( - queue.push(accepted_tx("alice@wonderland", key_pair), &wsv), + queue.push(accepted_tx("alice@wonderland", &key_pair), &wsv), Err(Failure { 
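The signing refactor visible throughout these hunks replaces `fn sign(self, key_pair: KeyPair) -> Result<...>` with an infallible `fn sign(self, key_pair: &KeyPair)`, removing both the `key_pair.clone()` and the `.expect(...)`/`?` at every call site. A minimal standalone sketch of the new shape, using stand-in types rather than Iroha's actual definitions:

```rust
/// Stand-ins for the real key and block/transaction types.
pub struct KeyPair([u8; 32]);
pub struct Unsigned(Vec<u8>);
pub struct Signed(Vec<u8>);

impl Unsigned {
    /// New shape of the API: borrow the key and never fail. The old shape,
    /// `fn sign(self, key_pair: KeyPair) -> Result<Signed, Error>`, forced a
    /// clone and an `.expect(...)` at every call site.
    #[must_use]
    pub fn sign(self, key_pair: &KeyPair) -> Signed {
        // Stand-in "signature": real deterministic signing (e.g. Ed25519)
        // has no failure path once the key pair itself is valid.
        let mut payload = self.0;
        payload.extend_from_slice(&key_pair.0);
        Signed(payload)
    }
}

fn main() {
    let key_pair = KeyPair([7; 32]);
    // One key is reused across many signings without cloning.
    let _first: Signed = Unsigned(vec![1]).sign(&key_pair);
    let _second: Signed = Unsigned(vec![2]).sign(&key_pair);
}
```

Marking the method `#[must_use]` keeps the lint pressure the old `Result` provided: callers can no longer drop the signed value by accident.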
err: Error::Full, .. @@ -526,12 +525,9 @@ mod tests { max_wasm_size_bytes: 0, }; let fully_signed_tx: AcceptedTransaction = { - let mut signed_tx = tx - .clone() - .sign(key_pairs[0].clone()) - .expect("Failed to sign."); + let mut signed_tx = tx.clone().sign(&key_pairs[0]); for key_pair in &key_pairs[1..] { - signed_tx = signed_tx.sign(key_pair.clone()).expect("Failed to sign"); + signed_tx = signed_tx.sign(key_pair); } AcceptedTransaction::accept(signed_tx, &chain_id, &tx_limits) .expect("Failed to accept Transaction.") @@ -543,12 +539,8 @@ mod tests { )); let get_tx = |key_pair| { - AcceptedTransaction::accept( - tx.clone().sign(key_pair).expect("Failed to sign."), - &chain_id, - &tx_limits, - ) - .expect("Failed to accept Transaction.") + AcceptedTransaction::accept(tx.clone().sign(&key_pair), &chain_id, &tx_limits) + .expect("Failed to accept Transaction.") }; for key_pair in key_pairs { let partially_signed_tx: AcceptedTransaction = get_tx(key_pair); @@ -595,7 +587,7 @@ mod tests { }); for _ in 0..5 { queue - .push(accepted_tx("alice@wonderland", alice_key.clone()), &wsv) + .push(accepted_tx("alice@wonderland", &alice_key), &wsv) .expect("Failed to push tx into queue"); thread::sleep(Duration::from_millis(10)); } @@ -614,7 +606,7 @@ mod tests { kura, query_handle, ); - let tx = accepted_tx("alice@wonderland", alice_key); + let tx = accepted_tx("alice@wonderland", &alice_key); wsv.transactions.insert(tx.hash(), 1); let queue = Queue::from_configuration(&Configuration { transaction_time_to_live_ms: 100_000, @@ -644,7 +636,7 @@ mod tests { kura, query_handle, ); - let tx = accepted_tx("alice@wonderland", alice_key); + let tx = accepted_tx("alice@wonderland", &alice_key); let queue = Queue::from_configuration(&Configuration { transaction_time_to_live_ms: 100_000, max_transactions_in_queue: 100, @@ -683,13 +675,13 @@ mod tests { }); for _ in 0..(max_txs_in_block - 1) { queue - .push(accepted_tx("alice@wonderland", alice_key.clone()), &wsv) + .push(accepted_tx("alice@wonderland", &alice_key), &wsv) .expect("Failed to push tx into queue"); thread::sleep(Duration::from_millis(100)); } queue - .push(accepted_tx("alice@wonderland", alice_key.clone()), &wsv) + .push(accepted_tx("alice@wonderland", &alice_key), &wsv) .expect("Failed to push tx into queue"); std::thread::sleep(Duration::from_millis(101)); assert_eq!( @@ -700,7 +692,7 @@ mod tests { ); queue - .push(accepted_tx("alice@wonderland", alice_key), &wsv) + .push(accepted_tx("alice@wonderland", &alice_key), &wsv) .expect("Failed to push tx into queue"); std::thread::sleep(Duration::from_millis(210)); assert_eq!( @@ -732,7 +724,7 @@ mod tests { .expect("Default queue config should always build") }); queue - .push(accepted_tx("alice@wonderland", alice_key), &wsv) + .push(accepted_tx("alice@wonderland", &alice_key), &wsv) .expect("Failed to push tx into queue"); let a = queue @@ -751,6 +743,8 @@ mod tests { #[test] async fn custom_expired_transaction_is_rejected() { + const TTL_MS: u64 = 100; + let chain_id = ChainId::new("0"); let max_txs_in_block = 2; @@ -777,8 +771,8 @@ mod tests { AccountId::from_str("alice@wonderland").expect("Valid"), ) .with_instructions(instructions); - tx.set_ttl(Duration::from_millis(10)); - let tx = tx.sign(alice_key).expect("Failed to sign."); + tx.set_ttl(Duration::from_millis(TTL_MS)); + let tx = tx.sign(&alice_key); let limits = TransactionLimits { max_instruction_number: 4096, max_wasm_size_bytes: 0, @@ -790,7 +784,7 @@ mod tests { .expect("Failed to push tx into queue"); let mut txs = Vec::new(); let mut 
expired_txs = Vec::new(); - thread::sleep(Duration::from_millis(10)); + thread::sleep(Duration::from_millis(TTL_MS)); queue.get_transactions_for_block(&wsv, max_txs_in_block, &mut txs, &mut expired_txs); assert!(txs.is_empty()); assert_eq!(expired_txs.len(), 1); @@ -827,7 +821,7 @@ mod tests { // Spawn a thread where we push transactions thread::spawn(move || { while start_time.elapsed() < run_for { - let tx = accepted_tx("alice@wonderland", alice_key.clone()); + let tx = accepted_tx("alice@wonderland", &alice_key); match queue_arc_clone.push(tx, &wsv_clone) { Ok(()) | Err(Failure { @@ -891,7 +885,7 @@ mod tests { .expect("Default queue config should always build") }); - let tx = accepted_tx(alice_id, alice_key.clone()); + let tx = accepted_tx(alice_id, &alice_key); assert!(queue.push(tx.clone(), &wsv).is_ok()); // create the same tx but with timestamp in the future let tx = { @@ -904,7 +898,7 @@ mod tests { new_tx.set_creation_time(tx.0.payload().creation_time_ms + 2 * future_threshold_ms); - let new_tx = new_tx.sign(alice_key).expect("Failed to sign."); + let new_tx = new_tx.sign(&alice_key); let limits = TransactionLimits { max_instruction_number: 4096, max_wasm_size_bytes: 0, @@ -958,17 +952,11 @@ mod tests { // First push by Alice should be fine queue - .push( - accepted_tx("alice@wonderland", alice_key_pair.clone()), - &wsv, - ) + .push(accepted_tx("alice@wonderland", &alice_key_pair), &wsv) .expect("Failed to push tx into queue"); // Second push by Alice excide limit and will be rejected - let result = queue.push( - accepted_tx("alice@wonderland", alice_key_pair.clone()), - &wsv, - ); + let result = queue.push(accepted_tx("alice@wonderland", &alice_key_pair), &wsv); assert!( matches!( result, @@ -982,7 +970,7 @@ mod tests { // First push by Bob should be fine despite previous Alice error queue - .push(accepted_tx("bob@wonderland", bob_key_pair.clone()), &wsv) + .push(accepted_tx("bob@wonderland", &bob_key_pair), &wsv) .expect("Failed to push tx into queue"); let transactions = queue.collect_transactions_for_block(&wsv, 10); @@ -997,11 +985,11 @@ mod tests { // After cleanup Alice and Bob pushes should work fine queue - .push(accepted_tx("alice@wonderland", alice_key_pair), &wsv) + .push(accepted_tx("alice@wonderland", &alice_key_pair), &wsv) .expect("Failed to push tx into queue"); queue - .push(accepted_tx("bob@wonderland", bob_key_pair), &wsv) + .push(accepted_tx("bob@wonderland", &bob_key_pair), &wsv) .expect("Failed to push tx into queue"); } } diff --git a/core/src/smartcontracts/isi/query.rs b/core/src/smartcontracts/isi/query.rs index 2291930467e..ca8d4a90f35 100644 --- a/core/src/smartcontracts/isi/query.rs +++ b/core/src/smartcontracts/isi/query.rs @@ -270,14 +270,14 @@ mod tests { let instructions: [InstructionBox; 0] = []; let tx = TransactionBuilder::new(chain_id.clone(), ALICE_ID.clone()) .with_instructions(instructions) - .sign(ALICE_KEYS.clone())?; + .sign(&ALICE_KEYS); AcceptedTransaction::accept(tx, &chain_id, &limits)? }; let invalid_tx = { let isi = Fail::new("fail".to_owned()); let tx = TransactionBuilder::new(chain_id.clone(), ALICE_ID.clone()) .with_instructions([isi.clone(), isi]) - .sign(ALICE_KEYS.clone())?; + .sign(&ALICE_KEYS); AcceptedTransaction::accept(tx, &chain_id, &huge_limits)? }; @@ -287,7 +287,7 @@ mod tests { let topology = Topology::new(UniqueVec::new()); let first_block = BlockBuilder::new(transactions.clone(), topology.clone(), Vec::new()) .chain(0, &mut wsv) - .sign(ALICE_KEYS.clone())? 
+ .sign(&ALICE_KEYS) .commit(&topology) .expect("Block is valid"); @@ -297,7 +297,7 @@ mod tests { for _ in 1u64..blocks { let block = BlockBuilder::new(transactions.clone(), topology.clone(), Vec::new()) .chain(0, &mut wsv) - .sign(ALICE_KEYS.clone())? + .sign(&ALICE_KEYS) .commit(&topology) .expect("Block is valid"); @@ -420,7 +420,7 @@ mod tests { let instructions: [InstructionBox; 0] = []; let tx = TransactionBuilder::new(chain_id.clone(), ALICE_ID.clone()) .with_instructions(instructions) - .sign(ALICE_KEYS.clone())?; + .sign(&ALICE_KEYS); let tx_limits = &wsv.transaction_executor().transaction_limits; let va_tx = AcceptedTransaction::accept(tx, &chain_id, tx_limits)?; @@ -428,7 +428,7 @@ mod tests { let topology = Topology::new(UniqueVec::new()); let vcb = BlockBuilder::new(vec![va_tx.clone()], topology.clone(), Vec::new()) .chain(0, &mut wsv) - .sign(ALICE_KEYS.clone())? + .sign(&ALICE_KEYS) .commit(&topology) .expect("Block is valid"); @@ -437,7 +437,7 @@ mod tests { let unapplied_tx = TransactionBuilder::new(chain_id, ALICE_ID.clone()) .with_instructions([Unregister::account("account@domain".parse().unwrap())]) - .sign(ALICE_KEYS.clone())?; + .sign(&ALICE_KEYS); let wrong_hash = unapplied_tx.hash(); let not_found = FindTransactionByHash::new(wrong_hash).execute(&wsv); assert!(matches!( diff --git a/core/src/smartcontracts/wasm.rs b/core/src/smartcontracts/wasm.rs index cde7fe6a624..cdc3ebd1d60 100644 --- a/core/src/smartcontracts/wasm.rs +++ b/core/src/smartcontracts/wasm.rs @@ -274,6 +274,10 @@ fn create_config() -> Result { .consume_fuel(true) .cache_config_load_default() .map_err(Error::Initialization)?; + #[cfg(feature = "profiling")] + { + config.profiler(wasmtime::ProfilingStrategy::PerfMap); + } Ok(config) } diff --git a/core/src/sumeragi/main_loop.rs b/core/src/sumeragi/main_loop.rs index 1fb04aa9e7b..19aa97bfb6a 100644 --- a/core/src/sumeragi/main_loop.rs +++ b/core/src/sumeragi/main_loop.rs @@ -196,9 +196,9 @@ impl Sumeragi { if let Some(msg) = block_msg.as_ref() { let vc_index : Option = match msg { BlockMessage::BlockCreated(bc) => Some(bc.block.payload().header.view_change_index), - BlockMessage::BlockSigned(_) => None, // Signed and Committed contain no block. - BlockMessage::BlockCommitted(_) => None, - BlockMessage::BlockSyncUpdate(_) => None, // Block sync updates are exempt from early pruning + // Signed and Committed contain no block. + // Block sync updates are exempt from early pruning. + BlockMessage::BlockSigned(_) | BlockMessage::BlockCommitted(_) | BlockMessage::BlockSyncUpdate(_) => None, }; if let Some(vc_index) = vc_index { if vc_index < current_view_change_index { @@ -292,8 +292,7 @@ impl Sumeragi { let mut new_wsv = self.wsv.clone(); let genesis = BlockBuilder::new(transactions, self.current_topology.clone(), vec![]) .chain(0, &mut new_wsv) - .sign(self.key_pair.clone()) - .expect("Genesis signing failed"); + .sign(&self.key_pair); let genesis_msg = BlockCreated::from(genesis.clone()).into(); @@ -423,9 +422,7 @@ impl Sumeragi { } }; - let signed_block = block - .sign(self.key_pair.clone()) - .expect("Block signing failed"); + let signed_block = block.sign(&self.key_pair); Some(VotingBlock::new(signed_block, new_wsv)) } @@ -641,27 +638,27 @@ impl Sumeragi { if cache_full || (deadline_reached && cache_non_empty) { let transactions = self.transaction_cache.clone(); info!(%addr, txns=%transactions.len(), "Creating block..."); + let create_block_start_time = Instant::now(); // TODO: properly process triggers! 
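The sumeragi hunk continuing below times block creation and warns when it crowds out the rest of the consensus round. A condensed standalone sketch of the measure-and-warn pattern; `pipeline_time` here is a hypothetical stand-in for the round budget the diff consults:

```rust
use std::time::{Duration, Instant};

/// Stand-in for the consensus round budget (`pipeline_time()` in the diff).
fn pipeline_time() -> Duration {
    Duration::from_secs(4)
}

fn create_block() {
    // Placeholder for the real work of building and signing a block.
    std::thread::sleep(Duration::from_millis(50));
}

fn main() {
    let start = Instant::now();
    create_block();
    let created_in = start.elapsed();
    println!("Block created in {} ms", created_in.as_millis());

    // Warn while there is still time to act: if creation alone eats more
    // than half the budget, voting and commit are unlikely to finish in time.
    if created_in > pipeline_time() / 2 {
        eprintln!("block creation is eating into consensus time; consider raising commit_time or lowering max_transactions_in_block");
    }
}
```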
let mut new_wsv = self.wsv.clone(); let event_recommendations = Vec::new(); - let new_block = match BlockBuilder::new( + let new_block = BlockBuilder::new( transactions, self.current_topology.clone(), event_recommendations, ) .chain(current_view_change_index, &mut new_wsv) - .sign(self.key_pair.clone()) - { - Ok(block) => block, - Err(error) => { - error!(?error, "Failed to sign block"); - return; - } - }; + .sign(&self.key_pair); + let created_in = create_block_start_time.elapsed(); if let Some(current_topology) = current_topology.is_consensus_required() { - info!(%addr, block_payload_hash=%new_block.payload().hash(), "Block created"); + info!(%addr, created_in_ms=%created_in.as_millis(), block_payload_hash=%new_block.payload().hash(), "Block created"); + + if created_in > self.pipeline_time() / 2 { + warn!("Creating block takes too much time. This might prevent consensus from operating. Consider increasing `commit_time` or decreasing `max_transactions_in_block`"); + } + *voting_block = Some(VotingBlock::new(new_block.clone(), new_wsv)); let msg = BlockCreated::from(new_block).into(); @@ -937,8 +934,7 @@ pub(crate) fn run( let suspect_proof = ProofBuilder::new(sumeragi.wsv.latest_block_hash(), current_view_change_index) - .sign(sumeragi.key_pair.clone()) - .expect("Proof signing failed"); + .sign(&sumeragi.key_pair); view_change_proof_chain .insert_proof( @@ -1193,7 +1189,7 @@ mod tests { fn create_data_for_test( chain_id: &ChainId, topology: &Topology, - leader_key_pair: KeyPair, + leader_key_pair: &KeyPair, ) -> (WorldStateView, Arc, SignedBlock) { // Predefined world state let alice_id: AccountId = "alice@wonderland".parse().expect("Valid"); @@ -1215,8 +1211,7 @@ mod tests { // Making two transactions that have the same instruction let tx = TransactionBuilder::new(chain_id.clone(), alice_id.clone()) .with_instructions([fail_box]) - .sign(alice_keys.clone()) - .expect("Valid"); + .sign(&alice_keys); let tx = AcceptedTransaction::accept( tx, chain_id, @@ -1227,8 +1222,7 @@ mod tests { // Creating a block of two identical transactions and validating it let block = BlockBuilder::new(vec![tx.clone(), tx], topology.clone(), Vec::new()) .chain(0, &mut wsv) - .sign(leader_key_pair.clone()) - .expect("Block is valid"); + .sign(leader_key_pair); let genesis = block.commit(topology).expect("Block is valid"); wsv.apply(&genesis).expect("Failed to apply block"); @@ -1244,8 +1238,7 @@ mod tests { let tx1 = TransactionBuilder::new(chain_id.clone(), alice_id.clone()) .with_instructions([create_asset_definition1]) - .sign(alice_keys.clone()) - .expect("Valid"); + .sign(&alice_keys); let tx1 = AcceptedTransaction::accept( tx1, chain_id, @@ -1255,8 +1248,7 @@ mod tests { .expect("Valid"); let tx2 = TransactionBuilder::new(chain_id.clone(), alice_id) .with_instructions([create_asset_definition2]) - .sign(alice_keys) - .expect("Valid"); + .sign(&alice_keys); let tx2 = AcceptedTransaction::accept( tx2, chain_id, @@ -1268,8 +1260,7 @@ mod tests { // Creating a block of two identical transactions and validating it let block = BlockBuilder::new(vec![tx1, tx2], topology.clone(), Vec::new()) .chain(0, &mut wsv.clone()) - .sign(leader_key_pair) - .expect("Block is valid"); + .sign(leader_key_pair); (wsv, kura, block.into()) } @@ -1281,11 +1272,11 @@ mod tests { let leader_key_pair = KeyPair::generate().unwrap(); let topology = Topology::new(unique_vec![PeerId::new( - &"127.0.0.1:8080".parse().unwrap(), - leader_key_pair.public_key(), + "127.0.0.1:8080".parse().unwrap(), + leader_key_pair.public_key().clone(), 
)]); let (finalized_wsv, _, mut block) = - create_data_for_test(&chain_id, &topology, leader_key_pair); + create_data_for_test(&chain_id, &topology, &leader_key_pair); let wsv = finalized_wsv.clone(); // Malform block to make it invalid @@ -1301,11 +1292,11 @@ mod tests { let leader_key_pair = KeyPair::generate().unwrap(); let topology = Topology::new(unique_vec![PeerId::new( - &"127.0.0.1:8080".parse().unwrap(), - leader_key_pair.public_key(), + "127.0.0.1:8080".parse().unwrap(), + leader_key_pair.public_key().clone(), )]); let (finalized_wsv, kura, mut block) = - create_data_for_test(&chain_id, &topology, leader_key_pair); + create_data_for_test(&chain_id, &topology, &leader_key_pair); let mut wsv = finalized_wsv.clone(); let validated_block = @@ -1333,7 +1324,7 @@ mod tests { let topology = Topology::new(UniqueVec::new()); let leader_key_pair = KeyPair::generate().unwrap(); let (finalized_wsv, _, mut block) = - create_data_for_test(&chain_id, &topology, leader_key_pair); + create_data_for_test(&chain_id, &topology, &leader_key_pair); let wsv = finalized_wsv.clone(); // Change block height @@ -1359,10 +1350,11 @@ mod tests { let leader_key_pair = KeyPair::generate().unwrap(); let topology = Topology::new(unique_vec![PeerId::new( - &"127.0.0.1:8080".parse().unwrap(), - leader_key_pair.public_key(), + "127.0.0.1:8080".parse().unwrap(), + leader_key_pair.public_key().clone(), )]); - let (finalized_wsv, _, block) = create_data_for_test(&chain_id, &topology, leader_key_pair); + let (finalized_wsv, _, block) = + create_data_for_test(&chain_id, &topology, &leader_key_pair); let wsv = finalized_wsv.clone(); let result = handle_block_sync(&chain_id, block, &wsv, &finalized_wsv); assert!(matches!(result, Ok(BlockSyncOk::CommitBlock(_, _)))) @@ -1374,11 +1366,11 @@ mod tests { let leader_key_pair = KeyPair::generate().unwrap(); let topology = Topology::new(unique_vec![PeerId::new( - &"127.0.0.1:8080".parse().unwrap(), - leader_key_pair.public_key(), + "127.0.0.1:8080".parse().unwrap(), + leader_key_pair.public_key().clone(), )]); let (finalized_wsv, kura, mut block) = - create_data_for_test(&chain_id, &topology, leader_key_pair); + create_data_for_test(&chain_id, &topology, &leader_key_pair); let mut wsv = finalized_wsv.clone(); let validated_block = @@ -1402,11 +1394,11 @@ mod tests { let leader_key_pair = KeyPair::generate().unwrap(); let topology = Topology::new(unique_vec![PeerId::new( - &"127.0.0.1:8080".parse().unwrap(), - leader_key_pair.public_key(), + "127.0.0.1:8080".parse().unwrap(), + leader_key_pair.public_key().clone(), )]); let (finalized_wsv, kura, mut block) = - create_data_for_test(&chain_id, &topology, leader_key_pair); + create_data_for_test(&chain_id, &topology, &leader_key_pair); let mut wsv = finalized_wsv.clone(); // Increase block view change index @@ -1444,7 +1436,7 @@ mod tests { let topology = Topology::new(UniqueVec::new()); let leader_key_pair = KeyPair::generate().unwrap(); let (finalized_wsv, _, mut block) = - create_data_for_test(&chain_id, &topology, leader_key_pair); + create_data_for_test(&chain_id, &topology, &leader_key_pair); let wsv = finalized_wsv.clone(); // Change block height and view change index diff --git a/core/src/sumeragi/network_topology.rs b/core/src/sumeragi/network_topology.rs index 4ba77806e45..35328b6298f 100644 --- a/core/src/sumeragi/network_topology.rs +++ b/core/src/sumeragi/network_topology.rs @@ -92,19 +92,19 @@ impl Topology { for role in roles { match (role, self.is_non_empty(), self.is_consensus_required()) { (Role::Leader, 
Some(topology), _) => { - public_keys.insert(&topology.leader().public_key); + public_keys.insert(topology.leader().public_key()); } (Role::ProxyTail, _, Some(topology)) => { public_keys.insert(&topology.proxy_tail().public_key); } (Role::ValidatingPeer, _, Some(topology)) => { for peer in topology.validating_peers() { - public_keys.insert(&peer.public_key); + public_keys.insert(peer.public_key()); } } (Role::ObservingPeer, _, Some(topology)) => { for peer in topology.observing_peers() { - public_keys.insert(&peer.public_key); + public_keys.insert(peer.public_key()); } } _ => {} @@ -194,6 +194,26 @@ impl Topology { // Rotate all once for every view_change topology.rotate_all_n(view_change_index); + { + // FIXME: This is a hack to prevent consensus from running amok due to + // a bug in the implementation by reverting to predictable ordering + + let view_change_limit: usize = view_change_index + .saturating_sub(10) + .try_into() + .expect("u64 must fit into usize"); + + if view_change_limit > 1 { + iroha_logger::error!("Restarting consensus (internal bug). Report to developers"); + let mut peers: Vec<_> = topology.ordered_peers.iter().cloned().collect(); + + peers.sort(); + let peers_count = peers.len(); + peers.rotate_right(view_change_limit % peers_count); + topology = Topology::new(peers.into_iter().collect()); + } + } + topology } @@ -265,7 +285,7 @@ macro_rules! test_peers { }}; ($($id:literal),+$(,)?: $key_pair_iter:expr) => { ::iroha_primitives::unique_vec![ - $(PeerId::new(&(([0, 0, 0, 0], $id).into()), $key_pair_iter.next().expect("Not enough key pairs").public_key())),+ + $(PeerId::new(([0, 0, 0, 0], $id).into(), $key_pair_iter.next().expect("Not enough key pairs").public_key().clone())),+ ] }; } @@ -344,7 +364,7 @@ mod tests { let dummy = "value to sign"; let signatures = key_pairs .iter() - .map(|key_pair| SignatureOf::new(key_pair.clone(), &dummy).expect("Failed to sign")) + .map(|key_pair| SignatureOf::new(key_pair, &dummy)) .collect::>>(); let leader_signatures = @@ -386,7 +406,7 @@ mod tests { let dummy = "value to sign"; let signatures = key_pairs .iter() - .map(|key_pair| SignatureOf::new(key_pair.clone(), &dummy).expect("Failed to sign")) + .map(|key_pair| SignatureOf::new(key_pair, &dummy)) .collect::>>(); let leader_signatures = @@ -419,7 +439,7 @@ mod tests { let dummy = "value to sign"; let signatures = key_pairs .iter() - .map(|key_pair| SignatureOf::new(key_pair.clone(), &dummy).expect("Failed to sign")) + .map(|key_pair| SignatureOf::new(key_pair, &dummy)) .collect::>>(); let leader_signatures = @@ -453,7 +473,7 @@ mod tests { let dummy = "value to sign"; let signatures = key_pairs .iter() - .map(|key_pair| SignatureOf::new(key_pair.clone(), &dummy).expect("Failed to sign")) + .map(|key_pair| SignatureOf::new(key_pair, &dummy)) .collect::>>(); let leader_signatures = @@ -488,7 +508,7 @@ mod tests { let dummy = "value to sign"; let signatures = key_pairs .iter() - .map(|key_pair| SignatureOf::new(key_pair.clone(), &dummy).expect("Failed to sign")) + .map(|key_pair| SignatureOf::new(key_pair, &dummy)) .collect::>>(); let leader_signatures = diff --git a/core/src/sumeragi/view_change.rs b/core/src/sumeragi/view_change.rs index 0b0ed73032c..9a24f0ece33 100644 --- a/core/src/sumeragi/view_change.rs +++ b/core/src/sumeragi/view_change.rs @@ -4,7 +4,7 @@ use derive_more::{Deref, DerefMut}; use eyre::Result; use indexmap::IndexSet; -use iroha_crypto::{HashOf, KeyPair, PublicKey, SignatureOf, SignaturesOf}; +use iroha_crypto::{HashOf, KeyPair, SignatureOf, SignaturesOf};
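The `Topology` fallback above restores a predictable ordering once the view-change index passes its threshold: every peer sorts the peer set into a canonical order and applies the same bounded rotation, so all nodes derive an identical schedule without extra coordination. A standalone sketch of that ordering logic, with plain strings standing in for `PeerId`s:

```rust
/// Sketch of the deterministic reordering: sort into a canonical order, then
/// rotate by the bounded view-change count. Every node computes the same list.
fn fallback_order(mut peers: Vec<String>, view_change_index: u64) -> Vec<String> {
    let view_change_limit = usize::try_from(view_change_index.saturating_sub(10))
        .expect("u64 must fit into usize");
    if view_change_limit > 1 && !peers.is_empty() {
        peers.sort();
        let peers_count = peers.len();
        peers.rotate_right(view_change_limit % peers_count);
    }
    peers
}

fn main() {
    let peers = vec!["peer-c".to_owned(), "peer-a".to_owned(), "peer-b".to_owned()];
    // Index 14 gives limit 4; 4 % 3 == 1, so the sorted list rotates right by one.
    assert_eq!(
        fallback_order(peers, 14),
        ["peer-c", "peer-a", "peer-b"] // sorted: a, b, c; rotate_right(1): c, a, b
    );
}
```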
use iroha_data_model::{block::SignedBlock, prelude::PeerId}; use parity_scale_codec::{Decode, Encode}; use thiserror::Error; @@ -54,13 +54,10 @@ impl ProofBuilder { } /// Sign this message with the peer's public and private key. - /// - /// # Errors - /// Can fail during creation of signature - pub fn sign(mut self, key_pair: KeyPair) -> Result { - let signature = SignatureOf::new(key_pair, &self.0.payload)?; + pub fn sign(mut self, key_pair: &KeyPair) -> SignedProof { + let signature = SignatureOf::new(key_pair, &self.0.payload); self.0.signatures.insert(signature); - Ok(self.0) + self.0 } } @@ -76,8 +73,7 @@ impl SignedProof { /// Verify if the proof is valid, given the peers in `topology`. fn verify(&self, peers: &[PeerId], max_faults: usize) -> bool { - let peer_public_keys: IndexSet<&PublicKey> = - peers.iter().map(|peer_id| &peer_id.public_key).collect(); + let peer_public_keys: IndexSet<_> = peers.iter().map(PeerId::public_key).collect(); let valid_count = self .signatures diff --git a/core/test_network/src/lib.rs b/core/test_network/src/lib.rs index d138addc7aa..c88f0ba1d55 100644 --- a/core/test_network/src/lib.rs +++ b/core/test_network/src/lib.rs @@ -486,10 +486,7 @@ impl Peer { let key_pair = KeyPair::generate()?; let p2p_address = local_unique_port()?; let api_address = local_unique_port()?; - let id = PeerId { - address: p2p_address.clone(), - public_key: key_pair.public_key().clone(), - }; + let id = PeerId::new(p2p_address.clone(), key_pair.public_key().clone()); let shutdown = None; Ok(Self { id, diff --git a/crypto/Cargo.toml b/crypto/Cargo.toml index d3f9684b8b2..4cff389c997 100644 --- a/crypto/Cargo.toml +++ b/crypto/Cargo.toml @@ -11,28 +11,25 @@ license.workspace = true workspace = true [features] -default = ["std"] +default = ["std", "rand"] # Enable static linkage of the rust standard library. 
# Please refer to https://docs.rust-embedded.org/book/intro/no-std.html std = [ - "dep:blake2", - "dep:digest", - "dep:sha2", - "dep:hkdf", - "dep:amcl", - "dep:amcl_wrapper", - "dep:signature", - "dep:ed25519-dalek", - "dep:curve25519-dalek", - "dep:x25519-dalek", - "dep:rand", - "dep:rand_chacha", - "dep:zeroize", - "dep:arrayref", - "dep:aead", - "dep:chacha20poly1305", - "dep:elliptic-curve", - "dep:k256", + "rand/std", + "blake2/std", + "digest/std", + "sha2/std", + "hkdf/std", + "w3f-bls/std", + "signature/std", + "ed25519-dalek/std", + "rand/std", + "rand_chacha/std", + "zeroize/std", + "aead/std", + "chacha20poly1305/std", + "elliptic-curve/std", + "k256/std", "dep:thiserror", "displaydoc/std", ] @@ -42,6 +39,9 @@ std = [ # Expose FFI API for dynamic linking (Internal use only) ffi_export = ["std", "iroha_ffi", "iroha_primitives/ffi_export"] +# Allow creating key using random number generator which is tricky in some environments like Smart Contracts +rand = [] + [dependencies] iroha_primitives = { workspace = true } iroha_macro = { workspace = true } @@ -58,37 +58,37 @@ getset = { workspace = true } thiserror = { version = "1.0.50", optional = true } displaydoc = { version = "0.2.4", default-features = false } -digest = { version = "0.10.7", optional = true } -blake2 = { version = "0.10.6", optional = true } -sha2 = { version = "0.10.8", optional = true } -hkdf = { version = "0.12.3", optional = true } -amcl = { version = "0.2.0", optional = true, default-features = false, features = ["secp256k1"] } -amcl_wrapper = { version = "0.4.0", optional = true } +digest = { version = "0.10.7", default-features = false, features = ["alloc"]} +blake2 = { version = "0.10.6", default-features = false } +sha2 = { version = "0.10.8", default-features = false } +hkdf = { version = "0.12.3", default-features = false } +w3f-bls = { version = "0.1.3", default-features = false } -signature = { version = "2.1.0", optional = true } -ed25519-dalek = { version = "2.0.0", optional = true, features = ["rand_core"] } -curve25519-dalek = { version = "4.1.1", optional = true } -x25519-dalek = { version = "2.0.0", optional = true, features = ["static_secrets"] } +signature = { version = "2.1.0", default-features = false, features = ["alloc"] } +ed25519-dalek = { version = "2.1.0", default-features = false, features = ["alloc", "rand_core", "zeroize"] } +curve25519-dalek = { version = "4.1.1", default-features = false } +x25519-dalek = { version = "2.0.0", default-features = false, features = ["static_secrets"] } -rand = { workspace = true, optional = true } -rand_chacha = { version = "0.3.1", optional = true } +rand = { workspace = true, default-features = false, features = ["std_rng", "alloc"]} +rand_chacha = { version = "0.3.1", default-features = false } -zeroize = { version = "1.6.0", optional = true } -arrayref = { version = "0.3.7", optional = true } +zeroize = { version = "1.6.0", default-features = false } +arrayref = { version = "0.3.7", default-features = false } -aead = { version = "0.5.2", optional = true } -chacha20poly1305 = { version = "0.10.1", optional = true } +aead = { version = "0.5.2", default-features = false, features = ["alloc"] } +chacha20poly1305 = { version = "0.10.1", default-features = false } -elliptic-curve = { version = "0.13.6", optional = true } -k256 = { version = "0.13.1", optional = true, features = ["ecdsa", "sha256"]} +elliptic-curve = { version = "0.13.6", default-features = false } +k256 = { version = "0.13.1", default-features = false, features = ["alloc", "ecdsa", 
"sha256"]} [dev-dependencies] hex-literal = { workspace = true } -serde_json = { workspace = true } +serde_json = { workspace = true, features = ["std"]} # these crypto libraries are not used to implement actual crypto algorithms # but to test some of the primitives against them +amcl = { version = "0.2.0", default-features = false, features = ["secp256k1"] } secp256k1 = { version = "0.28.0", features = ["rand", "serde"] } libsodium-sys-stable = "1.20.3" openssl = { version = "0.10.59", features = ["vendored"] } diff --git a/crypto/src/encryption/chacha20poly1305.rs b/crypto/src/encryption/chacha20poly1305.rs index 5def1a5932a..dbb158afdd9 100644 --- a/crypto/src/encryption/chacha20poly1305.rs +++ b/crypto/src/encryption/chacha20poly1305.rs @@ -1,3 +1,6 @@ +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + use aead::{ generic_array::{ typenum::{U0, U12, U16, U32, U36}, diff --git a/crypto/src/encryption/mod.rs b/crypto/src/encryption/mod.rs index d72deacc7fb..63be1eab7f2 100644 --- a/crypto/src/encryption/mod.rs +++ b/crypto/src/encryption/mod.rs @@ -17,22 +17,25 @@ mod chacha20poly1305; +#[cfg(not(feature = "std"))] +use alloc::{vec, vec::Vec}; + use aead::{ generic_array::{typenum::Unsigned, ArrayLength, GenericArray}, Aead, Error as AeadError, KeyInit, Payload, }; use displaydoc::Display; use rand::{rngs::OsRng, RngCore}; -use thiserror::Error; pub use self::chacha20poly1305::ChaCha20Poly1305; use crate::SessionKey; /// An error that can occur during encryption or decryption -#[derive(Error, Display, Debug)] +#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(Display, Debug)] pub enum Error { /// Failed to generate nonce for an encryption operation - NonceGeneration(#[source] rand::Error), + NonceGeneration(#[cfg_attr(feature = "std", source)] rand::Error), /// Failed to encrypt data Encryption(AeadError), /// Failed to decrypt data @@ -68,10 +71,8 @@ fn random_bytes>() -> Result, Error> { /// let encryptor = SymmetricEncryptor::::new_with_key(&key); /// let aad = b"Using ChaCha20Poly1305 to encrypt data"; /// let message = b"Hidden message"; -/// let res = encryptor.encrypt_easy(aad.as_ref(), message.as_ref()); -/// assert!(res.is_ok()); +/// let ciphertext = encryptor.encrypt_easy(aad.as_ref(), message.as_ref()).unwrap(); /// -/// let ciphertext = res.unwrap(); /// let res = encryptor.decrypt_easy(aad.as_ref(), ciphertext.as_slice()); /// assert_eq!(res.unwrap().as_slice(), message); /// ``` diff --git a/crypto/src/hash.rs b/crypto/src/hash.rs index 86505f7d202..0351c9d71c1 100644 --- a/crypto/src/hash.rs +++ b/crypto/src/hash.rs @@ -2,20 +2,18 @@ use alloc::{borrow::ToOwned as _, format, string::String, vec, vec::Vec}; use core::{hash, marker::PhantomData, num::NonZeroU8, str::FromStr}; -#[cfg(all(feature = "std", not(feature = "ffi_import")))] +#[cfg(not(feature = "ffi_import"))] use blake2::{ digest::{Update, VariableOutput}, Blake2bVar, }; use derive_more::{DebugCustom, Deref, DerefMut, Display}; -#[cfg(any(feature = "std", feature = "ffi_import"))] -use iroha_macro::ffi_impl_opaque; use iroha_schema::{IntoSchema, TypeId}; use parity_scale_codec::{Decode, Encode}; use serde::{Deserialize, Serialize}; use serde_with::DeserializeFromStr; -use crate::{error::Error, hex_decode}; +use crate::{hex_decode, ParseError}; /// Hash of Iroha entities. Currently supports only blake2b-32. /// The least significant bit of hash is set to 1. 
@@ -64,8 +62,6 @@ impl Hash { } } -#[cfg(any(feature = "std", feature = "ffi_import"))] -#[ffi_impl_opaque] impl Hash { /// Hash the given bytes. #[must_use] @@ -142,11 +138,11 @@ impl Encode for Hash { } impl FromStr for Hash { - type Err = Error; + type Err = ParseError; fn from_str(key: &str) -> Result { let hash: [u8; Self::LENGTH] = hex_decode(key)?.try_into().map_err(|hash_vec| { - Error::Parse(format!( + ParseError(format!( "Unable to parse {hash_vec:?} as [u8; {}]", Self::LENGTH )) @@ -154,7 +150,7 @@ impl FromStr for Hash { Hash::is_lsb_1(&hash) .then_some(hash) - .ok_or_else(|| Error::Parse("expect least significant bit of hash to be 1".to_owned())) + .ok_or_else(|| ParseError("expect least significant bit of hash to be 1".to_owned())) .map(Self::prehashed) } } @@ -221,6 +217,7 @@ impl Clone for HashOf { } impl Copy for HashOf {} +#[allow(clippy::unconditional_recursion)] // False-positive impl PartialEq for HashOf { fn eq(&self, other: &Self) -> bool { self.0.eq(&other.0) @@ -269,7 +266,6 @@ impl HashOf { } } -#[cfg(any(feature = "std", feature = "ffi_import"))] impl HashOf { /// Construct typed hash #[must_use] @@ -318,13 +314,9 @@ mod ffi { #[cfg(test)] mod tests { - #[cfg(feature = "std")] - #[cfg(not(feature = "ffi_import"))] use super::*; #[test] - #[cfg(feature = "std")] - #[cfg(not(feature = "ffi_import"))] fn blake2_32b() { let mut hasher = Blake2bVar::new(32).unwrap(); hasher.update(&hex_literal::hex!("6920616d2064617461")); diff --git a/crypto/src/kex/mod.rs b/crypto/src/kex/mod.rs index f76bc4ac090..37e9696c9e7 100644 --- a/crypto/src/kex/mod.rs +++ b/crypto/src/kex/mod.rs @@ -14,25 +14,27 @@ use crate::{Error, KeyGenOption, PrivateKey, PublicKey, SessionKey}; pub trait KeyExchangeScheme { /// Generate a new instance of the scheme fn new() -> Self; + /// Create new keypairs. If - /// `options` is None, the keys are generated ephemerally from the `OsRng` - /// `options` is `UseSeed`, the keys are generated ephemerally from the sha256 hash of the seed which is - /// then used to seed the `ChaChaRng` - /// `options` is `FromPrivateKey`, the corresponding public key is returned. This should be used for + /// - `options` is [`Random`](KeyGenOption::Random), the keys are generated ephemerally from the [`OsRng`](rand::rngs::OsRng) + /// - `options` is [`UseSeed`](KeyGenOption::UseSeed), the keys are generated ephemerally from the sha256 hash of the seed which is + /// then used to seed the [`ChaChaRng`](rand_chacha::ChaChaRng) + /// - `options` is [`FromPrivateKey`](KeyGenOption::FromPrivateKey), the corresponding public key is returned. This should be used for /// static Diffie-Hellman and loading a long-term key. - /// - /// # Errors - /// - /// Returns an error if the key generation fails. - fn keypair(&self, options: Option) -> Result<(PublicKey, PrivateKey), Error>; + fn keypair(&self, options: KeyGenOption) -> (PublicKey, PrivateKey); + /// Compute the diffie-hellman shared secret. /// `local_private_key` is the key generated from calling `keypair` while /// `remote_public_key` is the key received from a different call to `keypair` from another party. + /// + /// # Errors + /// + /// Returns an error if the computation fails, i.e. remote key is invalid. fn compute_shared_secret( &self, local_private_key: &PrivateKey, remote_public_key: &PublicKey, - ) -> SessionKey; + ) -> Result; /// Size of the shared secret in bytes. 
const SHARED_SECRET_SIZE: usize; diff --git a/crypto/src/kex/x25519.rs b/crypto/src/kex/x25519.rs index 63e5c7f6325..d6c9b6e97f5 100644 --- a/crypto/src/kex/x25519.rs +++ b/crypto/src/kex/x25519.rs @@ -1,15 +1,19 @@ +#[cfg(not(feature = "std"))] +use alloc::{borrow::ToOwned as _, boxed::Box}; +use core::borrow::Borrow as _; + use arrayref::array_ref; use iroha_primitives::const_vec::ConstVec; -use rand::{rngs::OsRng, SeedableRng}; +#[cfg(feature = "rand")] +use rand::rngs::OsRng; +use rand::SeedableRng; use rand_chacha::ChaChaRng; use sha2::Digest; use x25519_dalek::{PublicKey as X25519PublicKey, StaticSecret}; use zeroize::Zeroize; -const ALGORITHM: Algorithm = Algorithm::Ed25519; - use super::KeyExchangeScheme; -use crate::{Algorithm, Error, KeyGenOption, PrivateKey, PublicKey, SessionKey}; +use crate::{Error, KeyGenOption, ParseError, PrivateKey, PublicKey, SessionKey}; /// Implements the [`KeyExchangeScheme`] using X25519 key exchange and SHA256 hash function. #[derive(Copy, Clone)] @@ -20,53 +24,87 @@ impl KeyExchangeScheme for X25519Sha256 { Self } - fn keypair(&self, mut option: Option) -> Result<(PublicKey, PrivateKey), Error> { + /// # Note about implementation + /// + /// We encode the `X25519` public key as an [`Ed25519`](PublicKey::Ed25519) public key which is + /// a not so good idea, because we have to do extra computations and extra error handling. + /// + /// See #4174 for more details. + fn keypair(&self, mut option: KeyGenOption) -> (PublicKey, PrivateKey) { let (pk, sk) = match option { - Some(KeyGenOption::UseSeed(ref mut s)) => { - let hash = sha2::Sha256::digest(s.as_slice()); - s.zeroize(); - let rng = ChaChaRng::from_seed(*array_ref!(hash.as_slice(), 0, 32)); + #[cfg(feature = "rand")] + KeyGenOption::Random => { + let rng = OsRng; let sk = StaticSecret::random_from_rng(rng); let pk = X25519PublicKey::from(&sk); (pk, sk) } - Some(KeyGenOption::FromPrivateKey(ref s)) => { - assert_eq!(s.digest_function, ALGORITHM); - let sk = StaticSecret::from(*array_ref!(&s.payload, 0, 32)); + KeyGenOption::UseSeed(ref mut s) => { + let hash = sha2::Sha256::digest(s.as_slice()); + s.zeroize(); + let rng = ChaChaRng::from_seed(*array_ref!(hash.as_slice(), 0, 32)); + let sk = StaticSecret::random_from_rng(rng); let pk = X25519PublicKey::from(&sk); (pk, sk) } - None => { - let rng = OsRng; - let sk = StaticSecret::random_from_rng(rng); + KeyGenOption::FromPrivateKey(ref s) => { + let crate::PrivateKeyInner::Ed25519(s) = s.0.borrow() else { + panic!("Wrong private key type, expected `Ed25519`, got {s:?}") + }; + let sk = StaticSecret::from(*array_ref!(s.as_bytes(), 0, 32)); let pk = X25519PublicKey::from(&sk); (pk, sk) } }; - Ok(( - PublicKey { - digest_function: ALGORITHM, - payload: ConstVec::new(pk.as_bytes().to_vec()), - }, - PrivateKey { - digest_function: ALGORITHM, - payload: ConstVec::new(sk.to_bytes().to_vec()), - }, - )) + + let montgomery = curve25519_dalek::MontgomeryPoint(pk.to_bytes()); + // 0 here means the positive sign, but it doesn't matter, because in + // `compute_shared_secret()` we convert it back to Montgomery form losing the sign. 
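The comment above argues that the sign passed to `to_edwards` is irrelevant because `compute_shared_secret` converts back to Montgomery form, where the sign is lost. A tiny standalone check of that round-trip using curve25519-dalek 4.x, with the X25519 base point as a sample input:

```rust
use curve25519_dalek::MontgomeryPoint;

fn main() {
    // X25519 base point (u = 9), a valid Montgomery u-coordinate.
    let mut u = [0u8; 32];
    u[0] = 9;
    let montgomery = MontgomeryPoint(u);

    for sign in [0u8, 1u8] {
        let edwards = montgomery.to_edwards(sign).expect("valid point");
        // Both sign choices map back to the same Montgomery point, so
        // hard-coding 0 in `keypair` loses nothing.
        assert_eq!(edwards.to_montgomery().to_bytes(), montgomery.to_bytes());
    }
}
```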
+ let edwards = montgomery + .to_edwards(0) + .expect("Montgomery to Edwards conversion failed"); + let edwards_compressed = edwards.compress(); + + ( + PublicKey(Box::new(crate::PublicKeyInner::Ed25519( + crate::ed25519::PublicKey::from_bytes(edwards_compressed.as_bytes()).expect( + "Ed25519 public key should be possible to create from X25519 public key", + ), + ))), + PrivateKey(Box::new(crate::PrivateKeyInner::Ed25519( + crate::ed25519::PrivateKey::from_bytes(sk.as_bytes()), + ))), + ) } fn compute_shared_secret( &self, local_private_key: &PrivateKey, remote_public_key: &PublicKey, - ) -> SessionKey { - assert_eq!(local_private_key.digest_function, ALGORITHM); - assert_eq!(remote_public_key.digest_function, ALGORITHM); - let sk = StaticSecret::from(*array_ref!(&local_private_key.payload, 0, 32)); - let pk = X25519PublicKey::from(*array_ref!(&remote_public_key.payload, 0, 32)); + ) -> Result { + let crate::PrivateKeyInner::Ed25519(local_private_key) = local_private_key.0.borrow() + else { + panic!("Wrong private key type, expected `Ed25519`, got {local_private_key:?}") + }; + let crate::PublicKeyInner::Ed25519(remote_public_key) = remote_public_key.0.borrow() else { + panic!("Wrong public key type, expected `Ed25519`, got {remote_public_key:?}") + }; + + let sk = StaticSecret::from(*local_private_key.as_bytes()); + + let pk_slice: &[u8; 32] = remote_public_key.as_bytes(); + let edwards_compressed = + curve25519_dalek::edwards::CompressedEdwardsY::from_slice(pk_slice) + .expect("Ed25519 public key has 32 bytes"); + let edwards = edwards_compressed.decompress().ok_or_else(|| { + ParseError("Invalid public key: failed to decompress edwards point".to_owned()) + })?; + let montgomery = edwards.to_montgomery(); + let pk = X25519PublicKey::from(montgomery.to_bytes()); + let shared_secret = sk.diffie_hellman(&pk); let hash = sha2::Sha256::digest(shared_secret.as_bytes()); - SessionKey(ConstVec::new(hash.as_slice().to_vec())) + Ok(SessionKey(ConstVec::new(hash.as_slice().to_vec()))) } const SHARED_SECRET_SIZE: usize = 32; @@ -81,17 +119,19 @@ mod tests { #[test] fn key_exchange() { let scheme = X25519Sha256::new(); - let (public_key1, secret_key1) = scheme.keypair(None).unwrap(); - let _res = scheme.compute_shared_secret(&secret_key1, &public_key1); - let res = scheme.keypair(None); - let (public_key2, secret_key2) = res.unwrap(); - let _res = scheme.compute_shared_secret(&secret_key2, &public_key1); - let _res = scheme.compute_shared_secret(&secret_key1, &public_key2); - - let (public_key2, secret_key1) = scheme - .keypair(Some(KeyGenOption::FromPrivateKey(secret_key1))) + let (public_key1, secret_key1) = scheme.keypair(KeyGenOption::Random); + + let (public_key2, secret_key2) = scheme.keypair(KeyGenOption::Random); + let shared_secret1 = scheme + .compute_shared_secret(&secret_key2, &public_key1) .unwrap(); + let shared_secret2 = scheme + .compute_shared_secret(&secret_key1, &public_key2) + .unwrap(); + assert_eq!(shared_secret1.payload(), shared_secret2.payload()); + + let (public_key2, _secret_key1) = + scheme.keypair(KeyGenOption::FromPrivateKey(Box::new(secret_key1))); assert_eq!(public_key2, public_key1); - assert_eq!(secret_key1, secret_key1); } } diff --git a/crypto/src/lib.rs b/crypto/src/lib.rs index 5652c2965a8..0db94fcbb37 100755 --- a/crypto/src/lib.rs +++ b/crypto/src/lib.rs @@ -4,11 +4,9 @@ #[cfg(not(feature = "std"))] extern crate alloc; -#[cfg(feature = "std")] #[cfg(not(feature = "ffi_import"))] pub mod encryption; mod hash; -#[cfg(feature = "std")] #[cfg(not(feature = 
"ffi_import"))] pub mod kex; mod merkle; @@ -20,31 +18,32 @@ mod varint; #[cfg(not(feature = "std"))] use alloc::{ + borrow::ToOwned as _, + boxed::Box, format, string::{String, ToString as _}, + vec, vec::Vec, }; -use core::{fmt, str::FromStr}; +use core::{borrow::Borrow, fmt, str::FromStr}; #[cfg(feature = "base64")] pub use base64; -#[cfg(feature = "std")] #[cfg(not(feature = "ffi_import"))] pub use blake2; -use derive_more::{DebugCustom, Display}; -use error::{Error, NoSuchAlgorithm}; -use getset::{CopyGetters, Getters}; +use derive_more::Display; +use error::{Error, NoSuchAlgorithm, ParseError}; +use getset::Getters; pub use hash::*; use iroha_macro::ffi_impl_opaque; use iroha_primitives::const_vec::ConstVec; -use iroha_schema::IntoSchema; +use iroha_schema::{Declaration, IntoSchema, MetaMap, Metadata, NamedFieldsMeta, TypeId}; pub use merkle::MerkleTree; #[cfg(not(feature = "ffi_import"))] use parity_scale_codec::{Decode, Encode}; -#[cfg(feature = "std")] -use serde::Deserialize; -use serde::Serialize; +use serde::{ser::SerializeStruct, Deserialize, Serialize}; use serde_with::{DeserializeFromStr, SerializeDisplay}; +use w3f_bls::SerializableToBytes; pub use self::signature::*; @@ -115,19 +114,22 @@ impl FromStr for Algorithm { #[cfg(not(feature = "ffi_import"))] #[derive(Debug, Clone)] pub enum KeyGenOption { + /// Use random number generator + #[cfg(feature = "rand")] + Random, /// Use seed UseSeed(Vec), /// Derive from private key - FromPrivateKey(PrivateKey), + FromPrivateKey(Box), } ffi::ffi_item! { /// Configuration of key generation - #[derive(Clone, Default)] + #[derive(Clone)] #[cfg_attr(not(feature="ffi_import"), derive(Debug))] pub struct KeyGenConfiguration { /// Options - key_gen_option: Option, + key_gen_option: KeyGenOption, /// Algorithm algorithm: Algorithm, } @@ -135,18 +137,32 @@ ffi::ffi_item! 
{ #[ffi_impl_opaque] impl KeyGenConfiguration { - /// Use seed + /// Construct using random number generation with [`Ed25519`](Algorithm::Ed25519) algorithm + #[cfg(feature = "rand")] #[must_use] - pub fn use_seed(mut self, seed: Vec) -> Self { - self.key_gen_option = Some(KeyGenOption::UseSeed(seed)); - self + pub fn from_random() -> Self { + Self { + key_gen_option: KeyGenOption::Random, + algorithm: Algorithm::default(), + } } - /// Use private key + /// Construct using seed with [`Ed25519`](Algorithm::Ed25519) algorithm #[must_use] - pub fn use_private_key(mut self, private_key: PrivateKey) -> Self { - self.key_gen_option = Some(KeyGenOption::FromPrivateKey(private_key)); - self + pub fn from_seed(seed: Vec) -> Self { + Self { + key_gen_option: KeyGenOption::UseSeed(seed), + algorithm: Algorithm::default(), + } + } + + /// Construct using private key with [`Ed25519`](Algorithm::Ed25519) algorithm + #[must_use] + pub fn from_private_key(private_key: impl Into>) -> Self { + Self { + key_gen_option: KeyGenOption::FromPrivateKey(private_key.into()), + algorithm: Algorithm::default(), + } } /// With algorithm @@ -175,30 +191,26 @@ impl KeyPair { /// /// # Errors /// Fails if decoding fails - #[cfg(any(feature = "std", feature = "ffi_import"))] + #[cfg(feature = "rand")] pub fn generate() -> Result { - Self::generate_with_configuration(KeyGenConfiguration::default()) + Self::generate_with_configuration(KeyGenConfiguration::from_random()) } } #[ffi_impl_opaque] impl KeyPair { - /// Digest function - pub fn digest_function(&self) -> Algorithm { - self.private_key.digest_function() + /// Algorithm + pub fn algorithm(&self) -> Algorithm { + self.private_key.algorithm() } - /// Construct `KeyPair` - /// + /// Construct a [`KeyPair`] /// # Errors /// If public and private key don't match, i.e. 
if they don't make a pair
-    #[cfg(any(feature = "std", feature = "ffi_import"))]
     pub fn new(public_key: PublicKey, private_key: PrivateKey) -> Result<Self, Error> {
-        let algorithm = private_key.digest_function();
+        let algorithm = private_key.algorithm();

-        if algorithm != public_key.digest_function() {
-            #[cfg(not(feature = "std"))]
-            use alloc::borrow::ToOwned as _;
+        if algorithm != public_key.algorithm() {
             return Err(Error::KeyGen("Mismatch of key algorithms".to_owned()));
         }
@@ -216,10 +228,9 @@ impl KeyPair {
     ///
     /// # Errors
     /// Fails if decoding fails
-    #[cfg(any(feature = "std", feature = "ffi_import"))]
     pub fn generate_with_configuration(configuration: KeyGenConfiguration) -> Result<Self, Error> {
         let key_gen_option = match (configuration.algorithm, configuration.key_gen_option) {
-            (Algorithm::Secp256k1, Some(KeyGenOption::UseSeed(seed))) if seed.len() < 32 => {
+            (Algorithm::Secp256k1, KeyGenOption::UseSeed(seed)) if seed.len() < 32 => {
                 return Err(Error::KeyGen(
                     "secp256k1 seed must be at least 32 bytes long".to_owned(),
                 ))
@@ -227,23 +238,55 @@ impl KeyPair {
             (_, key_gen_option) => key_gen_option,
         };

-        let (public_key, private_key) = match configuration.algorithm {
-            Algorithm::Ed25519 => signature::ed25519::Ed25519Sha512::keypair(key_gen_option),
+        Ok(match configuration.algorithm {
+            Algorithm::Ed25519 => signature::ed25519::Ed25519Sha512::keypair(key_gen_option).into(),
             Algorithm::Secp256k1 => {
-                signature::secp256k1::EcdsaSecp256k1Sha256::keypair(key_gen_option)
+                signature::secp256k1::EcdsaSecp256k1Sha256::keypair(key_gen_option).into()
             }
-            Algorithm::BlsNormal => signature::bls::BlsNormal::keypair(key_gen_option),
-            Algorithm::BlsSmall => signature::bls::BlsSmall::keypair(key_gen_option),
-        }?;
-
-        Ok(Self {
-            public_key,
-            private_key,
+            Algorithm::BlsNormal => signature::bls::BlsNormal::keypair(key_gen_option).into(),
+            Algorithm::BlsSmall => signature::bls::BlsSmall::keypair(key_gen_option).into(),
         })
     }
 }

-#[cfg(feature = "std")]
+impl From<(ed25519::PublicKey, ed25519::PrivateKey)> for KeyPair {
+    fn from((public_key, private_key): (ed25519::PublicKey, ed25519::PrivateKey)) -> Self {
+        Self {
+            public_key: PublicKey(Box::new(PublicKeyInner::Ed25519(public_key))),
+            private_key: PrivateKey(Box::new(PrivateKeyInner::Ed25519(private_key))),
+        }
+    }
+}
+
+impl From<(secp256k1::PublicKey, secp256k1::PrivateKey)> for KeyPair {
+    fn from((public_key, private_key): (secp256k1::PublicKey, secp256k1::PrivateKey)) -> Self {
+        Self {
+            public_key: PublicKey(Box::new(PublicKeyInner::Secp256k1(public_key))),
+            private_key: PrivateKey(Box::new(PrivateKeyInner::Secp256k1(private_key))),
+        }
+    }
+}
+
+impl From<(bls::BlsNormalPublicKey, bls::BlsNormalPrivateKey)> for KeyPair {
+    fn from(
+        (public_key, private_key): (bls::BlsNormalPublicKey, bls::BlsNormalPrivateKey),
+    ) -> Self {
+        Self {
+            public_key: PublicKey(Box::new(PublicKeyInner::BlsNormal(public_key))),
+            private_key: PrivateKey(Box::new(PrivateKeyInner::BlsNormal(private_key))),
+        }
+    }
+}
+
+impl From<(bls::BlsSmallPublicKey, bls::BlsSmallPrivateKey)> for KeyPair {
+    fn from((public_key, private_key): (bls::BlsSmallPublicKey, bls::BlsSmallPrivateKey)) -> Self {
+        Self {
+            public_key: PublicKey(Box::new(PublicKeyInner::BlsSmall(public_key))),
+            private_key: PrivateKey(Box::new(PrivateKeyInner::BlsSmall(private_key))),
+        }
+    }
+}
+
 #[cfg(not(feature = "ffi_import"))]
 impl<'de> Deserialize<'de> for KeyPair {
     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
@@ -272,183 +315,375 @@ impl From<KeyPair> for (PublicKey, PrivateKey) {
     }
 }

-ffi::ffi_item! {
-    /// Public Key used in signatures.
-    #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, CopyGetters)]
-    #[cfg_attr(not(feature="ffi_import"), derive(DebugCustom, Display, Hash, DeserializeFromStr, SerializeDisplay, Decode, Encode, IntoSchema))]
-    #[cfg_attr(not(feature="ffi_import"), debug(fmt = "{{digest: {digest_function}, payload: {}}}", "self.normalize()"))]
-    #[cfg_attr(not(feature="ffi_import"), display(fmt = "{}", "self.normalize()"))]
-    pub struct PublicKey {
-        /// Digest function
-        #[getset(get_copy = "pub")]
-        digest_function: Algorithm,
-        /// Key payload
-        payload: ConstVec<u8>,
+#[derive(Clone, PartialEq, Eq)]
+#[cfg_attr(
+    not(feature = "ffi_import"),
+    derive(DeserializeFromStr, SerializeDisplay)
+)]
+#[allow(missing_docs, variant_size_differences)]
+enum PublicKeyInner {
+    Ed25519(ed25519::PublicKey),
+    Secp256k1(secp256k1::PublicKey),
+    BlsNormal(bls::BlsNormalPublicKey),
+    BlsSmall(bls::BlsSmallPublicKey),
+}
+
+#[cfg(not(feature = "ffi_import"))]
+impl fmt::Debug for PublicKeyInner {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple(self.algorithm().as_static_str())
+            .field(&self.normalize())
+            .finish()
     }
 }

-#[ffi_impl_opaque]
-impl PublicKey {
-    /// Creates a new public key from raw bytes received from elsewhere
-    pub fn from_raw(algorithm: Algorithm, payload: ConstVec<u8>) -> Self {
-        Self {
-            digest_function: algorithm,
-            payload,
-        }
+#[cfg(not(feature = "ffi_import"))]
+impl fmt::Display for PublicKeyInner {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(&self.normalize())
+    }
+}
+
+#[cfg(not(feature = "ffi_import"))]
+impl FromStr for PublicKeyInner {
+    type Err = ParseError;
+
+    fn from_str(key: &str) -> Result<Self, Self::Err> {
+        let bytes = hex_decode(key)?;
+
+        multihash::Multihash::try_from(bytes).map(Into::into)
     }
+}
+
+#[cfg(not(feature = "ffi_import"))]
+impl PublicKeyInner {
+    fn normalize(&self) -> String {
+        let multihash: &multihash::Multihash = &self.clone().into();
+        let bytes = Vec::try_from(multihash).expect("Failed to convert multihash to bytes.");
+
+        let mut bytes_iter = bytes.into_iter();
+        let fn_code = hex::encode(bytes_iter.by_ref().take(2).collect::<Vec<_>>());
+        let dig_size = hex::encode(bytes_iter.by_ref().take(1).collect::<Vec<_>>());
+        let key = hex::encode_upper(bytes_iter.by_ref().collect::<Vec<_>>());

-    /// Extracts the raw bytes from public key
-    pub fn into_raw(self) -> (Algorithm, ConstVec<u8>) {
-        (self.digest_function, self.payload)
+        format!("{fn_code}{dig_size}{key}")
+    }
+}
+
+impl PublicKeyInner {
+    fn to_raw(&self) -> (Algorithm, Vec<u8>) {
+        (self.algorithm(), self.payload())
     }

     /// Key payload
-    // TODO: Derive with getset once FFI impl is fixed
-    pub fn payload(&self) -> &[u8] {
-        self.payload.as_ref()
+    fn payload(&self) -> Vec<u8> {
+        use w3f_bls::SerializableToBytes as _;
+
+        match self {
+            Self::Ed25519(key) => key.as_bytes().to_vec(),
+            Self::Secp256k1(key) => key.to_sec1_bytes().to_vec(),
+            Self::BlsNormal(key) => key.to_bytes(),
+            Self::BlsSmall(key) => key.to_bytes(),
+        }
     }

-    #[cfg(feature = "std")]
-    fn try_from_private(private_key: PrivateKey) -> Result<PublicKey, Error> {
-        let digest_function = private_key.digest_function();
-        let key_gen_option = Some(KeyGenOption::FromPrivateKey(private_key));
+    fn algorithm(&self) -> Algorithm {
+        match self {
+            Self::Ed25519(_) => Algorithm::Ed25519,
+            Self::Secp256k1(_) => Algorithm::Secp256k1,
+            Self::BlsNormal(_) => Algorithm::BlsNormal,
+            Self::BlsSmall(_) => Algorithm::BlsSmall,
+        }
+    }
+}

-        let (public_key, _) = match digest_function {
-            Algorithm::Ed25519 => signature::ed25519::Ed25519Sha512::keypair(key_gen_option),
-            Algorithm::Secp256k1 => {
-                signature::secp256k1::EcdsaSecp256k1Sha256::keypair(key_gen_option)
-            }
-            Algorithm::BlsNormal => signature::bls::BlsNormal::keypair(key_gen_option),
-            Algorithm::BlsSmall => signature::bls::BlsSmall::keypair(key_gen_option),
-        }?;
+ffi::ffi_item! {
+    /// Public Key used in signatures.
+    #[derive(Debug, Clone, PartialEq, Eq, TypeId)]
+    #[cfg_attr(not(feature="ffi_import"), derive(Deserialize, Serialize, derive_more::Display))]
+    #[cfg_attr(not(feature="ffi_import"), display(fmt = "{_0}"))]
+    #[cfg_attr(all(feature = "ffi_export", not(feature = "ffi_import")), ffi_type(opaque))]
+    #[allow(missing_docs)]
+    pub struct PublicKey(Box<PublicKeyInner>);
+}

-        Ok(public_key)
+#[ffi_impl_opaque]
+impl PublicKey {
+    /// Creates a new public key from raw bytes received from elsewhere
+    ///
+    /// # Errors
+    ///
+    /// Fails if public key parsing fails
+    pub fn from_raw(algorithm: Algorithm, payload: &[u8]) -> Result<Self, ParseError> {
+        match algorithm {
+            Algorithm::Ed25519 => {
+                ed25519::Ed25519Sha512::parse_public_key(payload).map(PublicKeyInner::Ed25519)
+            }
+            Algorithm::Secp256k1 => secp256k1::EcdsaSecp256k1Sha256::parse_public_key(payload)
+                .map(PublicKeyInner::Secp256k1),
+            Algorithm::BlsNormal => {
+                bls::BlsNormal::parse_public_key(payload).map(PublicKeyInner::BlsNormal)
+            }
+            Algorithm::BlsSmall => {
+                bls::BlsSmall::parse_public_key(payload).map(PublicKeyInner::BlsSmall)
+            }
+        }
+        .map(Box::new)
+        .map(PublicKey)
     }

-    /// Construct `PrivateKey` from hex encoded string
+    /// Extracts the raw bytes from public key, copying the payload.
     ///
+    /// `into_raw()` without copying is not provided because underlying crypto
+    /// libraries do not provide move functionality.
+    pub fn to_raw(&self) -> (Algorithm, Vec<u8>) {
+        self.0.to_raw()
+    }
+
+    /// Construct [`PublicKey`] from hex encoded string
     /// # Errors
     ///
     /// - If the given payload is not hex encoded
-    /// - If the given payload is not a valid private key
-    #[cfg(feature = "std")]
-    pub fn from_hex(digest_function: Algorithm, payload: &str) -> Result<Self, Error> {
+    /// - If the given payload is not a valid public key
+    pub fn from_hex(digest_function: Algorithm, payload: &str) -> Result<Self, ParseError> {
         let payload = hex_decode(payload)?;
-        let payload = ConstVec::new(payload);
-        // NOTE: PrivateKey does some validation by generating a public key from the provided bytes
-        // we can't really do this for PublicKey
-        // this can be solved if the keys used here would be actually aware of the underlying crypto primitive types
-        // instead of just being raw bytes
-        Ok(Self {
-            digest_function,
-            payload,
-        })
+        Self::from_raw(digest_function, &payload)
+    }
+
+    /// Get the digital signature algorithm of the public key
+    pub fn algorithm(&self) -> Algorithm {
+        self.0.algorithm()
     }
 }

-impl FromStr for PublicKey {
-    type Err = Error;
+#[cfg(not(feature = "ffi_import"))]
+impl core::hash::Hash for PublicKey {
+    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
+        (self.to_raw()).hash(state)
+    }
+}
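To illustrate how the reworked, strongly typed key API above fits together, here is a minimal usage sketch. It is not part of the diff; it assumes the crate is consumed as `iroha_crypto` with the `rand` feature enabled, using only calls visible in this change:

use iroha_crypto::{Algorithm, KeyGenConfiguration, KeyPair, PrivateKey, PublicKey};

fn key_roundtrip() -> Result<(), Box<dyn std::error::Error>> {
    // Generation now takes a `KeyGenOption` directly; `from_random()` replaces
    // the removed `KeyGenConfiguration::default()`.
    let key_pair = KeyPair::generate_with_configuration(
        KeyGenConfiguration::from_random().with_algorithm(Algorithm::Ed25519),
    )?;

    // Keys round-trip through (algorithm, payload) pairs; `from_raw` now
    // validates the payload against the concrete key type.
    let (public_key, _private_key): (PublicKey, PrivateKey) = key_pair.into();
    let (algorithm, payload) = public_key.to_raw();
    let restored = PublicKey::from_raw(algorithm, &payload)?;
    assert_eq!(public_key, restored);
    Ok(())
}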
-    // TODO: Can we check the key is valid?
-    fn from_str(key: &str) -> Result<Self, Self::Err> {
-        let bytes = hex_decode(key).map_err(|err| Error::Parse(err.to_string()))?;
+impl PartialOrd for PublicKey {
+    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}

-        multihash::Multihash::try_from(bytes)
-            .map_err(|err| Error::Parse(err.to_string()))
-            .map(Into::into)
+impl Ord for PublicKey {
+    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
+        self.to_raw().cmp(&other.to_raw())
     }
 }

 #[cfg(not(feature = "ffi_import"))]
-impl PublicKey {
-    fn normalize(&self) -> String {
-        let multihash: &multihash::Multihash = &self.clone().into();
-        let bytes = Vec::try_from(multihash).expect("Failed to convert multihash to bytes.");
+impl Encode for PublicKey {
+    fn size_hint(&self) -> usize {
+        self.to_raw().size_hint()
+    }

-        let mut bytes_iter = bytes.into_iter();
-        let fn_code = hex::encode(bytes_iter.by_ref().take(2).collect::<Vec<_>>());
-        let dig_size = hex::encode(bytes_iter.by_ref().take(1).collect::<Vec<_>>());
-        let key = hex::encode_upper(bytes_iter.by_ref().collect::<Vec<_>>());
-
-        format!("{fn_code}{dig_size}{key}")
+    fn encode_to<W: parity_scale_codec::Output + ?Sized>(&self, dest: &mut W) {
+        self.to_raw().encode_to(dest);
+    }
+}
+
+#[cfg(not(feature = "ffi_import"))]
+impl Decode for PublicKey {
+    fn decode<I: parity_scale_codec::Input>(
+        input: &mut I,
+    ) -> Result<Self, parity_scale_codec::Error> {
+        let algorithm = Algorithm::decode(input)?;
+        let payload = <Vec<u8>>::decode(input)?;
+        Self::from_raw(algorithm, &payload).map_err(|_| {
+            parity_scale_codec::Error::from(
+                "Failed to construct public key from digest function and payload",
+            )
+        })
+    }
+}
+
+#[cfg(not(feature = "ffi_import"))]
+impl IntoSchema for PublicKey {
+    fn type_name() -> String {
+        Self::id()
+    }
+
+    fn update_schema_map(metamap: &mut MetaMap) {
+        if !metamap.contains_key::<Self>() {
+            if !metamap.contains_key::<Algorithm>() {
+                <Algorithm as iroha_schema::IntoSchema>::update_schema_map(metamap);
+            }
+            if !metamap.contains_key::<Vec<u8>>() {
+                <Vec<u8> as iroha_schema::IntoSchema>::update_schema_map(metamap);
+            }
+
+            metamap.insert::<Self>(Metadata::Struct(NamedFieldsMeta {
+                declarations: vec![
+                    Declaration {
+                        name: String::from("algorithm"),
+                        ty: core::any::TypeId::of::<Algorithm>(),
+                    },
+                    Declaration {
+                        name: String::from("payload"),
+                        ty: core::any::TypeId::of::<Vec<u8>>(),
+                    },
+                ],
+            }));
+        }
+    }
+}
+
+impl FromStr for PublicKey {
+    type Err = ParseError;
+
+    fn from_str(key: &str) -> Result<Self, Self::Err> {
+        PublicKeyInner::from_str(key).map(Box::new).map(Self)
     }
 }

 // TODO: Enable in ffi_import
-#[cfg(feature = "std")]
 #[cfg(not(feature = "ffi_import"))]
 impl From<PrivateKey> for PublicKey {
     fn from(private_key: PrivateKey) -> Self {
-        Self::try_from_private(private_key).expect("can't fail for valid `PrivateKey`")
+        let algorithm = private_key.algorithm();
+        let key_gen_option = KeyGenOption::FromPrivateKey(Box::new(private_key));
+
+        let inner = match algorithm {
+            Algorithm::Ed25519 => {
+                PublicKeyInner::Ed25519(ed25519::Ed25519Sha512::keypair(key_gen_option).0)
+            }
+            Algorithm::Secp256k1 => PublicKeyInner::Secp256k1(
+                secp256k1::EcdsaSecp256k1Sha256::keypair(key_gen_option).0,
+            ),
+            Algorithm::BlsNormal => {
+                PublicKeyInner::BlsNormal(bls::BlsNormal::keypair(key_gen_option).0)
+            }
+            Algorithm::BlsSmall => {
+                PublicKeyInner::BlsSmall(bls::BlsSmall::keypair(key_gen_option).0)
+            }
+        };
+        PublicKey(Box::new(inner))
     }
 }

+#[derive(Clone)]
+#[allow(missing_docs, variant_size_differences)]
+enum PrivateKeyInner {
+    Ed25519(ed25519::PrivateKey),
+    Secp256k1(secp256k1::PrivateKey),
+    BlsNormal(bls::BlsNormalPrivateKey),
+    BlsSmall(bls::BlsSmallPrivateKey),
+}
+
 ffi::ffi_item! {
     /// Private Key used in signatures.
-    #[derive(Clone, PartialEq, Eq, CopyGetters)]
-    #[cfg_attr(not(feature="ffi_import"), derive(DebugCustom, Display, Serialize))]
-    #[cfg_attr(not(feature="ffi_import"), debug(fmt = "{{digest: {digest_function}, payload: {}}}", "hex::encode_upper(payload)"))]
-    #[cfg_attr(not(feature="ffi_import"), display(fmt = "{}", "hex::encode_upper(payload)"))]
-    pub struct PrivateKey {
-        /// Digest function
-        #[getset(get_copy = "pub")]
-        digest_function: Algorithm,
-        /// Key payload
-        #[serde(with = "hex::serde")]
-        payload: ConstVec<u8>,
-    }
+    #[derive(Clone)]
+    #[cfg_attr(all(feature = "ffi_export", not(feature = "ffi_import")), ffi_type(opaque))]
+    #[allow(missing_docs, variant_size_differences)]
+    pub struct PrivateKey(Box<PrivateKeyInner>);
 }

-#[ffi_impl_opaque]
-impl PrivateKey {
-    /// Key payload
-    // TODO: Derive with getset once FFI impl is fixed
-    pub fn payload(&self) -> &[u8] {
-        self.payload.as_ref()
+impl PartialEq for PrivateKey {
+    fn eq(&self, other: &Self) -> bool {
+        match (self.0.borrow(), other.0.borrow()) {
+            (PrivateKeyInner::Ed25519(l), PrivateKeyInner::Ed25519(r)) => l == r,
+            (PrivateKeyInner::Secp256k1(l), PrivateKeyInner::Secp256k1(r)) => l == r,
+            (PrivateKeyInner::BlsNormal(l), PrivateKeyInner::BlsNormal(r)) => {
+                l.to_bytes() == r.to_bytes()
+            }
+            (PrivateKeyInner::BlsSmall(l), PrivateKeyInner::BlsSmall(r)) => {
+                l.to_bytes() == r.to_bytes()
+            }
+            _ => false,
+        }
     }
 }

+impl Eq for PrivateKey {}
+
 impl PrivateKey {
-    /// Construct `PrivateKey` from hex encoded string without validating the key
+    /// Creates a new private key from raw bytes received from elsewhere
     ///
     /// # Errors
     ///
-    /// If the given payload is not hex encoded
-    pub fn from_hex_unchecked(
-        digest_function: Algorithm,
-        payload: &(impl AsRef<[u8]> + ?Sized),
-    ) -> Result<Self, Error> {
-        Ok(Self {
-            digest_function,
-            payload: crate::hex_decode(payload).map(ConstVec::new)?,
-        })
+    /// - If the given payload is not a valid private key for the given digest function
+    pub fn from_raw(algorithm: Algorithm, payload: &[u8]) -> Result<Self, ParseError> {
+        match algorithm {
+            Algorithm::Ed25519 => {
+                ed25519::Ed25519Sha512::parse_private_key(payload).map(PrivateKeyInner::Ed25519)
+            }
+            Algorithm::Secp256k1 => secp256k1::EcdsaSecp256k1Sha256::parse_private_key(payload)
+                .map(PrivateKeyInner::Secp256k1),
+            Algorithm::BlsNormal => {
+                bls::BlsNormal::parse_private_key(payload).map(PrivateKeyInner::BlsNormal)
+            }
+            Algorithm::BlsSmall => {
+                bls::BlsSmall::parse_private_key(payload).map(PrivateKeyInner::BlsSmall)
+            }
+        }
+        .map(Box::new)
+        .map(PrivateKey)
     }

-    /// Construct `PrivateKey` from hex encoded string
+    /// Construct [`PrivateKey`] from hex encoded string
     ///
     /// # Errors
     ///
     /// - If the given payload is not hex encoded
     /// - If the given payload is not a valid private key
-    #[cfg(feature = "std")]
-    pub fn from_hex(digest_function: Algorithm, payload: &str) -> Result<Self, Error> {
+    pub fn from_hex(algorithm: Algorithm, payload: &str) -> Result<Self, ParseError> {
         let payload = hex_decode(payload)?;
-        let payload = ConstVec::new(payload);
-        let private_key_candidate = Self {
-            digest_function,
-            payload: payload.clone(),
-        };
+        Self::from_raw(algorithm, &payload)
+    }

-        PublicKey::try_from_private(private_key_candidate).map(|_| Self {
-            digest_function,
-            payload,
-        })
+    /// Get the digital signature algorithm of the private key
+    pub fn algorithm(&self) -> Algorithm {
+        match self.0.borrow() {
+            PrivateKeyInner::Ed25519(_) => Algorithm::Ed25519,
+            PrivateKeyInner::Secp256k1(_) => Algorithm::Secp256k1,
+            PrivateKeyInner::BlsNormal(_) => Algorithm::BlsNormal,
+            PrivateKeyInner::BlsSmall(_) => Algorithm::BlsSmall,
+        }
+    }
+
+    /// Key payload
+    fn payload(&self) -> Vec<u8> {
+        match self.0.borrow() {
+            PrivateKeyInner::Ed25519(key) => key.to_keypair_bytes().to_vec(),
+            PrivateKeyInner::Secp256k1(key) => key.to_bytes().to_vec(),
+            PrivateKeyInner::BlsNormal(key) => key.to_bytes(),
+            PrivateKeyInner::BlsSmall(key) => key.to_bytes(),
+        }
+    }
+}
+
+#[cfg(not(feature = "ffi_import"))]
+impl core::fmt::Debug for PrivateKey {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple(self.algorithm().as_static_str())
+            .field(&hex::encode_upper(self.payload()))
+            .finish()
+    }
+}
+
+#[cfg(not(feature = "ffi_import"))]
+impl core::fmt::Display for PrivateKey {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(&hex::encode_upper(self.payload()))
+    }
+}
+
+#[cfg(not(feature = "ffi_import"))]
+impl Serialize for PrivateKey {
+    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+        let mut state = serializer.serialize_struct("PrivateKey", 2)?;
+        state.serialize_field("digest_function", &self.algorithm())?;
+        state.serialize_field("payload", &hex::encode(self.payload()))?;
+        state.end()
     }
 }

-#[cfg(feature = "std")]
 impl<'de> Deserialize<'de> for PrivateKey {
     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
     where
@@ -480,8 +715,8 @@ impl SessionKey {
 }

 /// Shim for decoding hexadecimal strings
-pub(crate) fn hex_decode<T: AsRef<[u8]> + ?Sized>(payload: &T) -> Result<Vec<u8>, Error> {
-    hex::decode(payload).map_err(|err| Error::Parse(err.to_string()))
+pub(crate) fn hex_decode<T: AsRef<[u8]> + ?Sized>(payload: &T) -> Result<Vec<u8>, ParseError> {
+    hex::decode(payload).map_err(|err| ParseError(err.to_string()))
 }

 pub mod error {
@@ -496,6 +731,14 @@ pub mod error {
     #[cfg(feature = "std")]
     impl std::error::Error for NoSuchAlgorithm {}

+    /// Error parsing a key
+    #[derive(Debug, Display, Clone, serde::Deserialize, PartialEq, Eq)]
+    #[display(fmt = "{_0}")]
+    pub struct ParseError(pub(crate) String);
+
+    #[cfg(feature = "std")]
+    impl std::error::Error for ParseError {}
+
     /// Error when dealing with cryptographic functions
     #[derive(Debug, Display, serde::Deserialize, PartialEq, Eq)]
     pub enum Error {
@@ -504,10 +747,13 @@ pub mod error {
         NoSuchAlgorithm(String),
         /// Occurs during deserialization of a private or public key
         #[display(fmt = "Key could not be parsed. {_0}")]
-        Parse(String),
+        Parse(ParseError),
         /// Returned when an error occurs during the signing process
         #[display(fmt = "Signing failed. {_0}")]
         Signing(String),
+        /// Returned when an error occurs during the signature verification process
+        #[display(fmt = "Signature verification failed")]
+        BadSignature,
         /// Returned when an error occurs during key generation
         #[display(fmt = "Key generation failed. {_0}")]
         KeyGen(String),
@@ -522,13 +768,18 @@ pub mod error {
         Other(String),
     }

-    #[cfg(feature = "std")]
     impl From<NoSuchAlgorithm> for Error {
         fn from(source: NoSuchAlgorithm) -> Self {
             Self::NoSuchAlgorithm(source.to_string())
         }
     }

+    impl From<ParseError> for Error {
+        fn from(source: ParseError) -> Self {
+            Self::Parse(source)
+        }
+    }
+
     #[cfg(feature = "std")]
     impl std::error::Error for Error {}
 }
@@ -576,7 +827,6 @@ mod ffi {
         Clone: { KeyGenConfiguration, PublicKey, PrivateKey, KeyPair, Signature },
         Eq: { PublicKey, PrivateKey, KeyPair, Signature },
         Ord: { PublicKey, Signature },
-        Default: { KeyGenConfiguration },
     }

     // NOTE: Makes sure that only one `dealloc` is exported per generated dynamic library
@@ -602,15 +852,11 @@ pub mod prelude {
 #[cfg(test)]
 mod tests {
     use parity_scale_codec::{Decode, Encode};
-    #[cfg(all(feature = "std", not(feature = "ffi_import")))]
+    #[cfg(not(feature = "ffi_import"))]
     use serde::Deserialize;

     use super::*;

-    fn parse_const_bytes(hex: &str) -> ConstVec<u8> {
-        ConstVec::new(hex_decode(hex).expect("Failed to decode hex bytes"))
-    }
-
     #[test]
     fn algorithm_serialize_deserialize_consistent() {
         for algorithm in [
@@ -627,8 +873,9 @@ mod tests {
             );
         }
     }
+
     #[test]
-    #[cfg(any(feature = "std", feature = "ffi_import"))]
+    #[cfg(feature = "rand")]
     fn key_pair_serialize_deserialize_consistent() {
         for algorithm in [
             Algorithm::Ed25519,
             Algorithm::Secp256k1,
             Algorithm::BlsNormal,
             Algorithm::BlsSmall,
         ] {
             let key_pair = KeyPair::generate_with_configuration(
-                KeyGenConfiguration::default().with_algorithm(algorithm),
+                KeyGenConfiguration::from_random().with_algorithm(algorithm),
             )
             .expect("Failed to generate key pair");
@@ -671,27 +918,26 @@ mod tests {
     }

     #[test]
-    #[cfg(any(feature = "std", feature = "ffi_import"))]
     fn key_pair_match() {
-        assert!(KeyPair::new("ed012059C8A4DA1EBB5380F74ABA51F502714652FDCCE9611FAFB9904E4A3C4D382774"
+        KeyPair::new("ed012059C8A4DA1EBB5380F74ABA51F502714652FDCCE9611FAFB9904E4A3C4D382774"
             .parse()
             .expect("Public key not in mulithash format"),
             PrivateKey::from_hex(
                 Algorithm::Ed25519,
                 "93CA389FC2979F3F7D2A7F8B76C70DE6D5EAF5FA58D4F93CB8B0FB298D398ACC59C8A4DA1EBB5380F74ABA51F502714652FDCCE9611FAFB9904E4A3C4D382774"
-            ).expect("Private key not hex encoded")).is_ok());
+            ).expect("Private key not hex encoded")).unwrap();

-        assert!(KeyPair::new("ea0161040FCFADE2FC5D9104A9ACF9665EA545339DDF10AE50343249E01AF3B8F885CD5D52956542CCE8105DB3A2EC4006E637A7177FAAEA228C311F907DAAFC254F22667F1A1812BB710C6F4116A1415275D27BB9FB884F37E8EF525CC31F3945E945FA"
+        KeyPair::new("ea01309060D021340617E9554CCBC2CF3CC3DB922A9BA323ABDF7C271FCC6EF69BE7A8DEBCA7D9E96C0F0089ABA22CDAADE4A2"
             .parse()
-            .expect("Public key not in mulithash format"),
+            .expect("Public key not in multihash format"),
             PrivateKey::from_hex(
                 Algorithm::BlsNormal,
-                "0000000000000000000000000000000049BF70187154C57B97AF913163E8E875733B4EAF1F3F0689B31CE392129493E9"
-            ).expect("Private key not hex encoded")).is_ok());
+                "1ca347641228c3b79aa43839dedc85fa51c0e8b9b6a00f6b0d6b0423e902973f",
+            ).expect("Private key not hex encoded")).unwrap();
     }

     #[test]
-    #[cfg(any(feature = "std", feature = "ffi_import"))]
+    #[cfg(feature = "rand")]
     fn encode_decode_public_key_consistent() {
         for algorithm in [
             Algorithm::Ed25519,
             Algorithm::Secp256k1,
             Algorithm::BlsNormal,
             Algorithm::BlsSmall,
         ] {
             let key_pair = KeyPair::generate_with_configuration(
-                KeyGenConfiguration::default().with_algorithm(algorithm),
+                KeyGenConfiguration::from_random().with_algorithm(algorithm),
             )
             .expect("Failed to generate key pair");
             let (public_key, _) = key_pair.into();
@@ -718,7 +964,6 @@ mod
tests { } #[test] - #[cfg(feature = "std")] fn invalid_private_key() { assert!(PrivateKey::from_hex( Algorithm::Ed25519, @@ -733,23 +978,22 @@ mod tests { } #[test] - #[cfg(any(feature = "std", feature = "ffi_import"))] fn key_pair_mismatch() { - assert!(KeyPair::new("ed012059C8A4DA1EBB5380F74ABA51F502714652FDCCE9611FAFB9904E4A3C4D382774" + KeyPair::new("ed012059C8A4DA1EBB5380F74ABA51F502714652FDCCE9611FAFB9904E4A3C4D382774" .parse() .expect("Public key not in mulithash format"), PrivateKey::from_hex( Algorithm::Ed25519, "3A7991AF1ABB77F3FD27CC148404A6AE4439D095A63591B77C788D53F708A02A1509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4" - ).expect("Private key not valid")).is_err()); + ).expect("Private key not valid")).unwrap_err(); - assert!(KeyPair::new("ea0161040FCFADE2FC5D9104A9ACF9665EA545339DDF10AE50343249E01AF3B8F885CD5D52956542CCE8105DB3A2EC4006E637A7177FAAEA228C311F907DAAFC254F22667F1A1812BB710C6F4116A1415275D27BB9FB884F37E8EF525CC31F3945E945FA" + KeyPair::new("ea01309060D021340617E9554CCBC2CF3CC3DB922A9BA323ABDF7C271FCC6EF69BE7A8DEBCA7D9E96C0F0089ABA22CDAADE4A2" .parse() .expect("Public key not in mulithash format"), PrivateKey::from_hex( Algorithm::BlsNormal, - "000000000000000000000000000000002F57460183837EFBAC6AA6AB3B8DBB7CFFCFC59E9448B7860A206D37D470CBA3" - ).expect("Private key not valid")).is_err()); + "CC176E44C41AA144FD1BEE4E0BCD2EF43F06D0C7BC2988E89A799951D240E503", + ).expect("Private key not valid")).unwrap_err(); } #[test] @@ -758,53 +1002,47 @@ mod tests { assert_eq!( format!( "{}", - PublicKey { - digest_function: Algorithm::Ed25519, - payload: parse_const_bytes( - "1509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4" - ) - } + PublicKey::from_hex( + Algorithm::Ed25519, + "1509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4" + ) + .unwrap() ), "ed01201509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4" ); assert_eq!( format!( "{}", - PublicKey { - digest_function: Algorithm::Secp256k1, - payload: parse_const_bytes( - "0312273E8810581E58948D3FB8F9E8AD53AAA21492EBB8703915BBB565A21B7FCC" - ) - } + PublicKey::from_hex( + Algorithm::Secp256k1, + "0312273E8810581E58948D3FB8F9E8AD53AAA21492EBB8703915BBB565A21B7FCC" + ) + .unwrap() ), "e701210312273E8810581E58948D3FB8F9E8AD53AAA21492EBB8703915BBB565A21B7FCC" ); assert_eq!( format!( "{}", - PublicKey { - digest_function: Algorithm::BlsNormal, - payload: parse_const_bytes( - "04175B1E79B15E8A2D5893BF7F8933CA7D0863105D8BAC3D6F976CB043378A0E4B885C57ED14EB85FC2FABC639ADC7DE7F0020C70C57ACC38DEE374AF2C04A6F61C11DE8DF9034B12D849C7EB90099B0881267D0E1507D4365D838D7DCC31511E7" - ) - } + PublicKey::from_hex( + Algorithm::BlsNormal, + "9060D021340617E9554CCBC2CF3CC3DB922A9BA323ABDF7C271FCC6EF69BE7A8DEBCA7D9E96C0F0089ABA22CDAADE4A2", + ).unwrap() ), - "ea016104175B1E79B15E8A2D5893BF7F8933CA7D0863105D8BAC3D6F976CB043378A0E4B885C57ED14EB85FC2FABC639ADC7DE7F0020C70C57ACC38DEE374AF2C04A6F61C11DE8DF9034B12D849C7EB90099B0881267D0E1507D4365D838D7DCC31511E7" + "ea01309060D021340617E9554CCBC2CF3CC3DB922A9BA323ABDF7C271FCC6EF69BE7A8DEBCA7D9E96C0F0089ABA22CDAADE4A2", ); assert_eq!( format!( "{}", - PublicKey { - digest_function: Algorithm::BlsSmall, - payload: parse_const_bytes( - 
"040CB3231F601E7245A6EC9A647B450936F707CA7DC347ED258586C1924941D8BC38576473A8BA3BB2C37E3E121130AB67103498A96D0D27003E3AD960493DA79209CF024E2AA2AE961300976AEEE599A31A5E1B683EAA1BCFFC47B09757D20F21123C594CF0EE0BAF5E1BDD272346B7DC98A8F12C481A6B28174076A352DA8EAE881B90911013369D7FA960716A5ABC5314307463FA2285A5BF2A5B5C6220D68C2D34101A91DBFC531C5B9BBFB2245CCC0C50051F79FC6714D16907B1FC40E0C0" - ) - } + PublicKey::from_hex( + Algorithm::BlsSmall, + "9051D4A9C69402423413EBBA4C00BC82A0102AA2B783057BD7BCEE4DD17B37DE5D719EE84BE43783F2AE47A673A74B8315DD3E595ED1FBDFAC17DA1D7A36F642B423ED18275FAFD671B1D331439D22F12FB6EB436A47E8656F182A78DF29D310", + ).unwrap() ), - "eb01c1040CB3231F601E7245A6EC9A647B450936F707CA7DC347ED258586C1924941D8BC38576473A8BA3BB2C37E3E121130AB67103498A96D0D27003E3AD960493DA79209CF024E2AA2AE961300976AEEE599A31A5E1B683EAA1BCFFC47B09757D20F21123C594CF0EE0BAF5E1BDD272346B7DC98A8F12C481A6B28174076A352DA8EAE881B90911013369D7FA960716A5ABC5314307463FA2285A5BF2A5B5C6220D68C2D34101A91DBFC531C5B9BBFB2245CCC0C50051F79FC6714D16907B1FC40E0C0" + "eb01609051D4A9C69402423413EBBA4C00BC82A0102AA2B783057BD7BCEE4DD17B37DE5D719EE84BE43783F2AE47A673A74B8315DD3E595ED1FBDFAC17DA1D7A36F642B423ED18275FAFD671B1D331439D22F12FB6EB436A47E8656F182A78DF29D310", ); } - #[cfg(all(feature = "std", not(feature = "ffi_import")))] + #[cfg(not(feature = "ffi_import"))] #[derive(Debug, PartialEq, Deserialize, Serialize)] struct TestJson { public_key: PublicKey, @@ -812,7 +1050,7 @@ mod tests { } #[test] - #[cfg(all(feature = "std", not(feature = "ffi_import")))] + #[cfg(not(feature = "ffi_import"))] fn deserialize_keys_ed25519() { assert_eq!( serde_json::from_str::<'_, TestJson>("{ @@ -823,22 +1061,20 @@ mod tests { } }").expect("Failed to deserialize."), TestJson { - public_key: PublicKey { - digest_function: Algorithm::Ed25519, - payload: parse_const_bytes( - "1509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4" - ) - }, - private_key: PrivateKey { - digest_function: Algorithm::Ed25519, - payload: parse_const_bytes("3A7991AF1ABB77F3FD27CC148404A6AE4439D095A63591B77C788D53F708A02A1509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4"), - } + public_key: PublicKey::from_hex( + Algorithm::Ed25519, + "1509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4" + ).unwrap(), + private_key: PrivateKey::from_hex( + Algorithm::Ed25519, + "3A7991AF1ABB77F3FD27CC148404A6AE4439D095A63591B77C788D53F708A02A1509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4", + ).unwrap() } ); } #[test] - #[cfg(all(feature = "std", not(feature = "ffi_import")))] + #[cfg(not(feature = "ffi_import"))] fn deserialize_keys_secp256k1() { assert_eq!( serde_json::from_str::<'_, TestJson>("{ @@ -849,77 +1085,68 @@ mod tests { } }").expect("Failed to deserialize."), TestJson { - public_key: PublicKey { - digest_function: Algorithm::Secp256k1, - payload: parse_const_bytes( - "0312273E8810581E58948D3FB8F9E8AD53AAA21492EBB8703915BBB565A21B7FCC" - ) - }, - private_key: PrivateKey { - digest_function: Algorithm::Secp256k1, - payload: parse_const_bytes("4DF4FCA10762D4B529FE40A2188A60CA4469D2C50A825B5F33ADC2CB78C69445"), - } + public_key: PublicKey::from_hex( + Algorithm::Secp256k1, + "0312273E8810581E58948D3FB8F9E8AD53AAA21492EBB8703915BBB565A21B7FCC" + ).unwrap(), + private_key: PrivateKey::from_hex( + Algorithm::Secp256k1, + "4DF4FCA10762D4B529FE40A2188A60CA4469D2C50A825B5F33ADC2CB78C69445", + ).unwrap() } ); } #[test] - #[cfg(all(feature = "std", not(feature = "ffi_import")))] + #[cfg(not(feature = "ffi_import"))] fn 
deserialize_keys_bls() { assert_eq!( serde_json::from_str::<'_, TestJson>("{ - \"public_key\": \"ea016104175B1E79B15E8A2D5893BF7F8933CA7D0863105D8BAC3D6F976CB043378A0E4B885C57ED14EB85FC2FABC639ADC7DE7F0020C70C57ACC38DEE374AF2C04A6F61C11DE8DF9034B12D849C7EB90099B0881267D0E1507D4365D838D7DCC31511E7\", + \"public_key\": \"ea01309060D021340617E9554CCBC2CF3CC3DB922A9BA323ABDF7C271FCC6EF69BE7A8DEBCA7D9E96C0F0089ABA22CDAADE4A2\", \"private_key\": { \"digest_function\": \"bls_normal\", - \"payload\": \"000000000000000000000000000000002F57460183837EFBAC6AA6AB3B8DBB7CFFCFC59E9448B7860A206D37D470CBA3\" + \"payload\": \"1ca347641228c3b79aa43839dedc85fa51c0e8b9b6a00f6b0d6b0423e902973f\" } }").expect("Failed to deserialize."), TestJson { - public_key: PublicKey { - digest_function: Algorithm::BlsNormal, - payload: parse_const_bytes( - "04175B1E79B15E8A2D5893BF7F8933CA7D0863105D8BAC3D6F976CB043378A0E4B885C57ED14EB85FC2FABC639ADC7DE7F0020C70C57ACC38DEE374AF2C04A6F61C11DE8DF9034B12D849C7EB90099B0881267D0E1507D4365D838D7DCC31511E7" - ) - }, - private_key: PrivateKey { - digest_function: Algorithm::BlsNormal, - payload: parse_const_bytes("000000000000000000000000000000002F57460183837EFBAC6AA6AB3B8DBB7CFFCFC59E9448B7860A206D37D470CBA3"), - } + public_key: PublicKey::from_hex( + Algorithm::BlsNormal, + "9060D021340617E9554CCBC2CF3CC3DB922A9BA323ABDF7C271FCC6EF69BE7A8DEBCA7D9E96C0F0089ABA22CDAADE4A2", + ).unwrap(), + private_key: PrivateKey::from_hex( + Algorithm::BlsNormal, + "1ca347641228c3b79aa43839dedc85fa51c0e8b9b6a00f6b0d6b0423e902973f", + ).unwrap() } ); assert_eq!( serde_json::from_str::<'_, TestJson>("{ - \"public_key\": \"eb01C1040CB3231F601E7245A6EC9A647B450936F707CA7DC347ED258586C1924941D8BC38576473A8BA3BB2C37E3E121130AB67103498A96D0D27003E3AD960493DA79209CF024E2AA2AE961300976AEEE599A31A5E1B683EAA1BCFFC47B09757D20F21123C594CF0EE0BAF5E1BDD272346B7DC98A8F12C481A6B28174076A352DA8EAE881B90911013369D7FA960716A5ABC5314307463FA2285A5BF2A5B5C6220D68C2D34101A91DBFC531C5B9BBFB2245CCC0C50051F79FC6714D16907B1FC40E0C0\", + \"public_key\": \"eb01609051D4A9C69402423413EBBA4C00BC82A0102AA2B783057BD7BCEE4DD17B37DE5D719EE84BE43783F2AE47A673A74B8315DD3E595ED1FBDFAC17DA1D7A36F642B423ED18275FAFD671B1D331439D22F12FB6EB436A47E8656F182A78DF29D310\", \"private_key\": { \"digest_function\": \"bls_small\", - \"payload\": \"0000000000000000000000000000000060F3C1AC9ADDBBED8DB83BC1B2EF22139FB049EECB723A557A41CA1A4B1FED63\" + \"payload\": \"8cb95072914cdd8e4cf682fdbe1189cdf4fc54d445e760b3446f896dbdbf5b2b\" } }").expect("Failed to deserialize."), TestJson { - public_key: PublicKey { - digest_function: Algorithm::BlsSmall, - payload: parse_const_bytes( - "040CB3231F601E7245A6EC9A647B450936F707CA7DC347ED258586C1924941D8BC38576473A8BA3BB2C37E3E121130AB67103498A96D0D27003E3AD960493DA79209CF024E2AA2AE961300976AEEE599A31A5E1B683EAA1BCFFC47B09757D20F21123C594CF0EE0BAF5E1BDD272346B7DC98A8F12C481A6B28174076A352DA8EAE881B90911013369D7FA960716A5ABC5314307463FA2285A5BF2A5B5C6220D68C2D34101A91DBFC531C5B9BBFB2245CCC0C50051F79FC6714D16907B1FC40E0C0" - ) - }, - private_key: PrivateKey { - digest_function: Algorithm::BlsSmall, - payload: parse_const_bytes( - "0000000000000000000000000000000060F3C1AC9ADDBBED8DB83BC1B2EF22139FB049EECB723A557A41CA1A4B1FED63"), - } + public_key: PublicKey::from_hex( + Algorithm::BlsSmall, + "9051D4A9C69402423413EBBA4C00BC82A0102AA2B783057BD7BCEE4DD17B37DE5D719EE84BE43783F2AE47A673A74B8315DD3E595ED1FBDFAC17DA1D7A36F642B423ED18275FAFD671B1D331439D22F12FB6EB436A47E8656F182A78DF29D310", + ).unwrap(), + private_key: 
PrivateKey::from_hex(
+                    Algorithm::BlsSmall,
+                    "8cb95072914cdd8e4cf682fdbe1189cdf4fc54d445e760b3446f896dbdbf5b2b",
+                ).unwrap()
             }
         );
     }

     #[test]
-    #[cfg(any(feature = "std", feature = "ffi_import"))]
+    #[cfg(feature = "rand")]
     fn secp256k1_key_gen_fails_with_seed_smaller_than_32() {
         let seed: Vec<_> = (0..12u8).collect();

         let result = KeyPair::generate_with_configuration(
-            KeyGenConfiguration::default()
-                .with_algorithm(Algorithm::Secp256k1)
-                .use_seed(seed),
+            KeyGenConfiguration::from_seed(seed).with_algorithm(Algorithm::Secp256k1),
         );

         assert_eq!(
diff --git a/crypto/src/merkle.rs b/crypto/src/merkle.rs
index 84d63cb1d8c..ec495fe0ed0 100644
--- a/crypto/src/merkle.rs
+++ b/crypto/src/merkle.rs
@@ -1,15 +1,13 @@
 //! Merkle tree implementation.

 #[cfg(not(feature = "std"))]
-use alloc::{format, string::String, vec::Vec};
+use alloc::{collections::VecDeque, format, string::String, vec, vec::Vec};
 #[cfg(feature = "std")]
 use std::collections::VecDeque;

 use iroha_schema::{IntoSchema, TypeId};
 use parity_scale_codec::{Decode, Encode};

-#[cfg(feature = "std")]
-use crate::Hash;
-use crate::HashOf;
+use crate::{Hash, HashOf};

 /// [Merkle Tree](https://en.wikipedia.org/wiki/Merkle_tree) used to validate `T`
 #[derive(Debug, TypeId, Decode, Encode)]
@@ -79,7 +77,6 @@ impl<T> CompleteBTree<Vec<HashOf<T>>> for MerkleTree<T> {
     }
 }

-#[cfg(feature = "std")]
 impl<T> FromIterator<HashOf<T>> for MerkleTree<T> {
     fn from_iter<I: IntoIterator<Item = HashOf<T>>>(iter: I) -> Self {
         let mut queue = iter.into_iter().map(Some).collect::<VecDeque<_>>();
@@ -162,7 +159,6 @@ impl<T> MerkleTree<T> {
     }

     /// Add `hash` to the tail of the tree.
-    #[cfg(feature = "std")]
     pub fn add(&mut self, hash: HashOf<T>) {
         // If the tree is perfect, increment its height to double the leaf capacity.
         if self.max_nodes_at_height() == self.len() {
@@ -183,7 +179,6 @@ impl<T> MerkleTree<T> {
         self.update(self.len().saturating_sub(1));
     }

-    #[cfg(feature = "std")]
     fn update(&mut self, idx: usize) {
         let mut node = match self.get(idx) {
             Some(node) => *node,
@@ -209,7 +204,6 @@ impl<T> MerkleTree<T> {
         }
     }

-    #[cfg(feature = "std")]
     fn nodes_pair_hash(
         l_node: Option<&HashOf<T>>,
         r_node: Option<&HashOf<T>>,
@@ -252,7 +246,7 @@ impl LeafHashIterator {
     }
 }

-#[cfg(all(test, feature = "std"))]
+#[cfg(test)]
 mod tests {
     use super::*;
     use crate::Hash;
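Since `FromIterator` and `add` are no longer gated on `std`, a Merkle tree can now be built and extended in `no_std` contexts too. A brief sketch (not part of the diff; it assumes `MerkleTree` and `HashOf` are reachable from the crate root, and `leaves` stands in for hashes the caller already has):

use iroha_crypto::{HashOf, MerkleTree};

fn build_tree<T>(leaves: Vec<HashOf<T>>, late_arrival: HashOf<T>) -> MerkleTree<T> {
    // `collect` uses the formerly `std`-only `FromIterator` impl.
    let mut tree: MerkleTree<T> = leaves.into_iter().collect();
    // `add` appends a leaf, doubling the leaf capacity if the tree was perfect.
    tree.add(late_arrival);
    tree
}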
diff --git a/crypto/src/multihash.rs b/crypto/src/multihash.rs
index 579f9708d02..a2c8281b134 100644
--- a/crypto/src/multihash.rs
+++ b/crypto/src/multihash.rs
@@ -1,4 +1,5 @@
 //! Module with multihash implementation
+
 #[cfg(not(feature = "std"))]
 use alloc::{
     string::{String, ToString as _},
@@ -9,7 +10,7 @@ use alloc::{
 use derive_more::Display;
 use iroha_primitives::const_vec::ConstVec;

-use crate::{varint, Algorithm, NoSuchAlgorithm, PublicKey};
+use crate::{varint, Algorithm, NoSuchAlgorithm, ParseError, PublicKey, PublicKeyInner};

 /// ed25519 public string
 pub const ED_25519_PUB_STR: &str = "ed25519-pub";
@@ -41,6 +42,28 @@ pub enum DigestFunction {
     Bls12381G2Pub = 0xeb,
 }

+impl From<DigestFunction> for Algorithm {
+    fn from(f: DigestFunction) -> Self {
+        match f {
+            DigestFunction::Ed25519Pub => Self::Ed25519,
+            DigestFunction::Secp256k1Pub => Self::Secp256k1,
+            DigestFunction::Bls12381G1Pub => Self::BlsNormal,
+            DigestFunction::Bls12381G2Pub => Self::BlsSmall,
+        }
+    }
+}
+
+impl From<Algorithm> for DigestFunction {
+    fn from(a: Algorithm) -> Self {
+        match a {
+            Algorithm::Ed25519 => Self::Ed25519Pub,
+            Algorithm::Secp256k1 => Self::Secp256k1Pub,
+            Algorithm::BlsNormal => Self::Bls12381G1Pub,
+            Algorithm::BlsSmall => Self::Bls12381G2Pub,
+        }
+    }
+}
+
 impl core::str::FromStr for DigestFunction {
     type Err = NoSuchAlgorithm;

@@ -83,17 +106,15 @@ impl From<DigestFunction> for u64 {
     }
 }

-/// Multihash
+/// Multihash.
+///
+/// Offers a middleware representation of [`PublicKey`] which can be converted
+/// to/from bytes or string.
 #[derive(Debug, PartialEq, Eq)]
-pub struct Multihash {
-    /// digest
-    pub digest_function: DigestFunction,
-    /// hash payload
-    pub payload: ConstVec<u8>,
-}
+pub struct Multihash(PublicKeyInner);

 impl TryFrom<Vec<u8>> for Multihash {
-    type Error = MultihashConvertError;
+    type Error = ParseError;

     fn try_from(bytes: Vec<u8>) -> Result<Self, Self::Error> {
         let idx = bytes
             .iter()
             .enumerate()
             .find(|&(_, &byte)| (byte & 0b1000_0000) == 0)
             .ok_or_else(|| {
-                Self::Error::new(String::from(
+                ParseError(String::from(
                     "Failed to find last byte(byte smaller than 128)",
                 ))
             })?
@@ -101,7 +122,7 @@ impl TryFrom<Vec<u8>> for Multihash {
         let mut bytes = bytes.iter().copied();

         let digest_function: u64 = varint::VarUint::new(digest_function)
-            .map_err(|err| Self::Error::new(err.to_string()))?
+            .map_err(|err| ParseError(err.to_string()))?
             .try_into()
-            .map_err(|err: varint::ConvertError| Self::Error::new(err.to_string()))?;
-        let digest_function = digest_function.try_into()?;
+            .map_err(|err: varint::ConvertError| ParseError(err.to_string()))?;
+        let digest_function =
+            DigestFunction::try_from(digest_function).map_err(|err| ParseError(err.to_string()))?;
+        let algorithm = Algorithm::from(digest_function);

         let digest_size = bytes
             .next()
-            .ok_or_else(|| Self::Error::new(String::from("Digest size not found")))?;
+            .ok_or_else(|| ParseError(String::from("Digest size not found")))?;

         let payload: Vec<u8> = bytes.collect();
         if payload.len() != digest_size as usize {
-            return Err(Self::Error::new(String::from(
+            return Err(ParseError(String::from(
                 "Digest size not equal to actual length",
             )));
         }
         let payload = ConstVec::new(payload);

-        Ok(Self {
-            digest_function,
-            payload,
-        })
+        Ok(Self::from(*PublicKey::from_raw(algorithm, &payload)?.0))
     }
 }

@@ -141,50 +161,32 @@ impl TryFrom<&Multihash> for Vec<u8> {
     fn try_from(multihash: &Multihash) -> Result<Self, Self::Error> {
         let mut bytes = vec![];

-        let digest_function: u64 = multihash.digest_function.into();
+        let (algorithm, payload) = multihash.0.to_raw();
+        let digest_function: DigestFunction = algorithm.into();
+        let digest_function: u64 = digest_function.into();
         let digest_function: varint::VarUint = digest_function.into();
         let mut digest_function = digest_function.into();
         bytes.append(&mut digest_function);

-        bytes.push(multihash.payload.len().try_into().map_err(|_e| {
+        bytes.push(payload.len().try_into().map_err(|_e| {
             MultihashConvertError::new(String::from("Digest size can't fit into u8"))
         })?);
-        bytes.extend_from_slice(multihash.payload.as_ref());
+        bytes.extend_from_slice(payload.as_ref());

         Ok(bytes)
     }
 }

-impl From<Multihash> for PublicKey {
+impl From<Multihash> for PublicKeyInner {
     #[inline]
     fn from(multihash: Multihash) -> Self {
-        let digest_function = match multihash.digest_function {
-            DigestFunction::Ed25519Pub => Algorithm::Ed25519,
-            DigestFunction::Secp256k1Pub => Algorithm::Secp256k1,
-            DigestFunction::Bls12381G1Pub => Algorithm::BlsNormal,
-            DigestFunction::Bls12381G2Pub => Algorithm::BlsSmall,
-        };
-
-        Self {
-            digest_function,
-            payload: multihash.payload,
-        }
+        multihash.0
     }
 }

-impl From<PublicKey> for Multihash {
+impl From<PublicKeyInner> for Multihash {
     #[inline]
-    fn from(public_key: PublicKey) -> Self {
-        let digest_function = match public_key.digest_function() {
-            Algorithm::Ed25519 => DigestFunction::Ed25519Pub,
-            Algorithm::Secp256k1 => DigestFunction::Secp256k1Pub,
-            Algorithm::BlsNormal => DigestFunction::Bls12381G1Pub,
-            Algorithm::BlsSmall => DigestFunction::Bls12381G2Pub,
-        };
-
-        Self {
-            digest_function,
-            payload: public_key.payload,
-        }
+    fn from(public_key: PublicKeyInner) -> Self {
+        Self(public_key)
     }
 }

@@ -216,38 +218,40 @@ mod tests {
     use super::*;
     use crate::hex_decode;

-    fn parse_const_bytes(hex: &str) -> ConstVec<u8> {
-        ConstVec::new(hex_decode(hex).expect("Failed to decode hex bytes"))
-    }
-
     #[test]
     fn multihash_to_bytes() {
-        let multihash = &Multihash {
-            digest_function: DigestFunction::Ed25519Pub,
-            payload: parse_const_bytes(
-                "1509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4",
-            ),
-        };
-        let bytes: Vec<u8> = multihash.try_into().expect("Failed to serialize multihash");
+        let multihash = Multihash(
+            *PublicKey::from_raw(
+                Algorithm::Ed25519,
+                &hex_decode("1509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4")
+                    .unwrap(),
+            )
+            .unwrap()
+            .0,
+        );
+        let bytes = Vec::try_from(&multihash).expect("Failed to serialize multihash");
         assert_eq!(
             hex_decode("ed01201509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4")
-                .expect("Failed to decode"),
+                .unwrap(),
             bytes
         );
     }

     #[test]
     fn multihash_from_bytes() {
-        let multihash = Multihash {
-            digest_function: DigestFunction::Ed25519Pub,
-            payload: parse_const_bytes(
-                "1509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4",
-            ),
-        };
+        let multihash = Multihash(
+            *PublicKey::from_raw(
+                Algorithm::Ed25519,
+                &hex_decode("1509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4")
+                    .unwrap(),
+            )
+            .unwrap()
+            .0,
+        );
         let bytes =
             hex_decode("ed01201509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4")
-                .expect("Failed to decode");
-        let multihash_decoded: Multihash = bytes.try_into().expect("Failed to decode.");
+                .unwrap();
+        let multihash_decoded: Multihash = bytes.try_into().unwrap();
         assert_eq!(multihash, multihash_decoded);
     }
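For reference, the multihash text format that `normalize()` produces (and `FromStr` consumes) is varint function code, then payload length, then the upper-case hex payload. A sketch using the ed25519 fixture from the tests above (not part of the diff; assumes the crate is consumed as `iroha_crypto`):

use iroha_crypto::{Algorithm, PublicKey};

fn parse_multihash() {
    // "ed01" is the varint digest-function code, "20" the 32-byte payload length.
    let multihash = "ed01201509A611AD6D97B01D871E58ED00C8FD7C3917B6CA61A8C2833A19E000AAC2E4";
    let key: PublicKey = multihash.parse().expect("valid ed25519 multihash");
    assert_eq!(key.algorithm(), Algorithm::Ed25519);
    // `Display` round-trips through the same normalized form.
    assert_eq!(key.to_string(), multihash);
}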
diff --git a/crypto/src/signature/bls/implementation.rs b/crypto/src/signature/bls/implementation.rs
index 1cc3ae17efd..86156e66327 100644
--- a/crypto/src/signature/bls/implementation.rs
+++ b/crypto/src/signature/bls/implementation.rs
@@ -1,208 +1,87 @@
-use std::{hash::Hash, marker::PhantomData};
+#[cfg(not(feature = "std"))]
+use alloc::{borrow::ToOwned as _, string::ToString as _, vec, vec::Vec};
+use core::marker::PhantomData;

-/// Implements
-/// and
-///
-use amcl_wrapper::{
-    field_elem::FieldElement, group_elem::GroupElement, group_elem_g1::G1, group_elem_g2::G2,
-};
+#[cfg(feature = "rand")]
+use rand_chacha::rand_core::OsRng;
 use sha2::Sha256;
+// TODO: Better to use `SecretKey`, not `SecretKeyVT`, but it requires to implement
+// interior mutability
+use w3f_bls::{EngineBLS as _, PublicKey, SecretKeyVT as SecretKey, SerializableToBytes as _};
+use zeroize::Zeroize as _;

 pub(super) const MESSAGE_CONTEXT: &[u8; 20] = b"for signing messages";

-use super::PRIVATE_KEY_SIZE;
-use crate::{
-    Algorithm, ConstVec, Error, KeyGenOption, PrivateKey as IrohaPrivateKey,
-    PublicKey as IrohaPublicKey,
-};
+use crate::{Algorithm, Error, KeyGenOption, ParseError};

-/// This is a simple alias so the consumer can just use `PrivateKey::random()` to generate a new one
-/// instead of wrapping it as a private field
-pub type PrivateKey = FieldElement;

 pub trait BlsConfiguration {
     const ALGORITHM: Algorithm;
-    const PK_SIZE: usize;
-    const SIG_SIZE: usize;
-    type Generator: GroupElement + Eq + PartialEq + Hash;
-    type SignatureGroup: GroupElement + Eq + PartialEq + Hash;
-    fn ate_2_pairing_is_one(
-        g: &Self::Generator,
-        sig: &Self::SignatureGroup,
-        pk: &Self::Generator,
-        hash: &Self::SignatureGroup,
-    ) -> bool;
-    fn set_pairs(p: &(Self::Generator, Self::SignatureGroup)) -> (&G1, &G2);
-
-    /// Creates a new BLS key pair
-    fn generate(g: &Self::Generator) -> (PublicKey<Self>, PrivateKey) {
-        let sk = PrivateKey::random();
-        let pk = PublicKey::new(&sk, g);
-        (pk, sk)
-    }
-
-    fn hash_to_point<A: AsRef<[u8]>>(v: A, ctx: &[u8]) -> Self::SignatureGroup {
-        let mut value = Vec::new();
-        value.extend_from_slice(ctx);
-        value.extend_from_slice(v.as_ref());
-        Self::SignatureGroup::from_msg_hash(value.as_slice())
-    }
-
-    fn hash_msg<A: AsRef<[u8]>>(
-        message: A,
-        context: Option<&'static [u8]>,
-    ) -> Self::SignatureGroup {
-        let ctx: &[u8] = context.unwrap_or(MESSAGE_CONTEXT);
-        Self::hash_to_point(message, ctx)
-    }
-
-    fn hash_key(pk: &PublicKey<Self>, context: Option<&'static [u8]>) -> Self::SignatureGroup {
-        const PUBLICKEY_CONTEXT: &[u8; 47] = b"for signing public keys for proof of possession";
-        let ctx: &[u8] = context.unwrap_or(PUBLICKEY_CONTEXT);
-        Self::hash_to_point(pk.to_bytes(), ctx)
-    }
-}
-
-pub struct PublicKey<C: BlsConfiguration + ?Sized>(C::Generator);
-
-impl<C: BlsConfiguration + ?Sized> PublicKey<C> {
-    pub fn new(sk: &PrivateKey, g: &C::Generator) -> Self {
-        Self(g.scalar_mul_const_time(sk))
-
-        // Self(g * sk)
-    }
-
-    pub fn to_bytes(&self) -> Vec<u8> {
-        self.0.to_bytes(false)
-    }
-
-    pub fn from_bytes(bytes: &[u8]) -> Result<Self, Error> {
-        Ok(Self(
-            C::Generator::from_bytes(bytes).map_err(|e| Error::Parse(format!("{e:?}")))?,
-        ))
-    }
-}
-
-/// Signature over a message. One gotcha for BLS signatures
-/// is the need to mitigate rogue key attacks. There are two methods to achieve
-/// this: compute additional work to make each message distinct
-/// in a signature for each `PublicKey` or
-/// use `ProofOfPossession`. `Signature` and `ProofOfPossession` MUST
-/// use domain separation values that are different
-/// to avoid certain types of attacks and make `Signature`
-/// distinct from `ProofOfPossession`. If `ProofOfPossession`
-/// and `Signature` use the same value for `context` they are effectively the same.
-/// Don't do this. You have been warned.
-///
-/// To make messages distinct, use `new_with_rk_mitigation`. If using
-/// proof of possession mitigation, use `new`.
-#[derive(Debug, Clone)]
-pub struct Signature<C: BlsConfiguration + ?Sized>(C::SignatureGroup);
+    type Engine: w3f_bls::EngineBLS;

-impl<C: BlsConfiguration + ?Sized> Signature<C> {
-    pub fn new<A: AsRef<[u8]>>(
-        message: A,
-        context: Option<&'static [u8]>,
-        sk: &PrivateKey,
-    ) -> Self {
-        Self(C::hash_msg(message, context).scalar_mul_const_time(sk))
-    }
-
-    // Verify a signature generated by `new`
-    pub fn verify<A: AsRef<[u8]>>(
-        &self,
-        message: A,
-        context: Option<&'static [u8]>,
-        pk: &PublicKey<C>,
-        g: &C::Generator,
-    ) -> bool {
-        let hash = C::hash_msg(message, context);
-        C::ate_2_pairing_is_one(g, &self.0, &pk.0, &hash)
-    }
-
-    pub fn to_bytes(&self) -> Vec<u8> {
-        self.0.to_bytes(false)
-    }
-
-    pub fn from_bytes(bytes: &[u8]) -> Result<Self, Error> {
-        Ok(Signature(
-            C::SignatureGroup::from_bytes(bytes).map_err(|e| Error::Parse(format!("{e:?}")))?,
-        ))
-    }
+    fn extract_private_key(private_key: &crate::PrivateKey) -> Option<&SecretKey<Self::Engine>>;
 }

 pub struct BlsImpl<C: BlsConfiguration>(PhantomData<C>);

 impl<C: BlsConfiguration> BlsImpl<C> {
-    fn parse_public_key(pk: &IrohaPublicKey) -> Result<PublicKey<C>, Error> {
-        assert_eq!(pk.digest_function, C::ALGORITHM);
-        PublicKey::from_bytes(&pk.payload)
-            .map_err(|e| Error::Parse(format!("Failed to parse public key: {e}")))
-    }
-
-    fn parse_private_key(sk: &IrohaPrivateKey) -> Result<PrivateKey, Error> {
-        assert_eq!(sk.digest_function, C::ALGORITHM);
-        PrivateKey::from_bytes(&sk.payload)
-            .map_err(|e| Error::Parse(format!("Failed to parse private key: {e}")))
-    }
-
     // the names are from an RFC, not a good idea to change them
     #[allow(clippy::similar_names)]
-    pub fn keypair(
-        options: Option<KeyGenOption>,
-    ) -> Result<(IrohaPublicKey, IrohaPrivateKey), Error> {
-        let (public_key, private_key) = match options {
-            Some(option) => match option {
-                // Follows https://datatracker.ietf.org/doc/draft-irtf-cfrg-bls-signature/?include_text=1
-                KeyGenOption::UseSeed(ref seed) => {
-                    let salt = b"BLS-SIG-KEYGEN-SALT-";
-                    let info = [0u8, PRIVATE_KEY_SIZE.try_into().unwrap()]; // key_info || I2OSP(L, 2)
-                    let mut ikm = vec![0u8; seed.len() + 1];
-                    ikm[..seed.len()].copy_from_slice(seed); // IKM || I2OSP(0, 1)
-                    let mut okm = [0u8; PRIVATE_KEY_SIZE];
-                    let h = hkdf::Hkdf::<Sha256>::new(Some(&salt[..]), &ikm);
-                    h.expand(&info[..], &mut okm).map_err(|err| {
-                        Error::KeyGen(format!("Failed to generate keypair: {err}"))
-                    })?;
-                    let private_key: PrivateKey = PrivateKey::from(&okm);
-                    (
-                        PublicKey::new(&private_key, &C::Generator::generator()),
-                        private_key,
-                    )
-                }
-                KeyGenOption::FromPrivateKey(ref key) => {
-                    let private_key = Self::parse_private_key(key)?;
-                    (
-                        PublicKey::new(&private_key, &C::Generator::generator()),
-                        private_key,
+    pub fn keypair(mut option: KeyGenOption) -> (PublicKey<C::Engine>, SecretKey<C::Engine>) {
+        let private_key = match option {
+            #[cfg(feature = "rand")]
+            KeyGenOption::Random => SecretKey::generate(OsRng),
+            // Follows https://datatracker.ietf.org/doc/draft-irtf-cfrg-bls-signature/?include_text=1
+            KeyGenOption::UseSeed(ref mut seed) => {
+                let salt = b"BLS-SIG-KEYGEN-SALT-";
+                let info = [0u8, C::Engine::SECRET_KEY_SIZE.try_into().unwrap()]; // key_info || I2OSP(L, 2)
+                let mut ikm = vec![0u8; seed.len() + 1];
+                ikm[..seed.len()].copy_from_slice(seed); // IKM || I2OSP(0, 1)
+                seed.zeroize();
+                let mut okm = vec![0u8; C::Engine::SECRET_KEY_SIZE];
+                let h = hkdf::Hkdf::<Sha256>::new(Some(&salt[..]), &ikm);
+                h.expand(&info[..], &mut okm)
+                    .expect("`okm` has the correct length");
+
+                SecretKey::<C::Engine>::from_seed(&okm)
+            }
+            KeyGenOption::FromPrivateKey(ref key) => C::extract_private_key(key)
+                .unwrap_or_else(|| {
+                    panic!(
+                        "Wrong private key type for {} algorithm, got {key:?}",
+                        C::ALGORITHM,
                     )
-                }
-            },
-            None => C::generate(&C::Generator::generator()),
+                })
+                .clone(),
         };

-        Ok((
-            IrohaPublicKey {
-                digest_function: C::ALGORITHM,
-                payload: ConstVec::new(public_key.to_bytes()),
-            },
-            IrohaPrivateKey {
-                digest_function: C::ALGORITHM,
-                payload: ConstVec::new(private_key.to_bytes()),
-            },
-        ))
+        (private_key.into_public(), private_key)
     }

-    pub fn sign(message: &[u8], sk: &IrohaPrivateKey) -> Result<Vec<u8>, Error> {
-        let sk = Self::parse_private_key(sk)?;
+    pub fn sign(message: &[u8], sk: &SecretKey<C::Engine>) -> Vec<u8> {
+        let message = w3f_bls::Message::new(MESSAGE_CONTEXT, message);
+        sk.sign(&message).to_bytes()
+    }
+
+    pub fn verify(
+        message: &[u8],
+        signature: &[u8],
+        pk: &PublicKey<C::Engine>,
+    ) -> Result<(), Error> {
+        let signature = w3f_bls::Signature::<C::Engine>::from_bytes(signature)
+            .map_err(|_| ParseError("Failed to parse signature.".to_owned()))?;
+        let message = w3f_bls::Message::new(MESSAGE_CONTEXT, message);

-        Ok(Signature::<C>::new(message, None, &sk).to_bytes())
+        if !signature.verify(&message, pk) {
+            return Err(Error::BadSignature);
+        }
+
+        Ok(())
     }

-    pub fn verify(message: &[u8], signature: &[u8], pk: &IrohaPublicKey) -> Result<bool, Error> {
-        let pk = Self::parse_public_key(pk)?;
+    pub fn parse_public_key(payload: &[u8]) -> Result<PublicKey<C::Engine>, ParseError> {
+        PublicKey::from_bytes(payload).map_err(|err| ParseError(err.to_string()))
+    }

-        Ok(Signature::<C>::from_bytes(signature)
-            .map_err(|_| Error::Parse("Failed to parse signature.".to_string()))?
-            .verify(message, None, &pk, &C::Generator::generator()))
+    pub fn parse_private_key(payload: &[u8]) -> Result<SecretKey<C::Engine>, ParseError> {
+        SecretKey::from_bytes(payload).map_err(|err| ParseError(err.to_string()))
     }
 }
diff --git a/crypto/src/signature/bls/mod.rs b/crypto/src/signature/bls/mod.rs
index c3dc918abfb..8a69a3dd7a5 100644
--- a/crypto/src/signature/bls/mod.rs
+++ b/crypto/src/signature/bls/mod.rs
@@ -1,121 +1,79 @@
+pub use normal::{
+    NormalBls as BlsNormal, NormalPrivateKey as BlsNormalPrivateKey,
+    NormalPublicKey as BlsNormalPublicKey,
+};
+pub use small::{
+    SmallBls as BlsSmall, SmallPrivateKey as BlsSmallPrivateKey,
+    SmallPublicKey as BlsSmallPublicKey,
+};
+
 // Do not expose the [implementation] module & the [implementation::BlsConfiguration] trait
 mod implementation;

-pub const PRIVATE_KEY_SIZE: usize = amcl_wrapper::constants::MODBYTES;
-
 /// This version is the "normal" BLS signature scheme
 /// with the public key group in G1 and signature group in G2.
 /// 192 byte signatures and 97 byte public keys
 mod normal {
-    use amcl_wrapper::{
-        constants::{GroupG1_SIZE, GroupG2_SIZE},
-        extension_field_gt::GT,
-        group_elem_g1::G1,
-        group_elem_g2::G2,
-    };
+    use core::borrow::Borrow as _;

     use super::{implementation, implementation::BlsConfiguration};
     use crate::Algorithm;

-    pub type NormalGenerator = G1;
-    pub type NormalSignatureGroup = G2;
-
-    #[cfg(test)]
-    pub fn normal_generate(
-        g: &NormalGenerator,
-    ) -> (NormalPublicKey, super::implementation::PrivateKey) {
-        NormalConfiguration::generate(g)
-    }
-
     #[derive(Debug, Clone, Copy)]
     pub struct NormalConfiguration;
+
     impl BlsConfiguration for NormalConfiguration {
         const ALGORITHM: Algorithm = Algorithm::BlsNormal;
-        const PK_SIZE: usize = GroupG1_SIZE;
-        const SIG_SIZE: usize = GroupG2_SIZE;
-        type Generator = NormalGenerator;
-        type SignatureGroup = NormalSignatureGroup;

-        fn ate_2_pairing_is_one(
-            p1: &Self::Generator,
-            g1: &Self::SignatureGroup,
-            p2: &Self::Generator,
-            g2: &Self::SignatureGroup,
-        ) -> bool {
-            GT::ate_2_pairing(&-p1, g1, p2, g2).is_one()
-        }
+        type Engine = w3f_bls::ZBLS;

-        fn set_pairs((g1, g2): &(Self::Generator, Self::SignatureGroup)) -> (&G1, &G2) {
-            (g1, g2)
+        fn extract_private_key(private_key: &crate::PrivateKey) -> Option<&NormalPrivateKey> {
+            if let crate::PrivateKeyInner::BlsNormal(key) = private_key.0.borrow() {
+                Some(key)
+            } else {
+                None
+            }
         }
     }

     pub type NormalBls = implementation::BlsImpl<NormalConfiguration>;

-    #[cfg(test)]
-    pub type NormalSignature = implementation::Signature<NormalConfiguration>;
-    #[cfg(test)]
-    pub type NormalPublicKey = implementation::PublicKey<NormalConfiguration>;
+    pub type NormalPublicKey =
+        w3f_bls::PublicKey<<NormalConfiguration as BlsConfiguration>::Engine>;
+    pub type NormalPrivateKey =
+        w3f_bls::SecretKeyVT<<NormalConfiguration as BlsConfiguration>::Engine>;
 }

-/// This version is the small BLS signature scheme
-/// with the public key group in G2 and signature group in G1.
-/// 97 bytes signatures and 192 byte public keys
+/// Small BLS signature scheme results in smaller signatures but slower
+/// operations and bigger public key.
 ///
-/// This results in smaller signatures but slower operations and bigger public key.
-/// This is good for situations where space is a consideration and verification is infrequent
+/// This is good for situations where space is a consideration and verification is infrequent.
 mod small {
-    use amcl_wrapper::{
-        constants::{GroupG1_SIZE, GroupG2_SIZE},
-        extension_field_gt::GT,
-        group_elem_g1::G1,
-        group_elem_g2::G2,
-    };
+    use core::borrow::Borrow as _;

     use super::implementation::{self, BlsConfiguration};
     use crate::Algorithm;

-    pub type SmallGenerator = G2;
-    pub type SmallSignatureGroup = G1;
-
-    #[cfg(test)]
-    pub fn small_generate(
-        g: &SmallGenerator,
-    ) -> (SmallPublicKey, super::implementation::PrivateKey) {
-        SmallConfiguration::generate(g)
-    }
-
     #[derive(Debug, Clone, Copy)]
     pub struct SmallConfiguration;

     impl BlsConfiguration for SmallConfiguration {
         const ALGORITHM: Algorithm = Algorithm::BlsSmall;
-        const PK_SIZE: usize = GroupG2_SIZE;
-        const SIG_SIZE: usize = GroupG1_SIZE;
-        type Generator = SmallGenerator;
-        type SignatureGroup = SmallSignatureGroup;

-        fn ate_2_pairing_is_one(
-            p1: &Self::Generator,
-            g1: &Self::SignatureGroup,
-            p2: &Self::Generator,
-            g2: &Self::SignatureGroup,
-        ) -> bool {
-            GT::ate_2_pairing(g1, &-p1, g2, p2).is_one()
-        }
+        type Engine = w3f_bls::TinyBLS381;

-        fn set_pairs((g2, g1): &(Self::Generator, Self::SignatureGroup)) -> (&G1, &G2) {
-            (g1, g2)
+        fn extract_private_key(private_key: &crate::PrivateKey) -> Option<&SmallPrivateKey> {
+            if let crate::PrivateKeyInner::BlsSmall(key) = private_key.0.borrow() {
+                Some(key)
+            } else {
+                None
+            }
         }
     }

     pub type SmallBls = implementation::BlsImpl<SmallConfiguration>;

-    #[cfg(test)]
-    pub type SmallSignature = implementation::Signature<SmallConfiguration>;
-    #[cfg(test)]
-    pub type SmallPublicKey = implementation::PublicKey<SmallConfiguration>;
+    pub type SmallPublicKey = w3f_bls::PublicKey<<SmallConfiguration as BlsConfiguration>::Engine>;
+    pub type SmallPrivateKey =
+        w3f_bls::SecretKeyVT<<SmallConfiguration as BlsConfiguration>::Engine>;
 }

-pub use normal::NormalBls as BlsNormal;
-pub use small::SmallBls as BlsSmall;
-
 #[cfg(test)]
 mod tests;
diff --git a/crypto/src/signature/bls/tests.rs b/crypto/src/signature/bls/tests.rs
index 243f4d27bca..31b21a64987 100644
--- a/crypto/src/signature/bls/tests.rs
+++ b/crypto/src/signature/bls/tests.rs
@@ -1,14 +1,9 @@
-use amcl_wrapper::{
-    constants::{GroupG1_SIZE, MODBYTES},
-    field_elem::FieldElement,
-    group_elem::GroupElement,
-    types_g2::GroupG2_SIZE,
-};
+use w3f_bls::SerializableToBytes as _;

 use super::{
-    implementation::{BlsConfiguration, BlsImpl, Signature, MESSAGE_CONTEXT},
-    normal::{normal_generate, NormalConfiguration, NormalGenerator, NormalSignature},
-    small::{small_generate, SmallConfiguration, SmallGenerator, SmallSignature},
+    implementation::{BlsConfiguration, BlsImpl},
+    normal::NormalConfiguration,
+    small::SmallConfiguration,
 };
 use crate::KeyGenOption;

@@ -16,61 +11,87 @@ const MESSAGE_1: &[u8; 22] = b"This is a test message";
 const MESSAGE_2: &[u8; 20] = b"Another test message";
 const SEED: &[u8; 10] = &[1u8; 10];

-#[test]
-fn size_check() {
-    let msg = FieldElement::random();
-    let g = NormalGenerator::generator();
-    let (pk, sk) = normal_generate(&g);
-    assert_eq!(sk.to_bytes().len(), MODBYTES);
-    assert_eq!(pk.to_bytes().len(), GroupG1_SIZE);
-    let sig = NormalSignature::new(msg.to_bytes().as_slice(), None, &sk);
-    assert_eq!(sig.to_bytes().len(), GroupG2_SIZE);
-
-    let g = SmallGenerator::generator();
-    let (pk, sk) = small_generate(&g);
-    assert_eq!(sk.to_bytes().len(), MODBYTES);
-    assert_eq!(pk.to_bytes().len(), GroupG2_SIZE);
-    let sig = SmallSignature::new(msg.to_bytes().as_slice(), None, &sk);
-    assert_eq!(sig.to_bytes().len(), GroupG1_SIZE);
-}
+#[allow(clippy::similar_names)]
+fn test_keypair_generation_from_seed<C: BlsConfiguration>() {
+    let (pk_1, sk_1) = BlsImpl::<C>::keypair(KeyGenOption::UseSeed(SEED.to_vec()));
+    let (pk_2, sk_2) = BlsImpl::<C>::keypair(KeyGenOption::UseSeed(SEED.to_vec()));

-fn signature_generation_from_seed<C: BlsConfiguration>() {
-    let keypair_1 = BlsImpl::<C>::keypair(Some(KeyGenOption::UseSeed(SEED.to_vec()))).unwrap();
-    let keypair_2 = BlsImpl::<C>::keypair(Some(KeyGenOption::UseSeed(SEED.to_vec()))).unwrap();
-    assert_eq!(keypair_1, keypair_2);
+    assert!(
+        (pk_1, sk_1.to_bytes()) == (pk_2, sk_2.to_bytes()),
+        "Keypairs are not equal"
+    );
 }

-fn signature_verification<C: BlsConfiguration>() {
-    let g = C::Generator::generator();
-    let (pk, sk) = C::generate(&g);
+fn test_signature_verification<C: BlsConfiguration>() {
+    let (pk, sk) = BlsImpl::<C>::keypair(KeyGenOption::Random);

-    let signature_1 = Signature::<C>::new(&MESSAGE_1[..], None, &sk);
-    assert!(signature_1.verify(&MESSAGE_1[..], None, &pk, &g));
+    let signature_1 = BlsImpl::<C>::sign(MESSAGE_1, &sk);
+    BlsImpl::<C>::verify(MESSAGE_1, &signature_1, &pk)
+        .expect("Signature verification should succeed");
+}

-    let signature_2 = Signature::<C>::new(&MESSAGE_2[..], Some(MESSAGE_CONTEXT), &sk);
-    assert!(signature_2.verify(&MESSAGE_2[..], Some(MESSAGE_CONTEXT), &pk, &g));
+fn test_signature_verification_different_messages<C: BlsConfiguration>() {
+    let (pk, sk) = BlsImpl::<C>::keypair(KeyGenOption::Random);

-    // Should fail for different messages
-    assert!(!signature_1.verify(&MESSAGE_2[..], Some(MESSAGE_CONTEXT), &pk, &g));
-    assert!(!signature_2.verify(&MESSAGE_1[..], None, &pk, &g));
+    let signature = BlsImpl::<C>::sign(MESSAGE_1, &sk);
+    BlsImpl::<C>::verify(MESSAGE_2, &signature, &pk)
+        .expect_err("Signature verification for wrong message should fail");
 }

-#[test]
-fn normal_signature_generation_from_seed() {
-    signature_generation_from_seed::<NormalConfiguration>();
-}
+#[allow(clippy::similar_names)]
+fn test_signature_verification_different_keys<C: BlsConfiguration>() {
+    let (_pk_1, sk_1) = BlsImpl::<C>::keypair(KeyGenOption::Random);
+    let (pk_2, _sk_2) = BlsImpl::<C>::keypair(KeyGenOption::Random);

-#[test]
-fn normal_signature_verification() {
-    signature_verification::<NormalConfiguration>();
+    let signature = BlsImpl::<C>::sign(MESSAGE_1, &sk_1);
+    BlsImpl::<C>::verify(MESSAGE_1, &signature, &pk_2)
+        .expect_err("Signature verification for wrong public key should fail");
 }

-#[test]
-fn small_signature_generation_from_seed() {
-    signature_generation_from_seed::<SmallConfiguration>();
+mod normal {
+    use super::*;
+
+    #[test]
+    fn keypair_generation_from_seed() {
+        test_keypair_generation_from_seed::<NormalConfiguration>();
+    }
+
+    #[test]
+    fn signature_verification() {
+        test_signature_verification::<NormalConfiguration>();
+    }
+
+    #[test]
+    fn signature_verification_different_messages() {
+        test_signature_verification_different_messages::<NormalConfiguration>();
+    }
+
+    #[test]
+    fn signature_verification_different_keys() {
+        test_signature_verification_different_keys::<NormalConfiguration>();
+    }
 }

-#[test]
-fn small_signature_verification() {
-    signature_verification::<SmallConfiguration>();
+mod small {
+    use super::*;
+
+    #[test]
+    fn keypair_generation_from_seed() {
+        test_keypair_generation_from_seed::<SmallConfiguration>();
+    }
+
+    #[test]
+    fn signature_verification() {
+        test_signature_verification::<SmallConfiguration>();
+    }
+
+    #[test]
+    fn signature_verification_different_messages() {
+        test_signature_verification_different_messages::<SmallConfiguration>();
+    }
+
+    #[test]
+    fn signature_verification_different_keys() {
+        test_signature_verification_different_keys::<SmallConfiguration>();
+    }
 }
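The new BLS surface is deliberately small: infallible `keypair`/`sign`, fallible parsing and `verify`. A crate-internal sketch (not part of the diff; the `bls` module is `pub(crate)`, so this only compiles from inside `iroha_crypto`, and `KeyGenOption::Random` requires the `rand` feature):

use crate::{signature::bls::BlsNormal, Error, KeyGenOption};

fn bls_roundtrip() {
    let (public_key, private_key) = BlsNormal::keypair(KeyGenOption::Random);

    // `sign` can no longer fail; it returns the raw signature bytes.
    let signature = BlsNormal::sign(b"payload", &private_key);

    // `verify` reports failure as `Error::BadSignature` instead of `Ok(false)`.
    assert!(BlsNormal::verify(b"payload", &signature, &public_key).is_ok());
    assert!(matches!(
        BlsNormal::verify(b"tampered", &signature, &public_key),
        Err(Error::BadSignature)
    ));
}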
diff --git a/crypto/src/signature/ed25519.rs b/crypto/src/signature/ed25519.rs
index 0312bff8c12..d7b4715d5ce 100644
--- a/crypto/src/signature/ed25519.rs
+++ b/crypto/src/signature/ed25519.rs
@@ -1,70 +1,68 @@
-use std::convert::TryFrom;
+use core::{borrow::Borrow as _, convert::TryFrom};

 use arrayref::array_ref;
-use ed25519_dalek::{Signature, SigningKey, VerifyingKey as PK};
-use iroha_primitives::const_vec::ConstVec;
-use rand::{rngs::OsRng, SeedableRng};
+use ed25519_dalek::Signature;
+#[cfg(feature = "rand")]
+use rand::rngs::OsRng;
+use rand::SeedableRng;
 use rand_chacha::ChaChaRng;
 use sha2::Digest;
 use signature::{Signer as _, Verifier as _};
 use zeroize::Zeroize;

-const ALGORITHM: Algorithm = Algorithm::Ed25519;
+use crate::{Error, KeyGenOption, ParseError};

-use crate::{Algorithm, Error, KeyGenOption, PrivateKey, PublicKey};
+pub type PublicKey = ed25519_dalek::VerifyingKey;
+pub type PrivateKey = ed25519_dalek::SigningKey;

-fn parse_private_key(sk: &PrivateKey) -> Result<SigningKey, Error> {
-    assert_eq!(sk.digest_function, ALGORITHM);
-    SigningKey::from_keypair_bytes(
-        &<[u8; 64]>::try_from(&sk.payload[..]).map_err(|e| Error::Parse(e.to_string()))?,
-    )
-    .map_err(|e| Error::Parse(e.to_string()))
-}
-
-fn parse_public_key(pk: &PublicKey) -> Result<PK, Error> {
-    assert_eq!(pk.digest_function, ALGORITHM);
-    PK::try_from(&pk.payload[..]).map_err(|e| Error::Parse(e.to_string()))
-}
+#[cfg(not(feature = "std"))]
+use alloc::{string::ToString as _, vec::Vec};

 #[derive(Debug, Clone, Copy)]
 pub struct Ed25519Sha512;

 impl Ed25519Sha512 {
-    pub fn keypair(mut option: Option<KeyGenOption>) -> Result<(PublicKey, PrivateKey), Error> {
-        let kp = match option {
-            Some(KeyGenOption::UseSeed(ref mut s)) => {
+    pub fn keypair(mut option: KeyGenOption) -> (PublicKey, PrivateKey) {
+        let signing_key = match option {
+            #[cfg(feature = "rand")]
+            KeyGenOption::Random => PrivateKey::generate(&mut OsRng),
+            KeyGenOption::UseSeed(ref mut s) => {
                 let hash = sha2::Sha256::digest(s.as_slice());
                 s.zeroize();
                 let mut rng = ChaChaRng::from_seed(*array_ref!(hash.as_slice(), 0, 32));
-                SigningKey::generate(&mut rng)
+                PrivateKey::generate(&mut rng)
             }
-            Some(KeyGenOption::FromPrivateKey(ref s)) => parse_private_key(s)?,
-            None => {
-                let mut rng = OsRng;
-                SigningKey::generate(&mut rng)
+            KeyGenOption::FromPrivateKey(ref s) => {
+                let crate::PrivateKeyInner::Ed25519(s) = s.0.borrow() else {
+                    panic!("Wrong private key type, expected `Ed25519`, got {s:?}")
+                };
+                PrivateKey::clone(s)
             }
         };

-        Ok((
-            PublicKey {
-                digest_function: ALGORITHM,
-                payload: ConstVec::new(kp.verifying_key().to_bytes().to_vec()),
-            },
-            PrivateKey {
-                digest_function: ALGORITHM,
-                payload: ConstVec::new(kp.to_keypair_bytes().to_vec()),
-            },
-        ))
+        (signing_key.verifying_key(), signing_key)
     }

-    pub fn sign(message: &[u8], sk: &PrivateKey) -> Result<Vec<u8>, Error> {
-        let kp = parse_private_key(sk)?;
-        Ok(kp.sign(message).to_bytes().to_vec())
+    pub fn parse_public_key(payload: &[u8]) -> Result<PublicKey, ParseError> {
+        PublicKey::from_bytes(arrayref::array_ref!(payload, 0, 32))
+            .map_err(|err| ParseError(err.to_string()))
     }

-    pub fn verify(message: &[u8], signature: &[u8], pk: &PublicKey) -> Result<bool, Error> {
-        let p = parse_public_key(pk)?;
-        let s = Signature::try_from(signature).map_err(|e| Error::Parse(e.to_string()))?;
-        p.verify(message, &s)
-            .map_err(|e| Error::Signing(e.to_string()))?;
-        Ok(true)
+    pub fn parse_private_key(payload: &[u8]) -> Result<PrivateKey, ParseError> {
+        <[u8; 64]>::try_from(payload)
+            .map_err(|err| err.to_string())
+            .and_then(|payload| {
+                PrivateKey::from_keypair_bytes(&payload).map_err(|err| err.to_string())
+            })
+            .map_err(ParseError)
+    }
+
+    pub fn sign(message: &[u8], sk: &PrivateKey) -> Vec<u8> {
+        sk.sign(message).to_bytes().to_vec()
+    }
+
+    pub fn verify(message: &[u8], signature: &[u8], pk: &PublicKey) -> Result<(), Error> {
+        let s = Signature::try_from(signature).map_err(|e| ParseError(e.to_string()))?;
+        pk.verify(message, &s).map_err(|_| Error::BadSignature)
     }
 }

@@ -76,7 +74,7 @@ mod test {
     use self::Ed25519Sha512;
     use super::*;
-    use crate::{KeyGenOption, PrivateKey, PublicKey};
+    use crate::{Algorithm, KeyGenOption, PrivateKey, PublicKey};

     const MESSAGE_1: &[u8] = b"This is a dummy message for use with tests";
     const SIGNATURE_1: &str = "451b5b8e8725321541954997781de51f4142e4a56bab68d24f6a6b92615de5eefb74134138315859a32c7cf5fe5a488bc545e2e08e5eedfd1fb10188d532d808";
@@ -86,7 +84,7 @@ mod test {
     #[test]
     #[ignore]
     fn create_new_keys() {
-        let (p, s) = Ed25519Sha512::keypair(None).unwrap();
+        let (p, s) = Ed25519Sha512::keypair(KeyGenOption::Random);

         println!("{s:?}");
         println!("{p:?}");
@@ -95,16 +93,14 @@ mod test {
     #[test]
     fn ed25519_load_keys() {
         let secret = PrivateKey::from_hex(Algorithm::Ed25519, PRIVATE_KEY).unwrap();
-        let sres = Ed25519Sha512::keypair(Some(KeyGenOption::FromPrivateKey(secret)));
-        assert!(sres.is_ok());
-        let (p1, s1) = sres.unwrap();
+        let (p1, s1) = Ed25519Sha512::keypair(KeyGenOption::FromPrivateKey(Box::new(secret)));

         assert_eq!(
-            s1,
+            PrivateKey(Box::new(crate::PrivateKeyInner::Ed25519(s1))),
             PrivateKey::from_hex(Algorithm::Ed25519, PRIVATE_KEY).unwrap()
         );
         assert_eq!(
-            p1,
+            PublicKey(Box::new(crate::PublicKeyInner::Ed25519(p1))),
             PublicKey::from_hex(Algorithm::Ed25519, PUBLIC_KEY).unwrap()
         );
     }

@@ -112,21 +108,19 @@ mod test {
     #[test]
     fn ed25519_verify() {
         let secret = PrivateKey::from_hex(Algorithm::Ed25519, PRIVATE_KEY).unwrap();
-        let (p, _) = Ed25519Sha512::keypair(Some(KeyGenOption::FromPrivateKey(secret))).unwrap();
+        let (p, _) = Ed25519Sha512::keypair(KeyGenOption::FromPrivateKey(Box::new(secret)));

-        let result =
-            Ed25519Sha512::verify(MESSAGE_1, hex::decode(SIGNATURE_1).unwrap().as_slice(), &p);
-        assert!(result.is_ok());
-        assert!(result.unwrap());
+        Ed25519Sha512::verify(MESSAGE_1, hex::decode(SIGNATURE_1).unwrap().as_slice(), &p).unwrap();

-        //Check if signatures produced here can be verified by libsodium
+        // Check if signatures produced here can be verified by libsodium
         let signature = hex::decode(SIGNATURE_1).unwrap();
+        let p_bytes = p.to_bytes();
         let res = unsafe {
             ffi::crypto_sign_ed25519_verify_detached(
                 signature.as_slice().as_ptr(),
                 MESSAGE_1.as_ptr(),
                 MESSAGE_1.len() as u64,
-                p.payload().as_ptr(),
+                p_bytes.as_ptr(),
             )
         };
         assert_eq!(res, 0);
@@ -135,12 +129,10 @@ mod test {
     #[test]
     fn ed25519_sign() {
         let secret = PrivateKey::from_hex(Algorithm::Ed25519, PRIVATE_KEY).unwrap();
-        let (p, s) = Ed25519Sha512::keypair(Some(KeyGenOption::FromPrivateKey(secret))).unwrap();
+        let (p, s) = Ed25519Sha512::keypair(KeyGenOption::FromPrivateKey(Box::new(secret)));

-        let sig = Ed25519Sha512::sign(MESSAGE_1, &s).unwrap();
-        let result = Ed25519Sha512::verify(MESSAGE_1, &sig, &p);
-        assert!(result.is_ok());
-        assert!(result.unwrap());
+        let sig = Ed25519Sha512::sign(MESSAGE_1, &s);
+        Ed25519Sha512::verify(MESSAGE_1, &sig, &p).unwrap();

         assert_eq!(sig.len(), ed25519_dalek::SIGNATURE_LENGTH);
         assert_eq!(hex::encode(sig.as_slice()), SIGNATURE_1);
@@ -148,17 +140,16 @@ mod test {
         //Check if libsodium signs the message and this module still can verify it
         //And that private keys can sign with other libraries
         let mut signature = [0u8; ffi::crypto_sign_ed25519_BYTES as usize];
+        let s_bytes = s.to_keypair_bytes();
         unsafe {
             ffi::crypto_sign_ed25519_detached(
                 signature.as_mut_ptr(),
                 std::ptr::null_mut(),
                 MESSAGE_1.as_ptr(),
                 MESSAGE_1.len() as u64,
-                s.payload().as_ptr(),
+                s_bytes.as_ptr(),
             )
         };
-        let result = Ed25519Sha512::verify(MESSAGE_1, &signature, &p);
-        assert!(result.is_ok());
-        assert!(result.unwrap());
+        Ed25519Sha512::verify(MESSAGE_1, &signature, &p).unwrap();
     }
 }
Ed25519Sha512::verify(MESSAGE_1, &signature, &p).unwrap(); } } diff --git a/crypto/src/signature/mod.rs b/crypto/src/signature/mod.rs index 387a939be29..1317403f8b8 100644 --- a/crypto/src/signature/mod.rs +++ b/crypto/src/signature/mod.rs @@ -1,38 +1,32 @@ // pub(crate) for inner modules it is not redundant, the contents of `signature` module get re-exported at root #![allow(clippy::redundant_pub_crate)] -#[cfg(feature = "std")] #[cfg(not(feature = "ffi_import"))] pub(crate) mod bls; -#[cfg(feature = "std")] #[cfg(not(feature = "ffi_import"))] pub(crate) mod ed25519; -#[cfg(feature = "std")] #[cfg(not(feature = "ffi_import"))] pub(crate) mod secp256k1; #[cfg(not(feature = "std"))] -use alloc::{boxed::Box, collections::btree_set, format, string::String, vec, vec::Vec}; -use core::marker::PhantomData; +use alloc::{ + boxed::Box, collections::btree_set, format, string::String, string::ToString as _, vec, + vec::Vec, +}; +use core::{borrow::Borrow as _, marker::PhantomData}; #[cfg(feature = "std")] -#[cfg(not(feature = "ffi_import"))] use std::collections::btree_set; use derive_more::{Deref, DerefMut}; -use iroha_macro::ffi_impl_opaque; use iroha_primitives::const_vec::ConstVec; use iroha_schema::{IntoSchema, TypeId}; use parity_scale_codec::{Decode, Encode}; #[cfg(not(feature = "ffi_import"))] use serde::{Deserialize, Serialize}; -#[cfg(any(feature = "std", feature = "import_ffi"))] -use crate::Error; -use crate::{ffi, PublicKey}; -#[cfg(feature = "std")] -use crate::{HashOf, KeyPair}; +use crate::{ffi, Error, HashOf, KeyPair, PublicKey}; ffi::ffi_item! { /// Represents signature of the data (`Block` or `Transaction` for example). @@ -52,7 +46,6 @@ ffi::ffi_item! { } } -#[ffi_impl_opaque] impl Signature { /// Key payload pub fn payload(&self) -> &[u8] { @@ -63,46 +56,38 @@ impl Signature { /// /// # Errors /// Fails if signing fails - #[cfg(any(feature = "std", feature = "import_ffi"))] - pub fn new(key_pair: KeyPair, payload: &[u8]) -> Result { - let (public_key, private_key) = key_pair.into(); - - let algorithm: crate::Algorithm = private_key.digest_function(); - - let signature = match algorithm { - crate::Algorithm::Ed25519 => ed25519::Ed25519Sha512::sign(payload, &private_key), - crate::Algorithm::Secp256k1 => { - secp256k1::EcdsaSecp256k1Sha256::sign(payload, &private_key) + pub fn new(key_pair: &KeyPair, payload: &[u8]) -> Self { + let signature = match key_pair.private_key.0.borrow() { + crate::PrivateKeyInner::Ed25519(sk) => ed25519::Ed25519Sha512::sign(payload, sk), + crate::PrivateKeyInner::Secp256k1(sk) => { + secp256k1::EcdsaSecp256k1Sha256::sign(payload, sk) } - crate::Algorithm::BlsSmall => bls::BlsSmall::sign(payload, &private_key), - crate::Algorithm::BlsNormal => bls::BlsNormal::sign(payload, &private_key), - }?; - Ok(Self { - public_key, + crate::PrivateKeyInner::BlsSmall(sk) => bls::BlsSmall::sign(payload, sk), + crate::PrivateKeyInner::BlsNormal(sk) => bls::BlsNormal::sign(payload, sk), + }; + Self { + public_key: key_pair.public_key.clone(), payload: ConstVec::new(signature), - }) + } } - /// Verify `message` using signed data and [`KeyPair::public_key`]. + /// Verify `payload` using signed data and [`KeyPair::public_key`]. 
/// /// # Errors /// Fails if message didn't pass verification - #[cfg(any(feature = "std", feature = "import_ffi"))] pub fn verify(&self, payload: &[u8]) -> Result<(), Error> { - let algorithm: crate::Algorithm = self.public_key.digest_function(); - - match algorithm { - crate::Algorithm::Ed25519 => { - ed25519::Ed25519Sha512::verify(payload, self.payload(), &self.public_key) + match self.public_key.0.borrow() { + crate::PublicKeyInner::Ed25519(pk) => { + ed25519::Ed25519Sha512::verify(payload, self.payload(), pk) } - crate::Algorithm::Secp256k1 => { - secp256k1::EcdsaSecp256k1Sha256::verify(payload, self.payload(), &self.public_key) + crate::PublicKeyInner::Secp256k1(pk) => { + secp256k1::EcdsaSecp256k1Sha256::verify(payload, self.payload(), pk) } - crate::Algorithm::BlsSmall => { - bls::BlsSmall::verify(payload, self.payload(), &self.public_key) + crate::PublicKeyInner::BlsSmall(pk) => { + bls::BlsSmall::verify(payload, self.payload(), pk) } - crate::Algorithm::BlsNormal => { - bls::BlsNormal::verify(payload, self.payload(), &self.public_key) + crate::PublicKeyInner::BlsNormal(pk) => { + bls::BlsNormal::verify(payload, self.payload(), pk) } }?; @@ -166,6 +151,7 @@ impl Clone for SignatureOf { } } +#[allow(clippy::unconditional_recursion)] // False-positive impl PartialEq for SignatureOf { fn eq(&self, other: &Self) -> bool { self.0.eq(&other.0) @@ -214,9 +200,9 @@ impl SignatureOf { /// /// # Errors /// Fails if signing fails - #[cfg(any(feature = "std", feature = "import_ffi"))] - fn from_hash(key_pair: KeyPair, hash: HashOf) -> Result { - Signature::new(key_pair, hash.as_ref()).map(|signature| Self(signature, PhantomData)) + #[inline] + fn from_hash(key_pair: &KeyPair, hash: HashOf) -> Self { + Self(Signature::new(key_pair, hash.as_ref()), PhantomData) } /// Verify signature for this hash @@ -224,13 +210,11 @@ impl SignatureOf { /// # Errors /// /// Fails if the given hash didn't pass verification - #[cfg(any(feature = "std", feature = "import_ffi"))] fn verify_hash(&self, hash: HashOf) -> Result<(), Error> { self.0.verify(hash.as_ref()) } } -#[cfg(any(feature = "std", feature = "import_ffi"))] impl SignatureOf { /// Create [`SignatureOf`] by signing the given value with [`KeyPair::private_key`]. /// The value provided will be hashed before being signed. 
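For a sense of the caller-facing effect of these hunks: signing is now infallible and key pairs are borrowed rather than consumed. A minimal sketch using only the APIs visible in this diff (illustrative code, not an excerpt from the repository; key generation assumes the crate's `rand` feature):

    use iroha_crypto::{Error, KeyPair, Signature};

    fn sign_and_check(payload: &[u8]) -> Result<(), Error> {
        let key_pair = KeyPair::generate().expect("OS RNG should be available");

        // Before this change the call was `Signature::new(key_pair.clone(), payload)?`:
        // it consumed the pair and returned `Result<Signature, Error>`. The pair is now
        // borrowed and construction cannot fail, so both the clone and the `?` go away.
        let signature = Signature::new(&key_pair, payload);

        // Verification still returns `Result`, but a mismatch now surfaces as
        // `Error::BadSignature` instead of the old `Ok(false)`.
        signature.verify(payload)
    }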
If you already have the @@ -238,7 +222,8 @@ impl SignatureOf { /// /// # Errors /// Fails if signing fails - pub fn new(key_pair: KeyPair, value: &T) -> Result { + #[inline] + pub fn new(key_pair: &KeyPair, value: &T) -> Self { Self::from_hash(key_pair, HashOf::new(value)) } @@ -286,6 +271,7 @@ impl Clone for SignatureWrapperOf { } } +#[allow(clippy::unconditional_recursion)] // False-positive #[cfg(not(feature = "ffi_import"))] impl PartialEq for SignatureWrapperOf { fn eq(&self, other: &Self) -> bool { @@ -349,6 +335,7 @@ impl Clone for SignaturesOf { } } +#[allow(clippy::unconditional_recursion)] // False-positive #[cfg(not(feature = "ffi_import"))] impl PartialEq for SignaturesOf { fn eq(&self, other: &Self) -> bool { @@ -465,7 +452,6 @@ impl SignaturesOf { /// /// # Errors /// Fails if verification of any signature fails - #[cfg(feature = "std")] pub fn verify_hash(&self, hash: HashOf) -> Result<(), SignatureVerificationFail> { self.iter().try_for_each(|signature| { signature @@ -483,15 +469,15 @@ } } -#[cfg(feature = "std")] #[cfg(not(feature = "ffi_import"))] impl SignaturesOf { /// Create new signatures container /// /// # Errors /// Forwards [`SignatureOf::new`] errors - pub fn new(key_pair: KeyPair, value: &T) -> Result { - SignatureOf::new(key_pair, value).map(Self::from) + #[inline] + pub fn new(key_pair: &KeyPair, value: &T) -> Self { + SignatureOf::new(key_pair, value).into() } /// Verifies all signatures @@ -540,76 +526,69 @@ impl std::error::Error for SignatureVerificationFail {} #[cfg(test)] mod tests { - #[cfg(feature = "std")] use super::*; - #[cfg(any(feature = "std", feature = "ffi_import"))] use crate::KeyGenConfiguration; #[test] - #[cfg(any(feature = "std", feature = "ffi_import"))] + #[cfg(feature = "rand")] fn create_signature_ed25519() { let key_pair = KeyPair::generate_with_configuration( - KeyGenConfiguration::default().with_algorithm(crate::Algorithm::Ed25519), + KeyGenConfiguration::from_random().with_algorithm(crate::Algorithm::Ed25519), ) .expect("Failed to generate key pair."); let message = b"Test message to sign."; - let signature = - Signature::new(key_pair.clone(), message).expect("Failed to create signature."); + let signature = Signature::new(&key_pair, message); assert!(*signature.public_key() == *key_pair.public_key()); - assert!(signature.verify(message).is_ok()); + signature.verify(message).unwrap(); } #[test] - #[cfg(any(feature = "std", feature = "ffi_import"))] + #[cfg(feature = "rand")] fn create_signature_secp256k1() { let key_pair = KeyPair::generate_with_configuration( - KeyGenConfiguration::default().with_algorithm(crate::Algorithm::Secp256k1), + KeyGenConfiguration::from_random().with_algorithm(crate::Algorithm::Secp256k1), ) .expect("Failed to generate key pair."); let message = b"Test message to sign."; - let signature = - Signature::new(key_pair.clone(), message).expect("Failed to create signature."); + let signature = Signature::new(&key_pair, message); assert!(*signature.public_key() == *key_pair.public_key()); - assert!(signature.verify(message).is_ok()); + signature.verify(message).unwrap(); } #[test] - #[cfg(any(feature = "std", feature = "ffi_import"))] + #[cfg(feature = "rand")] fn create_signature_bls_normal() { let key_pair = KeyPair::generate_with_configuration( - KeyGenConfiguration::default().with_algorithm(crate::Algorithm::BlsNormal), + KeyGenConfiguration::from_random().with_algorithm(crate::Algorithm::BlsNormal), ) .expect("Failed to generate key pair."); let message = b"Test message to sign."; -
let signature = - Signature::new(key_pair.clone(), message).expect("Failed to create signature."); + let signature = Signature::new(&key_pair, message); assert!(*signature.public_key() == *key_pair.public_key()); - assert!(signature.verify(message).is_ok()); + signature.verify(message).unwrap(); } #[test] - #[cfg(any(feature = "std", feature = "ffi_import"))] + #[cfg(all(feature = "rand", any(feature = "std", feature = "ffi_import")))] fn create_signature_bls_small() { let key_pair = KeyPair::generate_with_configuration( - KeyGenConfiguration::default().with_algorithm(crate::Algorithm::BlsSmall), + KeyGenConfiguration::from_random().with_algorithm(crate::Algorithm::BlsSmall), ) .expect("Failed to generate key pair."); let message = b"Test message to sign."; - let signature = - Signature::new(key_pair.clone(), message).expect("Failed to create signature."); + let signature = Signature::new(&key_pair, message); assert!(*signature.public_key() == *key_pair.public_key()); - assert!(signature.verify(message).is_ok()); + signature.verify(message).unwrap(); } #[test] - #[cfg(feature = "std")] - #[cfg(not(feature = "ffi_import"))] + #[cfg(all(feature = "rand", not(feature = "ffi_import")))] fn signatures_of_deduplication_by_public_key() { let key_pair = KeyPair::generate().expect("Failed to generate keys"); let signatures = [ - SignatureOf::new(key_pair.clone(), &1).expect("Failed to sign"), - SignatureOf::new(key_pair.clone(), &2).expect("Failed to sign"), - SignatureOf::new(key_pair, &3).expect("Failed to sign"), + SignatureOf::new(&key_pair, &1), + SignatureOf::new(&key_pair, &2), + SignatureOf::new(&key_pair, &3), ] .into_iter() .collect::>(); @@ -618,7 +597,6 @@ mod tests { } #[test] - #[cfg(feature = "std")] #[cfg(not(feature = "ffi_import"))] fn signature_wrapper_btree_and_hash_sets_consistent_results() { use std::collections::{BTreeSet, HashSet}; @@ -631,7 +609,7 @@ mod tests { .flat_map(|key| { core::iter::repeat_with(move || key.clone()) .zip(0..) 
- .map(|(key, i)| SignatureOf::new(key, &i).expect("Failed to sign")) + .map(|(key, i)| SignatureOf::new(&key, &i)) .take(signatures_per_key) }) .map(SignatureWrapperOf) diff --git a/crypto/src/signature/secp256k1.rs b/crypto/src/signature/secp256k1.rs index 1939213af37..b0b50d7f878 100644 --- a/crypto/src/signature/secp256k1.rs +++ b/crypto/src/signature/secp256k1.rs @@ -1,103 +1,109 @@ +#[cfg(not(feature = "std"))] +use alloc::{format, vec::Vec}; + use self::ecdsa_secp256k1::EcdsaSecp256k1Impl; -use crate::{Algorithm, Error, KeyGenOption, PrivateKey, PublicKey}; +use crate::{Error, KeyGenOption, ParseError}; pub const PRIVATE_KEY_SIZE: usize = 32; -pub const PUBLIC_KEY_SIZE: usize = 33; - -const ALGORITHM: Algorithm = Algorithm::Secp256k1; pub struct EcdsaSecp256k1Sha256; +pub type PublicKey = k256::PublicKey; +pub type PrivateKey = k256::SecretKey; + impl EcdsaSecp256k1Sha256 { - pub fn keypair(option: Option) -> Result<(PublicKey, PrivateKey), Error> { + pub fn keypair(option: KeyGenOption) -> (PublicKey, PrivateKey) { EcdsaSecp256k1Impl::keypair(option) } - pub fn sign(message: &[u8], sk: &PrivateKey) -> Result, Error> { + + pub fn sign(message: &[u8], sk: &PrivateKey) -> Vec { EcdsaSecp256k1Impl::sign(message, sk) } - pub fn verify(message: &[u8], signature: &[u8], pk: &PublicKey) -> Result { + + pub fn verify(message: &[u8], signature: &[u8], pk: &PublicKey) -> Result<(), Error> { EcdsaSecp256k1Impl::verify(message, signature, pk) } + + pub fn parse_public_key(payload: &[u8]) -> Result { + EcdsaSecp256k1Impl::parse_public_key(payload) + } + + pub fn parse_private_key(payload: &[u8]) -> Result { + EcdsaSecp256k1Impl::parse_private_key(payload) + } } mod ecdsa_secp256k1 { - use amcl::secp256k1::ecp; + #[cfg(not(feature = "std"))] + use alloc::{format, string::ToString as _, vec::Vec}; + use core::borrow::Borrow; + use arrayref::array_ref; use digest::Digest as _; - use iroha_primitives::const_vec::ConstVec; - use rand::{rngs::OsRng, RngCore, SeedableRng}; + #[cfg(feature = "rand")] + use rand::rngs::OsRng; + use rand::{RngCore, SeedableRng}; use rand_chacha::ChaChaRng; use signature::{Signer as _, Verifier as _}; use zeroize::Zeroize; - use super::{ALGORITHM, PRIVATE_KEY_SIZE, PUBLIC_KEY_SIZE}; - use crate::{Error, KeyGenOption, PrivateKey, PublicKey}; + use super::{PrivateKey, PublicKey, PRIVATE_KEY_SIZE}; + use crate::{Error, KeyGenOption, ParseError}; pub struct EcdsaSecp256k1Impl; type Digest = sha2::Sha256; impl EcdsaSecp256k1Impl { - pub fn public_key_compressed(pk: &PublicKey) -> Vec { - assert_eq!(pk.digest_function, ALGORITHM); - let mut compressed = [0u8; PUBLIC_KEY_SIZE]; - ecp::ECP::frombytes(&pk.payload[..]).tobytes(&mut compressed, true); - compressed.to_vec() - } - - pub fn keypair(option: Option) -> Result<(PublicKey, PrivateKey), Error> { + pub fn keypair(mut option: KeyGenOption) -> (PublicKey, PrivateKey) { let signing_key = match option { - Some(mut o) => match o { - KeyGenOption::UseSeed(ref mut seed) => { - let mut s = [0u8; PRIVATE_KEY_SIZE]; - let mut rng = ChaChaRng::from_seed(*array_ref!(seed.as_slice(), 0, 32)); - seed.zeroize(); - rng.fill_bytes(&mut s); - let k = Digest::digest(s); - s.zeroize(); - k256::SecretKey::from_slice(k.as_slice())? - } - KeyGenOption::FromPrivateKey(ref s) => { - assert_eq!(s.digest_function, ALGORITHM); - k256::SecretKey::from_slice(&s.payload[..])? 
- } - }, - None => k256::SecretKey::random(&mut OsRng), + #[cfg(feature = "rand")] + KeyGenOption::Random => PrivateKey::random(&mut OsRng), + KeyGenOption::UseSeed(ref mut seed) => { + let mut s = [0u8; PRIVATE_KEY_SIZE]; + let mut rng = ChaChaRng::from_seed(*array_ref!(seed.as_slice(), 0, 32)); + seed.zeroize(); + rng.fill_bytes(&mut s); + let k = Digest::digest(s); + s.zeroize(); + PrivateKey::from_slice(k.as_slice()) + .expect("Creating private key from seed should always succeed") + } + KeyGenOption::FromPrivateKey(ref s) => { + let crate::PrivateKeyInner::Secp256k1(s) = s.0.borrow() else { + panic!("Wrong private key type, expected `Secp256k1`, got {s:?}") + }; + s.clone() + } }; let public_key = signing_key.public_key(); - let compressed = public_key.to_sec1_bytes(); //serialized as compressed point - Ok(( - PublicKey { - digest_function: ALGORITHM, - payload: ConstVec::new(compressed), - }, - PrivateKey { - digest_function: ALGORITHM, - payload: ConstVec::new(signing_key.to_bytes().to_vec()), - }, - )) + (public_key, signing_key) } - pub fn sign(message: &[u8], sk: &PrivateKey) -> Result, Error> { - assert_eq!(sk.digest_function, ALGORITHM); - let signing_key = k256::SecretKey::from_slice(&sk.payload[..]) - .map_err(|e| Error::Signing(format!("{e:?}")))?; - let signing_key = k256::ecdsa::SigningKey::from(signing_key); + pub fn sign(message: &[u8], sk: &PrivateKey) -> Vec { + let signing_key = k256::ecdsa::SigningKey::from(sk); let signature: k256::ecdsa::Signature = signing_key.sign(message); - Ok(signature.to_bytes().to_vec()) + signature.to_bytes().to_vec() } - pub fn verify(message: &[u8], signature: &[u8], pk: &PublicKey) -> Result { - let compressed_pk = Self::public_key_compressed(pk); - let verifying_key = k256::PublicKey::from_sec1_bytes(&compressed_pk) - .map_err(|e| Error::Signing(format!("{e:?}")))?; + pub fn verify(message: &[u8], signature: &[u8], pk: &PublicKey) -> Result<(), Error> { let signature = k256::ecdsa::Signature::from_slice(signature) .map_err(|e| Error::Signing(format!("{e:?}")))?; - let verifying_key = k256::ecdsa::VerifyingKey::from(verifying_key); + let verifying_key = k256::ecdsa::VerifyingKey::from(pk); - Ok(verifying_key.verify(message, &signature).is_ok()) + verifying_key + .verify(message, &signature) + .map_err(|_| Error::BadSignature) + } + + pub fn parse_public_key(payload: &[u8]) -> Result { + PublicKey::from_sec1_bytes(payload).map_err(|err| ParseError(err.to_string())) + } + + pub fn parse_private_key(payload: &[u8]) -> Result { + PrivateKey::from_slice(payload).map_err(|err| ParseError(err.to_string())) } } } @@ -127,39 +133,33 @@ mod test { const PRIVATE_KEY: &str = "e4f21b38e005d4f895a29e84948d7cc83eac79041aeb644ee4fab8d9da42f713"; const PUBLIC_KEY: &str = "0242c1e1f775237a26da4fd51b8d75ee2709711f6e90303e511169a324ef0789c0"; + fn private_key() -> PrivateKey { + let payload = hex::decode(PRIVATE_KEY).unwrap(); + EcdsaSecp256k1Sha256::parse_private_key(&payload).unwrap() + } + + fn public_key() -> PublicKey { + let payload = hex::decode(PUBLIC_KEY).unwrap(); + EcdsaSecp256k1Sha256::parse_public_key(&payload).unwrap() + } + fn public_key_uncompressed(pk: &PublicKey) -> Vec { const PUBLIC_UNCOMPRESSED_KEY_SIZE: usize = 65; - assert_eq!(pk.digest_function, ALGORITHM); let mut uncompressed = [0u8; PUBLIC_UNCOMPRESSED_KEY_SIZE]; - ecp::ECP::frombytes(&pk.payload[..]).tobytes(&mut uncompressed, false); + ecp::ECP::frombytes(&pk.to_sec1_bytes()[..]).tobytes(&mut uncompressed, false); uncompressed.to_vec() } - #[test] - #[ignore] - fn 
create_new_keys() { - let (s, p) = EcdsaSecp256k1Sha256::keypair(None).unwrap(); - - println!("{s:?}"); - println!("{p:?}"); - } - - #[test] - fn secp256k1_load_keys() { - let secret = PrivateKey::from_hex(ALGORITHM, PRIVATE_KEY).unwrap(); - let _sres = - EcdsaSecp256k1Sha256::keypair(Some(KeyGenOption::FromPrivateKey(secret))).unwrap(); - } - #[test] fn secp256k1_compatibility() { - let secret = PrivateKey::from_hex(ALGORITHM, PRIVATE_KEY).unwrap(); - let (p, s) = - EcdsaSecp256k1Sha256::keypair(Some(KeyGenOption::FromPrivateKey(secret))).unwrap(); + let secret = private_key(); + let (p, s) = EcdsaSecp256k1Sha256::keypair(KeyGenOption::FromPrivateKey(Box::new( + crate::PrivateKey(Box::new(crate::PrivateKeyInner::Secp256k1(secret))), + ))); - let _sk = secp256k1::SecretKey::from_slice(s.payload()).unwrap(); - let _pk = secp256k1::PublicKey::from_slice(p.payload()).unwrap(); + let _sk = secp256k1::SecretKey::from_slice(&s.to_bytes()).unwrap(); + let _pk = secp256k1::PublicKey::from_slice(&p.to_sec1_bytes()).unwrap(); let openssl_group = EcGroup::from_curve_name(Nid::SECP256K1).unwrap(); let mut ctx = BigNumContext::new().unwrap(); @@ -170,16 +170,10 @@ mod test { #[test] fn secp256k1_verify() { - let p = PublicKey::from_hex(ALGORITHM, PUBLIC_KEY).unwrap(); + let p = public_key(); - let result = EcdsaSecp256k1Sha256::verify( - MESSAGE_1, - hex::decode(SIGNATURE_1).unwrap().as_slice(), - &p, - ); - // we are returning a `Result` - // unwrap will catch the `Err(_)`, and assert will catch the `false` - assert!(result.unwrap()); + EcdsaSecp256k1Sha256::verify(MESSAGE_1, hex::decode(SIGNATURE_1).unwrap().as_slice(), &p) + .unwrap(); let context = secp256k1::Secp256k1::new(); let pk = @@ -211,15 +205,13 @@ mod test { #[test] fn secp256k1_sign() { - let secret = PrivateKey::from_hex(ALGORITHM, PRIVATE_KEY).unwrap(); - let (pk, sk) = - EcdsaSecp256k1Sha256::keypair(Some(KeyGenOption::FromPrivateKey(secret))).unwrap(); + let secret = private_key(); + let (pk, sk) = EcdsaSecp256k1Sha256::keypair(KeyGenOption::FromPrivateKey(Box::new( + crate::PrivateKey(Box::new(crate::PrivateKeyInner::Secp256k1(secret))), + ))); - let sig = EcdsaSecp256k1Sha256::sign(MESSAGE_1, &sk).unwrap(); - let result = EcdsaSecp256k1Sha256::verify(MESSAGE_1, &sig, &pk); - // we are returning a `Result` - // unwrap will catch the `Err(_)`, and assert will catch the `false` - assert!(result.unwrap()); + let sig = EcdsaSecp256k1Sha256::sign(MESSAGE_1, &sk); + EcdsaSecp256k1Sha256::verify(MESSAGE_1, &sig, &pk).unwrap(); assert_eq!(sig.len(), 64); @@ -234,8 +226,7 @@ mod test { let msg = secp256k1::Message::from_digest_slice(h.as_slice()).unwrap(); let sig_1 = context.sign_ecdsa(&msg, &sk).serialize_compact(); - let result = EcdsaSecp256k1Sha256::verify(MESSAGE_1, &sig_1, &pk); - assert!(result.unwrap()); + EcdsaSecp256k1Sha256::verify(MESSAGE_1, &sig_1, &pk).unwrap(); let openssl_group = EcGroup::from_curve_name(Nid::SECP256K1).unwrap(); let mut ctx = BigNumContext::new().unwrap(); @@ -280,12 +271,10 @@ mod test { res }; - let result = EcdsaSecp256k1Sha256::verify(MESSAGE_1, openssl_sig.as_slice(), &pk); - assert!(result.unwrap()); + EcdsaSecp256k1Sha256::verify(MESSAGE_1, openssl_sig.as_slice(), &pk).unwrap(); - let (p, s) = EcdsaSecp256k1Sha256::keypair(None).unwrap(); - let signed = EcdsaSecp256k1Sha256::sign(MESSAGE_1, &s).unwrap(); - let result = EcdsaSecp256k1Sha256::verify(MESSAGE_1, &signed, &p); - assert!(result.unwrap()); + let (p, s) = EcdsaSecp256k1Sha256::keypair(KeyGenOption::Random); + let signed = 
EcdsaSecp256k1Sha256::sign(MESSAGE_1, &s); + EcdsaSecp256k1Sha256::verify(MESSAGE_1, &signed, &p).unwrap(); } } diff --git a/data_model/src/block.rs b/data_model/src/block.rs index 0eb2cb5079c..3ad6900d32d 100644 --- a/data_model/src/block.rs +++ b/data_model/src/block.rs @@ -194,18 +194,14 @@ impl SignedBlock { } /// Add additional signatures to this block - /// - /// # Errors - /// - /// If given signature doesn't match block hash #[cfg(feature = "std")] #[cfg(feature = "transparent_api")] - pub fn sign(mut self, key_pair: KeyPair) -> Result { - iroha_crypto::SignatureOf::new(key_pair, self.payload()).map(|signature| { - let SignedBlock::V1(block) = &mut self; - block.signatures.insert(signature); - self - }) + #[must_use] + pub fn sign(mut self, key_pair: &KeyPair) -> Self { + let signature = iroha_crypto::SignatureOf::new(key_pair, self.payload()); + let SignedBlock::V1(block) = &mut self; + block.signatures.insert(signature); + self } /// Add additional signatures to this block diff --git a/data_model/src/domain.rs b/data_model/src/domain.rs index 850eb3304a6..dff9384d1de 100644 --- a/data_model/src/domain.rs +++ b/data_model/src/domain.rs @@ -78,7 +78,7 @@ pub mod model { pub asset_definitions: AssetDefinitionsMap, /// Total amount of [`Asset`]. pub asset_total_quantities: AssetTotalQuantityMap, - /// IPFS link to the [`Domain`] logo + /// IPFS link to the [`Domain`] logo. #[getset(get = "pub")] pub logo: Option, /// [`Metadata`] of this `Domain` as a key-value store. diff --git a/data_model/src/peer.rs b/data_model/src/peer.rs index 866e753a456..f5da00a8bb5 100644 --- a/data_model/src/peer.rs +++ b/data_model/src/peer.rs @@ -9,7 +9,6 @@ use core::{ }; use derive_more::Display; -use getset::Getters; use iroha_data_model_derive::{model, IdEqOrdHash}; use iroha_primitives::addr::SocketAddr; use iroha_schema::IntoSchema; @@ -21,6 +20,8 @@ use crate::{Identifiable, PublicKey, Registered, Value}; #[model] pub mod model { + use getset::Getters; + use super::*; /// Peer's identification. @@ -28,7 +29,7 @@ pub mod model { /// Equality is tested by `public_key` field only. /// Each peer should have a unique public key. #[derive( - Debug, Display, Clone, Eq, Getters, Decode, Encode, Deserialize, Serialize, IntoSchema, + Debug, Display, Clone, Eq, Decode, Encode, Deserialize, Serialize, IntoSchema, Getters, )] #[display(fmt = "{public_key}@@{address}")] #[getset(get = "pub")] @@ -36,7 +37,6 @@ pub mod model { pub struct PeerId { /// Address of the [`Peer`]'s entrypoint. // TODO: Derive with getset once FFI impl is fixed - #[getset(skip)] pub address: SocketAddr, /// Public Key of the [`Peer`]. pub public_key: PublicKey, @@ -58,21 +58,14 @@ pub mod model { } impl PeerId { - /// Construct `PeerId` given `public_key` and `address`. + /// Construct [`PeerId`] given `public_key` and `address`. #[inline] - pub fn new(address: &SocketAddr, public_key: &PublicKey) -> Self { + pub fn new(address: SocketAddr, public_key: PublicKey) -> Self { Self { - address: address.clone(), - public_key: public_key.clone(), + address, + public_key, } } - /// Serialize the data contained in this Id for use in hashing. 
- pub fn payload(&self) -> Vec { - let mut data = Vec::new(); - data.extend(self.address.payload()); - data.extend(self.public_key.payload()); - data - } } impl Peer { diff --git a/data_model/src/predicate.rs b/data_model/src/predicate.rs index 4e5d72d8c0c..87e641da98d 100644 --- a/data_model/src/predicate.rs +++ b/data_model/src/predicate.rs @@ -366,6 +366,8 @@ pub mod string { #[cfg(test)] mod tests { + use iroha_primitives::addr::socket_addr; + use super::*; mod id_box { @@ -496,10 +498,7 @@ pub mod string { let (public_key, _) = iroha_crypto::KeyPair::generate() .expect("Should not panic") .into(); - let id = IdBox::PeerId(peer::PeerId { - address: "localhost:123".parse().unwrap(), - public_key, - }); + let id = IdBox::PeerId(peer::PeerId::new(socket_addr!(127.0.0.1:123), public_key)); assert!(StringPredicate::contains("123").applies(&id)); } } @@ -1155,6 +1154,7 @@ pub mod value { #[cfg(test)] mod test { + use iroha_primitives::addr::socket_addr; use peer::Peer; use prelude::Metadata; @@ -1197,10 +1197,7 @@ pub mod value { assert!( !pred.applies(&Value::Identifiable(IdentifiableBox::Peer(Peer { - id: peer::PeerId { - address: "localhost:123".parse().unwrap(), - public_key - } + id: peer::PeerId::new(socket_addr!(127.0.0.1:123), public_key) }))) ); } diff --git a/data_model/src/query/mod.rs b/data_model/src/query/mod.rs index 6d96d52634f..46100020b12 100644 --- a/data_model/src/query/mod.rs +++ b/data_model/src/query/mod.rs @@ -1231,16 +1231,13 @@ pub mod http { /// # Errors /// Fails if signature creation fails. #[inline] - pub fn sign( - self, - key_pair: iroha_crypto::KeyPair, - ) -> Result { - SignatureOf::new(key_pair, &self.payload) - .map(|signature| SignedQueryV1 { - payload: self.payload, - signature, - }) - .map(Into::into) + #[must_use] + pub fn sign(self, key_pair: &iroha_crypto::KeyPair) -> SignedQuery { + SignedQueryV1 { + signature: SignatureOf::new(key_pair, &self.payload), + payload: self.payload, + } + .into() } } diff --git a/data_model/src/transaction.rs b/data_model/src/transaction.rs index 4517d2ce8b9..59eacb1c9e5 100644 --- a/data_model/src/transaction.rs +++ b/data_model/src/transaction.rs @@ -279,24 +279,18 @@ impl SignedTransaction { } /// Sign transaction with provided key pair. - /// - /// # Errors - /// - /// Fails if signature creation fails #[cfg(feature = "std")] - pub fn sign( - self, - key_pair: iroha_crypto::KeyPair, - ) -> Result { + #[must_use] + pub fn sign(self, key_pair: &iroha_crypto::KeyPair) -> SignedTransaction { let SignedTransaction::V1(mut tx) = self; - let signature = iroha_crypto::SignatureOf::new(key_pair, &tx.payload)?; + let signature = iroha_crypto::SignatureOf::new(key_pair, &tx.payload); tx.signatures.insert(signature); - Ok(SignedTransactionV1 { + SignedTransactionV1 { payload: tx.payload, signatures: tx.signatures, } - .into()) + .into() } /// Add additional signatures to this transaction @@ -745,22 +739,16 @@ mod http { } /// Sign transaction with provided key pair. 
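The transaction-side `sign` methods in the hunks above and below follow the same pattern: a borrowed `KeyPair`, an infallible result, and `#[must_use]` standing in for the old `Result` as the guard against a dropped value. A hedged caller-side sketch (the module path is assumed from this file's location in the crate):

    use iroha_crypto::KeyPair;
    use iroha_data_model::transaction::SignedTransaction;

    // Appending a signature can no longer fail; forgetting to bind the returned
    // transaction is now a compiler warning rather than a silently dropped `Result`.
    fn co_sign(tx: SignedTransaction, key_pair: &KeyPair) -> SignedTransaction {
        tx.sign(key_pair)
    }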
- /// - /// # Errors - /// - /// Fails if signature creation fails #[cfg(feature = "std")] - pub fn sign( - self, - key_pair: iroha_crypto::KeyPair, - ) -> Result { - let signatures = SignaturesOf::new(key_pair, &self.payload)?; + #[must_use] + pub fn sign(self, key_pair: &iroha_crypto::KeyPair) -> SignedTransaction { + let signatures = SignaturesOf::new(key_pair, &self.payload); - Ok(SignedTransactionV1 { + SignedTransactionV1 { payload: self.payload, signatures, } - .into()) + .into() } } } diff --git a/default_executor/Cargo.toml b/default_executor/Cargo.toml index fea5b33a828..f97291b7a7d 100644 --- a/default_executor/Cargo.toml +++ b/default_executor/Cargo.toml @@ -24,7 +24,8 @@ opt-level = "z" # Optimize for size vs speed with "s"/"z"(removes vectorizat codegen-units = 1 # Further reduces binary size but increases compilation time [dependencies] -iroha_executor = { version = "2.0.0-pre-rc.20", path = "../smart_contract/executor", features = ["debug"]} +iroha_executor = { version = "2.0.0-pre-rc.20", path = "../smart_contract/executor", features = ["debug"] } +getrandom = { version = "0.2", features = ["custom"] } lol_alloc = "0.4.0" panic-halt = "0.2.0" diff --git a/default_executor/src/lib.rs b/default_executor/src/lib.rs index 6214fdaf03b..0dfffd7c8ea 100644 --- a/default_executor/src/lib.rs +++ b/default_executor/src/lib.rs @@ -14,6 +14,8 @@ use lol_alloc::{FreeListAllocator, LockedAllocator}; #[global_allocator] static ALLOC: LockedAllocator = LockedAllocator::new(FreeListAllocator::new()); +getrandom::register_custom_getrandom!(iroha_executor::stub_getrandom); + /// Executor that replaces some of [`Validate`]'s methods with sensible defaults /// /// # Warning diff --git a/docker-compose.dev.single.yml b/docker-compose.dev.single.yml deleted file mode 100644 index 2bef3e4873e..00000000000 --- a/docker-compose.dev.single.yml +++ /dev/null @@ -1,25 +0,0 @@ -# This file is generated by iroha_swarm. -# Do not edit it manually. - -version: '3.8' -services: - iroha0: - build: ./ - platform: linux/amd64 - environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb"}' - TORII_P2P_ADDR: iroha0:1337 - TORII_API_URL: iroha0:8080 - IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 - IROHA_GENESIS_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"82b3bde54aebeca4146257da0de8d59d8e46d5fe34887dcd8072866792fcb3ad4164bf554923ece1fd412d241036d863a6ae430476c898248b8237d77534cfc4"}' - IROHA_GENESIS_FILE: /config/genesis.json - ports: - - 1337:1337 - - 8080:8080 - volumes: - - ./configs/peer:/config - init: true - command: iroha --submit-genesis diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml deleted file mode 100644 index c35f6a859db..00000000000 --- a/docker-compose.dev.yml +++ /dev/null @@ -1,80 +0,0 @@ -# This file is generated by iroha_swarm. -# Do not edit it manually. 
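Returning to the `default_executor` change above: `getrandom` 0.2 with its `custom` feature lets a wasm target register its own entropy hook, which is why the crate now depends on that feature and calls `register_custom_getrandom!(iroha_executor::stub_getrandom)`. The body of `stub_getrandom` is not part of this diff; a hypothetical stub using the same mechanism might look like:

    use getrandom::{register_custom_getrandom, Error};

    // Hypothetical stand-in for `stub_getrandom` (its real behavior is not shown
    // in this diff): a deterministic wasm executor must not consume ambient
    // entropy, so the hook simply reports the source as unsupported.
    fn deny_entropy(_dest: &mut [u8]) -> Result<(), Error> {
        Err(Error::UNSUPPORTED)
    }

    register_custom_getrandom!(deny_entropy);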
- -version: '3.8' -services: - iroha0: - image: hyperledger/iroha2:dev - platform: linux/amd64 - environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb"}' - TORII_P2P_ADDR: iroha0:1337 - TORII_API_URL: iroha0:8080 - IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 - IROHA_GENESIS_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"82b3bde54aebeca4146257da0de8d59d8e46d5fe34887dcd8072866792fcb3ad4164bf554923ece1fd412d241036d863a6ae430476c898248b8237d77534cfc4"}' - IROHA_GENESIS_FILE: /config/genesis.json - SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338","public_key":"ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F"},{"address":"iroha3:1340","public_key":"ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61"},{"address":"iroha2:1339","public_key":"ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736"}]' - ports: - - 1337:1337 - - 8080:8080 - volumes: - - ./configs/peer:/config - init: true - command: iroha --submit-genesis - iroha1: - image: hyperledger/iroha2:dev - platform: linux/amd64 - environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"c02ffad5e455e7ec620d74de5769681e4d8385906bce5a437eb67452a9efbbc2815bbdc9775d28c3633269b25f22d048e2aa2e36017cbe5ad85f15220beb6f6f"}' - TORII_P2P_ADDR: iroha1:1338 - TORII_API_URL: iroha1:8081 - IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 - SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha3:1340","public_key":"ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61"},{"address":"iroha2:1339","public_key":"ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736"}]' - ports: - - 1338:1338 - - 8081:8081 - volumes: - - ./configs/peer:/config - init: true - iroha2: - image: hyperledger/iroha2:dev - platform: linux/amd64 - environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736 - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"29c5ed1409cb10fd791bc4ff8a6cb5e22a5fae7e36f448ef3ea2988b1319a88bf417e0371e6adb32fd66749477402b1ab67f84a8e9b082e997980cc91f327736"}' - TORII_P2P_ADDR: iroha2:1339 - TORII_API_URL: iroha2:8082 - IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 - SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338","public_key":"ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F"},{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha3:1340","public_key":"ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61"}]' - ports: - - 1339:1339 - - 8082:8082 - volumes: - - ./configs/peer:/config - init: true - iroha3: - image: 
hyperledger/iroha2:dev - platform: linux/amd64 - environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61 - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"5eed4855fad183c451aac39dfc50831607e4cf408c98e2b977f3ce4a2df42ce2a66522370d60b9c09e79ade2e9bb1ef2e78733a944b999b3a6aee687ce476d61"}' - TORII_P2P_ADDR: iroha3:1340 - TORII_API_URL: iroha3:8083 - IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 - SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338","public_key":"ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F"},{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha2:1339","public_key":"ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736"}]' - ports: - - 1340:1340 - - 8083:8083 - volumes: - - ./configs/peer:/config - init: true diff --git a/docker-compose.dev.local.yml b/docker-compose.local.yml similarity index 82% rename from docker-compose.dev.local.yml rename to docker-compose.local.yml index 42652c22646..6c2cd371db2 100644 --- a/docker-compose.dev.local.yml +++ b/docker-compose.local.yml @@ -11,8 +11,8 @@ services: IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb"}' - TORII_P2P_ADDR: iroha0:1337 - TORII_API_URL: iroha0:8080 + TORII_P2P_ADDR: 0.0.0.0:1337 + TORII_API_URL: 0.0.0.0:8080 IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 IROHA_GENESIS_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"82b3bde54aebeca4146257da0de8d59d8e46d5fe34887dcd8072866792fcb3ad4164bf554923ece1fd412d241036d863a6ae430476c898248b8237d77534cfc4"}' IROHA_GENESIS_FILE: /config/genesis.json @@ -24,6 +24,12 @@ services: - ./configs/peer:/config init: true command: iroha --submit-genesis + healthcheck: + test: test $(curl -s http://127.0.0.1:8080/status/blocks) -gt 0 + interval: 2s + timeout: 1s + retries: 30 + start_period: 4s iroha1: build: ./ platform: linux/amd64 @@ -32,8 +38,8 @@ services: IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"c02ffad5e455e7ec620d74de5769681e4d8385906bce5a437eb67452a9efbbc2815bbdc9775d28c3633269b25f22d048e2aa2e36017cbe5ad85f15220beb6f6f"}' - TORII_P2P_ADDR: iroha1:1338 - TORII_API_URL: iroha1:8081 + TORII_P2P_ADDR: 0.0.0.0:1338 + TORII_API_URL: 0.0.0.0:8081 IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha3:1340","public_key":"ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61"},{"address":"iroha2:1339","public_key":"ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736"}]' ports: @@ -42,6 +48,12 @@ services: volumes: - ./configs/peer:/config init: true + healthcheck: + test: test $(curl -s http://127.0.0.1:8081/status/blocks) -gt 0 + interval: 2s + timeout: 1s + retries: 30 + start_period: 4s 
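Each of these added healthchecks gates container readiness on Torii's `/status/blocks` endpoint returning a number greater than zero, so a peer only turns healthy once the genesis block has been committed. The endpoint's body is a bare integer, which is what makes the `test ... -gt 0` one-liner work; as a dependency-free sketch of the same probe in Rust:

    use std::io::{Read, Write};
    use std::net::TcpStream;

    // Returns Ok(true) once the peer reports at least one committed block.
    // Minimal HTTP/1.0 GET against the Torii status endpoint; no TLS, no crates.
    fn peer_is_healthy(addr: &str) -> std::io::Result<bool> {
        let mut stream = TcpStream::connect(addr)?;
        write!(stream, "GET /status/blocks HTTP/1.0\r\nHost: {addr}\r\n\r\n")?;
        let mut response = String::new();
        stream.read_to_string(&mut response)?;
        // The block count follows the blank line that ends the headers.
        let blocks: u64 = response
            .split("\r\n\r\n")
            .nth(1)
            .and_then(|body| body.trim().parse().ok())
            .unwrap_or(0);
        Ok(blocks > 0)
    }

Calling `peer_is_healthy("127.0.0.1:8080")` mirrors the iroha0 probe; the compose files themselves just shell out to `curl` as shown above.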
iroha2: build: ./ platform: linux/amd64 @@ -50,8 +62,8 @@ services: IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736 IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"29c5ed1409cb10fd791bc4ff8a6cb5e22a5fae7e36f448ef3ea2988b1319a88bf417e0371e6adb32fd66749477402b1ab67f84a8e9b082e997980cc91f327736"}' - TORII_P2P_ADDR: iroha2:1339 - TORII_API_URL: iroha2:8082 + TORII_P2P_ADDR: 0.0.0.0:1339 + TORII_API_URL: 0.0.0.0:8082 IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338","public_key":"ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F"},{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha3:1340","public_key":"ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61"}]' ports: @@ -60,6 +72,12 @@ services: volumes: - ./configs/peer:/config init: true + healthcheck: + test: test $(curl -s http://127.0.0.1:8082/status/blocks) -gt 0 + interval: 2s + timeout: 1s + retries: 30 + start_period: 4s iroha3: build: ./ platform: linux/amd64 @@ -68,8 +86,8 @@ services: IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61 IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"5eed4855fad183c451aac39dfc50831607e4cf408c98e2b977f3ce4a2df42ce2a66522370d60b9c09e79ade2e9bb1ef2e78733a944b999b3a6aee687ce476d61"}' - TORII_P2P_ADDR: iroha3:1340 - TORII_API_URL: iroha3:8083 + TORII_P2P_ADDR: 0.0.0.0:1340 + TORII_API_URL: 0.0.0.0:8083 IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338","public_key":"ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F"},{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha2:1339","public_key":"ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736"}]' ports: @@ -78,3 +96,9 @@ services: volumes: - ./configs/peer:/config init: true + healthcheck: + test: test $(curl -s http://127.0.0.1:8083/status/blocks) -gt 0 + interval: 2s + timeout: 1s + retries: 30 + start_period: 4s diff --git a/docker-compose.single.yml b/docker-compose.single.yml index 454f92ff312..240d84cf190 100644 --- a/docker-compose.single.yml +++ b/docker-compose.single.yml @@ -1,26 +1,31 @@ -version: "3.8" +# This file is generated by iroha_swarm. +# Do not edit it manually. + +version: '3.8' services: iroha0: - build: . 
- image: iroha2:lts + build: ./ platform: linux/amd64 environment: - TORII_P2P_ADDR: iroha0:1337 - TORII_API_URL: iroha0:8080 - TORII_TELEMETRY_URL: iroha0:8180 - IROHA_PUBLIC_KEY: "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B" - IROHA_PRIVATE_KEY: '{"digest_function": "ed25519", "payload": "282ED9F3CF92811C3818DBC4AE594ED59DC1A2F78E4241E31924E101D6B1FB831C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B"}' - SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha0:1337", "public_key": "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B"}]' - IROHA_GENESIS_ACCOUNT_PUBLIC_KEY: 'ed01203F4E3E98571B55514EDC5CCF7E53CA7509D89B2868E62921180A6F57C2F4E255' - IROHA_GENESIS_ACCOUNT_PRIVATE_KEY: '{"digest_function": "ed25519", "payload": "038AE16B219DA35AA036335ED0A43C28A2CC737150112C78A7B8034B9D99C9023F4E3E98571B55514EDC5CCF7E53CA7509D89B2868E62921180A6F57C2F4E255"}' - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_COUNT_LIMIT: 100 - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_PERIOD_MS: 500 - IROHA_GENESIS_GENESIS_SUBMISSION_DELAY_MS: 1000 + IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 + IROHA_CONFIG: /config/config.json + IROHA_PUBLIC_KEY: ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB + IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb"}' + TORII_P2P_ADDR: 0.0.0.0:1337 + TORII_API_URL: 0.0.0.0:8080 + IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 + IROHA_GENESIS_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"82b3bde54aebeca4146257da0de8d59d8e46d5fe34887dcd8072866792fcb3ad4164bf554923ece1fd412d241036d863a6ae430476c898248b8237d77534cfc4"}' + IROHA_GENESIS_FILE: /config/genesis.json ports: - - "1337:1337" - - "8080:8080" - - "8180:8180" + - 1337:1337 + - 8080:8080 + volumes: + - ./configs/peer:/config init: true command: iroha --submit-genesis - volumes: - - './configs/peer/lts:/config' + healthcheck: + test: test $(curl -s http://127.0.0.1:8080/status/blocks) -gt 0 + interval: 2s + timeout: 1s + retries: 30 + start_period: 4s diff --git a/docker-compose.stable.single.yml b/docker-compose.stable.single.yml deleted file mode 100644 index dfb250f6d9f..00000000000 --- a/docker-compose.stable.single.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: "3.8" -services: - iroha0: - build: . 
- image: iroha2:stable - platform: linux/amd64 - environment: - TORII_P2P_ADDR: iroha0:1337 - TORII_API_URL: iroha0:8080 - TORII_TELEMETRY_URL: iroha0:8180 - IROHA_PUBLIC_KEY: "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B" - IROHA_PRIVATE_KEY: '{"digest_function": "ed25519", "payload": "282ED9F3CF92811C3818DBC4AE594ED59DC1A2F78E4241E31924E101D6B1FB831C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B"}' - IROHA_GENESIS_ACCOUNT_PUBLIC_KEY: 'ed01203F4E3E98571B55514EDC5CCF7E53CA7509D89B2868E62921180A6F57C2F4E255' - IROHA_GENESIS_ACCOUNT_PRIVATE_KEY: '{"digest_function": "ed25519", "payload": "038AE16B219DA35AA036335ED0A43C28A2CC737150112C78A7B8034B9D99C9023F4E3E98571B55514EDC5CCF7E53CA7509D89B2868E62921180A6F57C2F4E255"}' - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_COUNT_LIMIT: 100 - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_PERIOD_MS: 500 - IROHA_GENESIS_GENESIS_SUBMISSION_DELAY_MS: 1000 - ports: - - "1337:1337" - - "8080:8080" - init: true - command: iroha --submit-genesis - volumes: - - './configs/peer/stable:/config' diff --git a/docker-compose.stable.yml b/docker-compose.stable.yml deleted file mode 100644 index 95cdf9e04d8..00000000000 --- a/docker-compose.stable.yml +++ /dev/null @@ -1,87 +0,0 @@ -version: "3.8" -services: - iroha0: - image: hyperledger/iroha2:stable - platform: linux/amd64 - environment: - TORII_P2P_ADDR: iroha0:1337 - TORII_API_URL: iroha0:8080 - TORII_TELEMETRY_URL: iroha0:8180 - IROHA_PUBLIC_KEY: "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B" - IROHA_PRIVATE_KEY: '{"digest_function": "ed25519", "payload": "282ED9F3CF92811C3818DBC4AE594ED59DC1A2F78E4241E31924E101D6B1FB831C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B"}' - SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338", "public_key": "ed0120CC25624D62896D3A0BFD8940F928DC2ABF27CC57CEFEB442AA96D9081AAE58A1"}, {"address": "iroha2:1339", "public_key": "ed0120FACA9E8AA83225CB4D16D67F27DD4F93FC30FFA11ADC1F5C88FD5495ECC91020"}, {"address": "iroha3:1340", "public_key": "ed01208E351A70B6A603ED285D666B8D689B680865913BA03CE29FB7D13A166C4E7F1F"}]' - IROHA_GENESIS_ACCOUNT_PUBLIC_KEY: 'ed01203f4e3e98571b55514edc5ccf7e53ca7509d89b2868e62921180a6f57c2f4e255' - IROHA_GENESIS_ACCOUNT_PRIVATE_KEY: '{ "digest_function": "ed25519", "payload": "038AE16B219DA35AA036335ED0A43C28A2CC737150112C78A7B8034B9D99C9023F4E3E98571B55514EDC5CCF7E53CA7509D89B2868E62921180A6F57C2F4E255" }' - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_COUNT_LIMIT: 100 - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_PERIOD_MS: 500 - IROHA_GENESIS_GENESIS_SUBMISSION_DELAY_MS: 1000 - ports: - - "1337:1337" - - "8080:8080" - volumes: - - './configs/peer/stable:/config' - init: true - command: iroha --submit-genesis - - iroha1: - image: hyperledger/iroha2:stable - platform: linux/amd64 - environment: - TORII_P2P_ADDR: iroha1:1338 - TORII_API_URL: iroha1:8081 - TORII_TELEMETRY_URL: iroha1:8181 - IROHA_PUBLIC_KEY: "ed0120CC25624D62896D3A0BFD8940F928DC2ABF27CC57CEFEB442AA96D9081AAE58A1" - IROHA_PRIVATE_KEY: '{"digest_function": "ed25519", "payload": "3BAC34CDA9E3763FA069C1198312D1EC73B53023B8180C822AC355435EDC4A24CC25624D62896D3A0BFD8940F928DC2ABF27CC57CEFEB442AA96D9081AAE58A1"}' - SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha0:1337", "public_key": "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B"}, {"address": "iroha2:1339", "public_key": "ed0120FACA9E8AA83225CB4D16D67F27DD4F93FC30FFA11ADC1F5C88FD5495ECC91020"}, {"address": "iroha3:1340", "public_key": 
"ed01208E351A70B6A603ED285D666B8D689B680865913BA03CE29FB7D13A166C4E7F1F"}]' - IROHA_GENESIS_ACCOUNT_PUBLIC_KEY: 'ed01203F4E3E98571B55514EDC5CCF7E53CA7509D89B2868E62921180A6F57C2F4E255' - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_COUNT_LIMIT: 100 - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_PERIOD_MS: 500 - IROHA_GENESIS_GENESIS_SUBMISSION_DELAY_MS: 1000 - ports: - - "1338:1338" - - "8081:8081" - volumes: - - './configs/peer/stable:/config' - init: true - - iroha2: - image: hyperledger/iroha2:stable - platform: linux/amd64 - environment: - TORII_P2P_ADDR: iroha2:1339 - TORII_API_URL: iroha2:8082 - TORII_TELEMETRY_URL: iroha2:8182 - IROHA_PUBLIC_KEY: "ed0120FACA9E8AA83225CB4D16D67F27DD4F93FC30FFA11ADC1F5C88FD5495ECC91020" - IROHA_PRIVATE_KEY: '{"digest_function": "ed25519", "payload": "1261A436D36779223D7D6CF20E8B644510E488E6A50BAFD77A7485264D27197DFACA9E8AA83225CB4D16D67F27DD4F93FC30FFA11ADC1F5C88FD5495ECC91020"}' - SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha0:1337", "public_key": "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B"}, {"address":"iroha1:1338", "public_key": "ed0120CC25624D62896D3A0BFD8940F928DC2ABF27CC57CEFEB442AA96D9081AAE58A1"}, {"address": "iroha3:1340", "public_key": "ed01208E351A70B6A603ED285D666B8D689B680865913BA03CE29FB7D13A166C4E7F1F"}]' - IROHA_GENESIS_ACCOUNT_PUBLIC_KEY: 'ed01203F4E3E98571B55514EDC5CCF7E53CA7509D89B2868E62921180A6F57C2F4E255' - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_COUNT_LIMIT: 100 - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_PERIOD_MS: 500 - IROHA_GENESIS_GENESIS_SUBMISSION_DELAY_MS: 1000 - ports: - - "1339:1339" - - "8082:8082" - volumes: - - './configs/peer/stable:/config' - init: true - - iroha3: - image: hyperledger/iroha2:stable - platform: linux/amd64 - environment: - TORII_P2P_ADDR: iroha3:1340 - TORII_API_URL: iroha3:8083 - TORII_TELEMETRY_URL: iroha3:8183 - IROHA_PUBLIC_KEY: "ed01208E351A70B6A603ED285D666B8D689B680865913BA03CE29FB7D13A166C4E7F1F" - IROHA_PRIVATE_KEY: '{"digest_function": "ed25519", "payload": "A70DAB95C7482EB9F159111B65947E482108CFE67DF877BD8D3B9441A781C7C98E351A70B6A603ED285D666B8D689B680865913BA03CE29FB7D13A166C4E7F1F"}' - SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha0:1337", "public_key": "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B"}, {"address":"iroha1:1338", "public_key": "ed0120CC25624D62896D3A0BFD8940F928DC2ABF27CC57CEFEB442AA96D9081AAE58A1"}, {"address": "iroha2:1339", "public_key": "ed0120FACA9E8AA83225CB4D16D67F27DD4F93FC30FFA11ADC1F5C88FD5495ECC91020"}]' - IROHA_GENESIS_ACCOUNT_PUBLIC_KEY: 'ed01203F4E3E98571B55514EDC5CCF7E53CA7509D89B2868E62921180A6F57C2F4E255' - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_COUNT_LIMIT: 100 - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_PERIOD_MS: 500 - IROHA_GENESIS_GENESIS_SUBMISSION_DELAY_MS: 1000 - ports: - - "1340:1340" - - "8083:8083" - volumes: - - './configs/peer/stable:/config' - init: true diff --git a/docker-compose.yml b/docker-compose.yml index d24025c2fb3..af679a88066 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,91 +1,104 @@ -version: "3.8" +# This file is generated by iroha_swarm. +# Do not edit it manually. 
+ +version: '3.8' services: iroha0: - image: hyperledger/iroha2:lts + image: hyperledger/iroha2:dev platform: linux/amd64 environment: - TORII_P2P_ADDR: iroha0:1337 - TORII_API_URL: iroha0:8080 - TORII_TELEMETRY_URL: iroha0:8180 - IROHA_PUBLIC_KEY: "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B" - IROHA_PRIVATE_KEY: '{"digest_function": "ed25519", "payload": "282ED9F3CF92811C3818DBC4AE594ED59DC1A2F78E4241E31924E101D6B1FB831C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B"}' - SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha0:1337", "public_key": "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B"}, {"address":"iroha1:1338", "public_key": "ed0120CC25624D62896D3A0BFD8940F928DC2ABF27CC57CEFEB442AA96D9081AAE58A1"}, {"address": "iroha2:1339", "public_key": "ed0120FACA9E8AA83225CB4D16D67F27DD4F93FC30FFA11ADC1F5C88FD5495ECC91020"}, {"address": "iroha3:1340", "public_key": "ed01208E351A70B6A603ED285D666B8D689B680865913BA03CE29FB7D13A166C4E7F1F"}]' - IROHA_GENESIS_ACCOUNT_PUBLIC_KEY: 'ed01203F4E3E98571B55514EDC5CCF7E53CA7509D89B2868E62921180A6F57C2F4E255' - IROHA_GENESIS_ACCOUNT_PRIVATE_KEY: '{"digest_function": "ed25519", "payload": "038AE16B219DA35AA036335ED0A43C28A2CC737150112C78A7B8034B9D99C9023F4E3E98571B55514EDC5CCF7E53CA7509D89B2868E62921180A6F57C2F4E255"}' - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_COUNT_LIMIT: 100 - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_PERIOD_MS: 500 - IROHA_GENESIS_GENESIS_SUBMISSION_DELAY_MS: 1000 + IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 + IROHA_CONFIG: /config/config.json + IROHA_PUBLIC_KEY: ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB + IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb"}' + TORII_P2P_ADDR: 0.0.0.0:1337 + TORII_API_URL: 0.0.0.0:8080 + IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 + IROHA_GENESIS_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"82b3bde54aebeca4146257da0de8d59d8e46d5fe34887dcd8072866792fcb3ad4164bf554923ece1fd412d241036d863a6ae430476c898248b8237d77534cfc4"}' + IROHA_GENESIS_FILE: /config/genesis.json + SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338","public_key":"ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F"},{"address":"iroha3:1340","public_key":"ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61"},{"address":"iroha2:1339","public_key":"ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736"}]' ports: - - "1337:1337" - - "8080:8080" - - "8180:8180" + - 1337:1337 + - 8080:8080 volumes: - - './configs/peer/lts:/config' + - ./configs/peer:/config init: true command: iroha --submit-genesis - + healthcheck: + test: test $(curl -s http://127.0.0.1:8080/status/blocks) -gt 0 + interval: 2s + timeout: 1s + retries: 30 + start_period: 4s iroha1: - image: hyperledger/iroha2:lts + image: hyperledger/iroha2:dev platform: linux/amd64 environment: - TORII_P2P_ADDR: iroha1:1338 - TORII_API_URL: iroha1:8081 - TORII_TELEMETRY_URL: iroha1:8181 - IROHA_PUBLIC_KEY: "ed0120CC25624D62896D3A0BFD8940F928DC2ABF27CC57CEFEB442AA96D9081AAE58A1" - IROHA_PRIVATE_KEY: '{"digest_function": "ed25519", "payload": "3BAC34CDA9E3763FA069C1198312D1EC73B53023B8180C822AC355435EDC4A24CC25624D62896D3A0BFD8940F928DC2ABF27CC57CEFEB442AA96D9081AAE58A1"}' - SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha0:1337", "public_key": 
"ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B"}, {"address":"iroha1:1338", "public_key": "ed0120CC25624D62896D3A0BFD8940F928DC2ABF27CC57CEFEB442AA96D9081AAE58A1"}, {"address": "iroha2:1339", "public_key": "ed0120FACA9E8AA83225CB4D16D67F27DD4F93FC30FFA11ADC1F5C88FD5495ECC91020"}, {"address": "iroha3:1340", "public_key": "ed01208E351A70B6A603ED285D666B8D689B680865913BA03CE29FB7D13A166C4E7F1F"}]' - IROHA_GENESIS_ACCOUNT_PUBLIC_KEY: 'ed01203F4E3E98571B55514EDC5CCF7E53CA7509D89B2868E62921180A6F57C2F4E255' - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_COUNT_LIMIT: 100 - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_PERIOD_MS: 500 - IROHA_GENESIS_GENESIS_SUBMISSION_DELAY_MS: 1000 + IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 + IROHA_CONFIG: /config/config.json + IROHA_PUBLIC_KEY: ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F + IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"c02ffad5e455e7ec620d74de5769681e4d8385906bce5a437eb67452a9efbbc2815bbdc9775d28c3633269b25f22d048e2aa2e36017cbe5ad85f15220beb6f6f"}' + TORII_P2P_ADDR: 0.0.0.0:1338 + TORII_API_URL: 0.0.0.0:8081 + IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 + SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha3:1340","public_key":"ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61"},{"address":"iroha2:1339","public_key":"ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736"}]' ports: - - "1338:1338" - - "8081:8081" - - "8181:8181" + - 1338:1338 + - 8081:8081 volumes: - - './configs/peer/lts:/config' + - ./configs/peer:/config init: true - + healthcheck: + test: test $(curl -s http://127.0.0.1:8081/status/blocks) -gt 0 + interval: 2s + timeout: 1s + retries: 30 + start_period: 4s iroha2: - image: hyperledger/iroha2:lts + image: hyperledger/iroha2:dev platform: linux/amd64 environment: - TORII_P2P_ADDR: iroha2:1339 - TORII_API_URL: iroha2:8082 - TORII_TELEMETRY_URL: iroha2:8182 - IROHA_PUBLIC_KEY: "ed0120FACA9E8AA83225CB4D16D67F27DD4F93FC30FFA11ADC1F5C88FD5495ECC91020" - IROHA_PRIVATE_KEY: '{"digest_function": "ed25519", "payload": "1261A436D36779223D7D6CF20E8B644510E488E6A50BAFD77A7485264D27197DFACA9E8AA83225CB4D16D67F27DD4F93FC30FFA11ADC1F5C88FD5495ECC91020"}' - SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha0:1337", "public_key": "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B"}, {"address":"iroha1:1338", "public_key": "ed0120CC25624D62896D3A0BFD8940F928DC2ABF27CC57CEFEB442AA96D9081AAE58A1"}, {"address": "iroha2:1339", "public_key": "ed0120FACA9E8AA83225CB4D16D67F27DD4F93FC30FFA11ADC1F5C88FD5495ECC91020"}, {"address": "iroha3:1340", "public_key": "ed01208E351A70B6A603ED285D666B8D689B680865913BA03CE29FB7D13A166C4E7F1F"}]' - IROHA_GENESIS_ACCOUNT_PUBLIC_KEY: 'ed01203F4E3E98571B55514EDC5CCF7E53CA7509D89B2868E62921180A6F57C2F4E255' - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_COUNT_LIMIT: 100 - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_PERIOD_MS: 500 - IROHA_GENESIS_GENESIS_SUBMISSION_DELAY_MS: 1000 + IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 + IROHA_CONFIG: /config/config.json + IROHA_PUBLIC_KEY: ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736 + IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"29c5ed1409cb10fd791bc4ff8a6cb5e22a5fae7e36f448ef3ea2988b1319a88bf417e0371e6adb32fd66749477402b1ab67f84a8e9b082e997980cc91f327736"}' + TORII_P2P_ADDR: 0.0.0.0:1339 + 
TORII_API_URL: 0.0.0.0:8082 + IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 + SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338","public_key":"ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F"},{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha3:1340","public_key":"ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61"}]' ports: - - "1339:1339" - - "8082:8082" - - "8182:8182" + - 1339:1339 + - 8082:8082 volumes: - - './configs/peer/lts:/config' + - ./configs/peer:/config init: true - + healthcheck: + test: test $(curl -s http://127.0.0.1:8082/status/blocks) -gt 0 + interval: 2s + timeout: 1s + retries: 30 + start_period: 4s iroha3: - image: hyperledger/iroha2:lts + image: hyperledger/iroha2:dev platform: linux/amd64 environment: - TORII_P2P_ADDR: iroha3:1340 - TORII_API_URL: iroha3:8083 - TORII_TELEMETRY_URL: iroha3:8183 - IROHA_PUBLIC_KEY: "ed01208E351A70B6A603ED285D666B8D689B680865913BA03CE29FB7D13A166C4E7F1F" - IROHA_PRIVATE_KEY: '{"digest_function": "ed25519", "payload": "A70DAB95C7482EB9F159111B65947E482108CFE67DF877BD8D3B9441A781C7C98E351A70B6A603ED285D666B8D689B680865913BA03CE29FB7D13A166C4E7F1F"}' - SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha0:1337", "public_key": "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B"}, {"address":"iroha1:1338", "public_key": "ed0120CC25624D62896D3A0BFD8940F928DC2ABF27CC57CEFEB442AA96D9081AAE58A1"}, {"address": "iroha2:1339", "public_key": "ed0120FACA9E8AA83225CB4D16D67F27DD4F93FC30FFA11ADC1F5C88FD5495ECC91020"}, {"address": "iroha3:1340", "public_key": "ed01208E351A70B6A603ED285D666B8D689B680865913BA03CE29FB7D13A166C4E7F1F"}]' - IROHA_GENESIS_ACCOUNT_PUBLIC_KEY: 'ed01203F4E3E98571B55514EDC5CCF7E53CA7509D89B2868E62921180A6F57C2F4E255' - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_COUNT_LIMIT: 100 - IROHA_GENESIS_WAIT_FOR_PEERS_RETRY_PERIOD_MS: 500 - IROHA_GENESIS_GENESIS_SUBMISSION_DELAY_MS: 1000 + IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 + IROHA_CONFIG: /config/config.json + IROHA_PUBLIC_KEY: ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61 + IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"5eed4855fad183c451aac39dfc50831607e4cf408c98e2b977f3ce4a2df42ce2a66522370d60b9c09e79ade2e9bb1ef2e78733a944b999b3a6aee687ce476d61"}' + TORII_P2P_ADDR: 0.0.0.0:1340 + TORII_API_URL: 0.0.0.0:8083 + IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 + SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338","public_key":"ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F"},{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha2:1339","public_key":"ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736"}]' ports: - - "1340:1340" - - "8083:8083" - - "8183:8183" + - 1340:1340 + - 8083:8083 volumes: - - './configs/peer/lts:/config' + - ./configs/peer:/config init: true + healthcheck: + test: test $(curl -s http://127.0.0.1:8083/status/blocks) -gt 0 + interval: 2s + timeout: 1s + retries: 30 + start_period: 4s diff --git a/docs/source/references/schema.json b/docs/source/references/schema.json index 63895fe10aa..60225b3c14a 100644 --- a/docs/source/references/schema.json +++ b/docs/source/references/schema.json @@ -3061,7 +3061,7 @@ "PublicKey": { "Struct": [ { - "name": "digest_function", + "name": 
"algorithm", "type": "Algorithm" }, { diff --git a/genesis/src/lib.rs b/genesis/src/lib.rs index 2df43091b4d..b0d9a1cef9a 100644 --- a/genesis/src/lib.rs +++ b/genesis/src/lib.rs @@ -63,17 +63,9 @@ impl GenesisNetwork { .chain(raw_block.transactions); let transactions = transactions_iter - .enumerate() - .map(|(i, raw_transaction)| { - raw_transaction - // FIXME: fix underlying chain of `.sign` so that it doesn't - // consume the key pair unnecessarily. It might be costly to clone - // the key pair for a large genesis. - .sign(chain_id.clone(), genesis_key_pair.clone()) - .map(GenesisTransaction) - .wrap_err_with(|| eyre!("Failed to sign transaction at index {i}")) - }) - .collect::>>()?; + .map(|raw_transaction| raw_transaction.sign(chain_id.clone(), genesis_key_pair)) + .map(GenesisTransaction) + .collect(); Ok(GenesisNetwork { transactions }) } @@ -190,14 +182,8 @@ pub struct GenesisTransactionBuilder { impl GenesisTransactionBuilder { /// Convert [`GenesisTransactionBuilder`] into [`SignedTransaction`] with signature. - /// - /// # Errors - /// Fails if signing or accepting fails. - fn sign( - self, - chain_id: ChainId, - genesis_key_pair: KeyPair, - ) -> core::result::Result { + #[must_use] + fn sign(self, chain_id: ChainId, genesis_key_pair: &KeyPair) -> SignedTransaction { TransactionBuilder::new(chain_id, GENESIS_ACCOUNT_ID.clone()) .with_instructions(self.isi) .sign(genesis_key_pair) diff --git a/p2p/Cargo.toml b/p2p/Cargo.toml index 6745de7e992..65d52302204 100644 --- a/p2p/Cargo.toml +++ b/p2p/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] iroha_logger = { workspace = true } -iroha_crypto = { workspace = true } -iroha_data_model = { workspace = true, features = ["transparent_api"] } +iroha_crypto = { workspace = true, default-features = true } +iroha_data_model = { workspace = true, default-features = true, features = ["transparent_api"] } iroha_primitives = { workspace = true } iroha_config_base = { workspace = true } iroha_data_model_derive = { workspace = true } diff --git a/p2p/src/network.rs b/p2p/src/network.rs index 51b97d661e2..453e4d23ad4 100644 --- a/p2p/src/network.rs +++ b/p2p/src/network.rs @@ -10,6 +10,7 @@ use iroha_crypto::{KeyPair, PublicKey}; use iroha_data_model::prelude::PeerId; use iroha_logger::prelude::*; use iroha_primitives::addr::SocketAddr; +use parity_scale_codec::Encode as _; use tokio::{ net::{TcpListener, TcpStream}, sync::{mpsc, watch}, @@ -282,12 +283,12 @@ impl NetworkBase { fn set_current_topology(&mut self, UpdateTopology(topology): UpdateTopology) { iroha_logger::debug!(?topology, "Network receive new topology"); - let self_public_key_hash = blake2b_hash(self.key_pair.public_key().payload()); + let self_public_key_hash = blake2b_hash(self.key_pair.public_key().encode()); let topology = topology .into_iter() .map(|peer_id| { // Determine who is responsible for connecting - let peer_public_key_hash = blake2b_hash(peer_id.public_key().payload()); + let peer_public_key_hash = blake2b_hash(peer_id.public_key().encode()); let is_active = self_public_key_hash > peer_public_key_hash; (peer_id, is_active) }) @@ -302,7 +303,7 @@ impl NetworkBase { // Peer is not connected but should .filter_map(|(peer, is_active)| ( !self.peers.contains_key(&peer.public_key) - && !self.connecting_peers.values().any(|public_key| peer.public_key == *public_key) + && !self.connecting_peers.values().any(|public_key| peer.public_key() == public_key) && *is_active ).then_some(peer)) .cloned() @@ -320,7 +321,7 @@ impl NetworkBase { } for public_key in 
to_disconnect { - self.disconnect_peer(&public_key) + self.disconnect_peer(public_key) } } @@ -343,14 +344,14 @@ impl NetworkBase { ); } - fn disconnect_peer(&mut self, public_key: &PublicKey) { - let peer = match self.peers.remove(public_key) { + fn disconnect_peer(&mut self, public_key: PublicKey) { + let peer = match self.peers.remove(&public_key) { Some(peer) => peer, _ => return iroha_logger::warn!(?public_key, "Not found peer to disconnect"), }; iroha_logger::debug!(listen_addr = %self.listen_addr, %peer.conn_id, "Disconnecting peer"); - let peer_id = PeerId::new(&peer.p2p_addr, public_key); + let peer_id = PeerId::new(peer.p2p_addr, public_key); Self::remove_online_peer(&self.online_peers_sender, &peer_id); } @@ -393,7 +394,7 @@ impl NetworkBase { disambiguator, }; let _ = peer_message_sender.send(self.peer_message_sender.clone()); - self.peers.insert(peer_id.public_key.clone(), ref_peer); + self.peers.insert(peer_id.public_key().clone(), ref_peer); self.connecting_peers.remove(&connection_id); Self::add_online_peer(&self.online_peers_sender, peer_id); } @@ -421,7 +422,7 @@ impl NetworkBase { Self::remove_online_peer(&self.online_peers_sender, &peer_id); } } - None if &peer_id.public_key == self.key_pair.public_key() => { + None if peer_id.public_key() == self.key_pair.public_key() => { #[cfg(debug_assertions)] iroha_logger::trace!("Not sending message to myself") } @@ -438,7 +439,7 @@ impl NetworkBase { } = self; peers.retain(|public_key, ref_peer| { if ref_peer.handle.post(data.clone()).is_err() { - let peer_id = PeerId::new(&ref_peer.p2p_addr, public_key); + let peer_id = PeerId::new(ref_peer.p2p_addr.clone(), public_key.clone()); iroha_logger::error!(peer=%peer_id, "Failed to send message to peer"); Self::remove_online_peer(online_peers_sender, &peer_id); false diff --git a/p2p/src/peer.rs b/p2p/src/peer.rs index 182b72e9e7d..b7720d5661a 100644 --- a/p2p/src/peer.rs +++ b/p2p/src/peer.rs @@ -370,8 +370,8 @@ mod run { mod state { //! Module for peer stages. - use iroha_crypto::{KeyPair, PublicKey, Signature}; - use iroha_primitives::{addr::SocketAddr, const_vec::ConstVec}; + use iroha_crypto::{KeyGenOption, KeyPair, PublicKey, Signature}; + use iroha_primitives::addr::SocketAddr; use super::{cryptographer::Cryptographer, *}; @@ -418,8 +418,8 @@ mod state { }: Self, ) -> Result, crate::Error> { let key_exchange = K::new(); - let (kx_local_pk, kx_local_sk) = key_exchange.keypair(None)?; - let (algorithm, kx_local_pk_raw) = kx_local_pk.clone().into_raw(); + let (kx_local_pk, kx_local_sk) = key_exchange.keypair(KeyGenOption::Random); + let (algorithm, kx_local_pk_raw) = kx_local_pk.to_raw(); let write_half = &mut connection.write; garbage::write(write_half).await?; write_half.write_all(&kx_local_pk_raw).await?; @@ -430,9 +430,9 @@ mod state { // Then we have servers public key let mut key = vec![0_u8; 32]; let _ = read_half.read_exact(&mut key).await?; - PublicKey::from_raw(algorithm, ConstVec::new(key)) + PublicKey::from_raw(algorithm, &key).map_err(iroha_crypto::error::Error::from)? 
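
A note on the `blake2b_hash` comparison in the `network.rs` hunk above: switching from `public_key().payload()` to the SCALE `encode()` of the key only changes which bytes get hashed; the connection tie-break itself stays the same. Both peers hash their own and the remote encoded public key with the same function, and only the side with the greater hash dials out, so every pair of peers ends up with exactly one connection. A minimal standalone sketch of that rule, with a toy hash standing in for the crate's blake2b helper:

```rust
/// Stand-in for the crate's blake2b helper; for the tie-break only
/// "same function on both sides" matters, not the actual digest.
fn toy_hash(encoded_key: &[u8]) -> u64 {
    encoded_key
        .iter()
        .fold(0u64, |acc, b| acc.wrapping_mul(31).wrapping_add(u64::from(*b)))
}

/// Each peer evaluates this with the arguments swapped, so for two
/// distinct keys exactly one side becomes the active (dialing) one.
fn is_active(own_key: &[u8], peer_key: &[u8]) -> bool {
    toy_hash(own_key) > toy_hash(peer_key)
}

fn main() {
    let (a, b) = (b"peer-a".as_slice(), b"peer-b".as_slice());
    assert_ne!(is_active(a, b), is_active(b, a));
}
```
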
}; - let shared_key = key_exchange.compute_shared_secret(&kx_local_sk, &kx_remote_pk); + let shared_key = key_exchange.compute_shared_secret(&kx_local_sk, &kx_remote_pk)?; let cryptographer = Cryptographer::new(&shared_key); Ok(SendKey { peer_addr, @@ -463,20 +463,20 @@ mod state { }: Self, ) -> Result, crate::Error> { let key_exchange = K::new(); - let (kx_local_pk, kx_local_sk) = key_exchange.keypair(None)?; - let (algorithm, kx_local_pk_raw) = kx_local_pk.clone().into_raw(); + let (kx_local_pk, kx_local_sk) = key_exchange.keypair(KeyGenOption::Random); + let (algorithm, kx_local_pk_raw) = kx_local_pk.to_raw(); let read_half = &mut connection.read; let kx_remote_pk = { garbage::read(read_half).await?; // And then we have clients public key let mut key = vec![0_u8; 32]; let _ = read_half.read_exact(&mut key).await?; - PublicKey::from_raw(algorithm, ConstVec::new(key)) + PublicKey::from_raw(algorithm, &key).map_err(iroha_crypto::error::Error::from)? }; let write_half = &mut connection.write; garbage::write(write_half).await?; write_half.write_all(&kx_local_pk_raw).await?; - let shared_key = key_exchange.compute_shared_secret(&kx_local_sk, &kx_remote_pk); + let shared_key = key_exchange.compute_shared_secret(&kx_local_sk, &kx_remote_pk)?; let cryptographer = Cryptographer::new(&shared_key); Ok(SendKey { peer_addr, @@ -513,7 +513,7 @@ mod state { let write_half = &mut connection.write; let payload = create_payload(&kx_local_pk, &kx_remote_pk); - let signature = Signature::new(key_pair, &payload)?; + let signature = Signature::new(&key_pair, &payload); let data = signature.encode(); let data = &cryptographer.encrypt(data.as_slice())?; @@ -570,10 +570,7 @@ mod state { let (remote_pub_key, _) = signature.into(); - let peer_id = PeerId { - address: peer_addr, - public_key: remote_pub_key, - }; + let peer_id = PeerId::new(peer_addr, remote_pub_key); Ok(Ready { peer_id, @@ -592,10 +589,9 @@ mod state { } fn create_payload(kx_local_pk: &PublicKey, kx_remote_pk: &PublicKey) -> Vec { - let mut payload = - Vec::with_capacity(kx_local_pk.payload().len() + kx_remote_pk.payload().len()); - payload.extend(kx_local_pk.payload()); - payload.extend(kx_remote_pk.payload()); + let mut payload = Vec::with_capacity(kx_local_pk.size_hint() + kx_remote_pk.size_hint()); + kx_local_pk.encode_to(&mut payload); + kx_remote_pk.encode_to(&mut payload); payload } } diff --git a/p2p/tests/integration/p2p.rs b/p2p/tests/integration/p2p.rs index b23faff5036..0b45a35cc70 100644 --- a/p2p/tests/integration/p2p.rs +++ b/p2p/tests/integration/p2p.rs @@ -54,10 +54,7 @@ async fn network_create() { tokio::time::sleep(delay).await; info!("Connecting to peer..."); - let peer1 = PeerId { - address: address.clone(), - public_key: public_key.clone(), - }; + let peer1 = PeerId::new(address.clone(), public_key.clone()); let topology = HashSet::from([peer1.clone()]); network.update_topology(UpdateTopology(topology)); tokio::time::sleep(delay).await; @@ -174,14 +171,8 @@ async fn two_networks() { network2.subscribe_to_peers_messages(actor2); info!("Connecting peers..."); - let peer1 = PeerId { - address: address1.clone(), - public_key: public_key1, - }; - let peer2 = PeerId { - address: address2.clone(), - public_key: public_key2, - }; + let peer1 = PeerId::new(address1.clone(), public_key1); + let peer2 = PeerId::new(address2.clone(), public_key2); let topology1 = HashSet::from([peer2.clone()]); let topology2 = HashSet::from([peer1.clone()]); // Connect peers with each other @@ -237,10 +228,7 @@ async fn multiple_networks() { let address 
= socket_addr!(127.0.0.1: 12_015 + ( i * 5)); let key_pair = KeyPair::generate().unwrap(); let public_key = key_pair.public_key().clone(); - peers.push(PeerId { - address, - public_key, - }); + peers.push(PeerId::new(address, public_key)); key_pairs.push(key_pair); } diff --git a/primitives/src/const_vec.rs b/primitives/src/const_vec.rs index 30974346c16..acba77ac1a7 100644 --- a/primitives/src/const_vec.rs +++ b/primitives/src/const_vec.rs @@ -92,6 +92,33 @@ impl IntoSchema for ConstVec { } } +impl IntoIterator for ConstVec { + type Item = T; + + type IntoIter = as IntoIterator>::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.into_vec().into_iter() + } +} + +/// Trait to extend `[T]` with a method to convert it to `ConstVec` by analogy with `[T]::to_vec()`. +pub trait ToConstVec { + /// The type of the items in the slice. + type Item; + + /// Copies `self` into a new [`ConstVec`]. + fn to_const_vec(&self) -> ConstVec; +} + +impl ToConstVec for [T] { + type Item = T; + + fn to_const_vec(&self) -> ConstVec { + ConstVec::new(self) + } +} + #[cfg(test)] mod tests { use parity_scale_codec::{Decode, Encode}; diff --git a/primitives/src/small.rs b/primitives/src/small.rs index ddcd4e1635d..cb491c7d95d 100644 --- a/primitives/src/small.rs +++ b/primitives/src/small.rs @@ -118,6 +118,7 @@ mod small_vector { } } + #[allow(clippy::unconditional_recursion)] // False-positive impl PartialEq for SmallVec where A::Item: PartialEq, diff --git a/schema/src/serialize.rs b/schema/src/serialize.rs index de295b1fa81..adaa44a1626 100644 --- a/schema/src/serialize.rs +++ b/schema/src/serialize.rs @@ -23,7 +23,12 @@ struct WithContext<'ctx, 'a, T: ?Sized> { impl WithContext<'_, '_, T> { fn type_name(&self, type_id: TypeId) -> &String { - &self.context.0[&type_id].0 + &self + .context + .0 + .get(&type_id) + .unwrap_or_else(|| panic!("Failed to find type id `{:?}`", type_id)) + .0 } } diff --git a/scripts/tests/consistency.sh b/scripts/tests/consistency.sh index bf5873f2f81..190024d2135 100755 --- a/scripts/tests/consistency.sh +++ b/scripts/tests/consistency.sh @@ -40,19 +40,19 @@ case $1 in } command_base_for_single() { - echo "cargo run --release --bin iroha_swarm -- -p 1 -s Iroha --force --config-dir ./configs/peer --build ." + echo "cargo run --release --bin iroha_swarm -- -p 1 -s Iroha --force --config-dir ./configs/peer --health-check --build ." } command_base_for_multiple_local() { - echo "cargo run --release --bin iroha_swarm -- -p 4 -s Iroha --force --config-dir ./configs/peer --build ." + echo "cargo run --release --bin iroha_swarm -- -p 4 -s Iroha --force --config-dir ./configs/peer --health-check --build ." 
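
For context on the `consistency.sh` changes above: each `command_base_for_*` regenerates a Compose file with `iroha_swarm`, and the check fails when the committed file no longer matches the generator's output, which is why adding `--health-check` here has to go hand in hand with regenerating the checked-in YAML. A rough standalone sketch of that comparison (the `do_check` internals are not shown in this diff, so the temp-file-and-compare shape is an assumption):

```rust
use std::{fs, io, path::Path};

/// Returns true when the committed Compose file matches freshly
/// generated output (assumed to mirror what `do_check` verifies).
fn is_consistent(generated: &str, committed: &Path) -> io::Result<bool> {
    Ok(fs::read_to_string(committed)? == generated)
}

fn main() -> io::Result<()> {
    let regenerated = "services: {}\n"; // placeholder for `iroha_swarm` output
    if !is_consistent(regenerated, Path::new("docker-compose.yml"))? {
        eprintln!("docker-compose.yml is stale; re-run iroha_swarm");
        std::process::exit(1);
    }
    Ok(())
}
```
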
} command_base_for_default() { - echo "cargo run --release --bin iroha_swarm -- -p 4 -s Iroha --force --config-dir ./configs/peer --image hyperledger/iroha2:dev" + echo "cargo run --release --bin iroha_swarm -- -p 4 -s Iroha --force --config-dir ./configs/peer --health-check --image hyperledger/iroha2:dev" } - do_check "$(command_base_for_single)" "docker-compose.dev.single.yml" - do_check "$(command_base_for_multiple_local)" "docker-compose.dev.local.yml" - do_check "$(command_base_for_default)" "docker-compose.dev.yml" + do_check "$(command_base_for_single)" "docker-compose.single.yml" + do_check "$(command_base_for_multiple_local)" "docker-compose.local.yml" + do_check "$(command_base_for_default)" "docker-compose.yml" esac diff --git a/smart_contract/Cargo.toml b/smart_contract/Cargo.toml index bc03b6a98e5..fe73055e9e4 100644 --- a/smart_contract/Cargo.toml +++ b/smart_contract/Cargo.toml @@ -23,6 +23,10 @@ iroha_smart_contract_derive.workspace = true parity-scale-codec.workspace = true derive_more.workspace = true +getrandom = "0.2" + [dev-dependencies] webassembly-test = "0.1.0" +# Not used directly but required for compilation +getrandom = { version = "0.2", features = ["custom"] } diff --git a/smart_contract/executor/derive/src/validate.rs b/smart_contract/executor/derive/src/validate.rs index 46d3693b62f..35e43dd8fcb 100644 --- a/smart_contract/executor/derive/src/validate.rs +++ b/smart_contract/executor/derive/src/validate.rs @@ -41,7 +41,7 @@ impl FromAttributes for ValidateAttribute { // but we still _want_ to validate that each attribute parses successfully // this is to ensure that we provide the user with as much validation as possible, instead of bailing out early // `Option::or_else` would NOT work here, as it would not validate conditions after the first valid one - #[allow(clippy::or_fun_call)] + #[allow(clippy::or_fun_call, clippy::too_many_lines)] fn from_attributes(attrs: &[Attribute]) -> darling::Result { let mut accumulator = darling::error::Accumulator::default(); diff --git a/smart_contract/executor/src/default.rs b/smart_contract/executor/src/default.rs index 2c6e32c2932..6ac2bde0afb 100644 --- a/smart_contract/executor/src/default.rs +++ b/smart_contract/executor/src/default.rs @@ -173,7 +173,11 @@ pub mod peer { } pub mod domain { - use permission::{account::is_account_owner, domain::is_domain_owner}; + use iroha_smart_contract::data_model::{domain::DomainId, permission::PermissionToken}; + use permission::{ + account::is_account_owner, accounts_permission_tokens, domain::is_domain_owner, + }; + use tokens::AnyPermissionToken; use super::*; @@ -192,21 +196,28 @@ pub mod domain { ) { let domain_id = isi.object_id(); - if is_genesis(executor) { - execute!(executor, isi); - } - match is_domain_owner(domain_id, authority) { - Err(err) => deny!(executor, err), - Ok(true) => execute!(executor, isi), - Ok(false) => {} - } - let can_unregister_domain_token = tokens::domain::CanUnregisterDomain { - domain_id: domain_id.clone(), - }; - if can_unregister_domain_token.is_owned_by(authority) { + if is_genesis(executor) + || match is_domain_owner(domain_id, authority) { + Err(err) => deny!(executor, err), + Ok(is_domain_owner) => is_domain_owner, + } + || { + let can_unregister_domain_token = tokens::domain::CanUnregisterDomain { + domain_id: domain_id.clone(), + }; + can_unregister_domain_token.is_owned_by(authority) + } + { + for (owner_id, permission) in accounts_permission_tokens() { + if is_token_domain_associated(&permission, domain_id) { + let isi = 
Revoke::permission(permission, owner_id.clone()); + if let Err(_err) = isi.execute() { + deny!(executor, "Can't revoke associated permission token"); + } + } + } execute!(executor, isi); } - deny!(executor, "Can't unregister domain"); } @@ -284,10 +295,123 @@ pub mod domain { deny!(executor, "Can't remove key value in domain metadata"); } + + #[allow(clippy::too_many_lines)] + fn is_token_domain_associated(permission: &PermissionToken, domain_id: &DomainId) -> bool { + let Ok(permission) = AnyPermissionToken::try_from(permission.clone()) else { + return false; + }; + match permission { + AnyPermissionToken::CanUnregisterDomain(permission) => { + &permission.domain_id == domain_id + } + AnyPermissionToken::CanSetKeyValueInDomain(permission) => { + &permission.domain_id == domain_id + } + AnyPermissionToken::CanRemoveKeyValueInDomain(permission) => { + &permission.domain_id == domain_id + } + AnyPermissionToken::CanRegisterAccountInDomain(permission) => { + &permission.domain_id == domain_id + } + AnyPermissionToken::CanRegisterAssetDefinitionInDomain(permission) => { + &permission.domain_id == domain_id + } + AnyPermissionToken::CanUnregisterAssetDefinition(permission) => { + permission.asset_definition_id.domain_id() == domain_id + } + AnyPermissionToken::CanSetKeyValueInAssetDefinition(permission) => { + permission.asset_definition_id.domain_id() == domain_id + } + AnyPermissionToken::CanRemoveKeyValueInAssetDefinition(permission) => { + permission.asset_definition_id.domain_id() == domain_id + } + AnyPermissionToken::CanRegisterAssetWithDefinition(permission) => { + permission.asset_definition_id.domain_id() == domain_id + } + AnyPermissionToken::CanUnregisterAssetWithDefinition(permission) => { + permission.asset_definition_id.domain_id() == domain_id + } + AnyPermissionToken::CanBurnAssetWithDefinition(permission) => { + permission.asset_definition_id.domain_id() == domain_id + } + AnyPermissionToken::CanMintAssetWithDefinition(permission) => { + permission.asset_definition_id.domain_id() == domain_id + } + AnyPermissionToken::CanTransferAssetWithDefinition(permission) => { + permission.asset_definition_id.domain_id() == domain_id + } + AnyPermissionToken::CanBurnUserAsset(permission) => { + permission.asset_id.definition_id().domain_id() == domain_id + || permission.asset_id.account_id().domain_id() == domain_id + } + AnyPermissionToken::CanTransferUserAsset(permission) => { + permission.asset_id.definition_id().domain_id() == domain_id + || permission.asset_id.account_id().domain_id() == domain_id + } + AnyPermissionToken::CanUnregisterUserAsset(permission) => { + permission.asset_id.definition_id().domain_id() == domain_id + || permission.asset_id.account_id().domain_id() == domain_id + } + AnyPermissionToken::CanSetKeyValueInUserAsset(permission) => { + permission.asset_id.definition_id().domain_id() == domain_id + || permission.asset_id.account_id().domain_id() == domain_id + } + AnyPermissionToken::CanRemoveKeyValueInUserAsset(permission) => { + permission.asset_id.definition_id().domain_id() == domain_id + || permission.asset_id.account_id().domain_id() == domain_id + } + AnyPermissionToken::CanMintUserAsset(permission) => { + permission.asset_id.definition_id().domain_id() == domain_id + || permission.asset_id.account_id().domain_id() == domain_id + } + AnyPermissionToken::CanUnregisterAccount(permission) => { + permission.account_id.domain_id() == domain_id + } + AnyPermissionToken::CanMintUserPublicKeys(permission) => { + permission.account_id.domain_id() == domain_id + } + 
AnyPermissionToken::CanBurnUserPublicKeys(permission) => { + permission.account_id.domain_id() == domain_id + } + AnyPermissionToken::CanMintUserSignatureCheckConditions(permission) => { + permission.account_id.domain_id() == domain_id + } + AnyPermissionToken::CanSetKeyValueInUserAccount(permission) => { + permission.account_id.domain_id() == domain_id + } + AnyPermissionToken::CanRemoveKeyValueInUserAccount(permission) => { + permission.account_id.domain_id() == domain_id + } + AnyPermissionToken::CanUnregisterUserTrigger(permission) => { + permission.trigger_id.domain_id().as_ref() == Some(domain_id) + } + AnyPermissionToken::CanExecuteUserTrigger(permission) => { + permission.trigger_id.domain_id().as_ref() == Some(domain_id) + } + AnyPermissionToken::CanBurnUserTrigger(permission) => { + permission.trigger_id.domain_id().as_ref() == Some(domain_id) + } + AnyPermissionToken::CanMintUserTrigger(permission) => { + permission.trigger_id.domain_id().as_ref() == Some(domain_id) + } + AnyPermissionToken::CanUnregisterAnyPeer(_) + | AnyPermissionToken::CanGrantPermissionToCreateParameters(_) + | AnyPermissionToken::CanRevokePermissionToCreateParameters(_) + | AnyPermissionToken::CanCreateParameters(_) + | AnyPermissionToken::CanGrantPermissionToSetParameters(_) + | AnyPermissionToken::CanRevokePermissionToSetParameters(_) + | AnyPermissionToken::CanSetParameters(_) + | AnyPermissionToken::CanUnregisterAnyRole(_) + | AnyPermissionToken::CanUpgradeExecutor(_) => false, + } + } } pub mod account { - use permission::account::is_account_owner; + use iroha_smart_contract::data_model::permission::PermissionToken; + use permission::{account::is_account_owner, accounts_permission_tokens}; + use tokens::AnyPermissionToken; use super::*; @@ -324,21 +448,28 @@ pub mod account { ) { let account_id = isi.object_id(); - if is_genesis(executor) { - execute!(executor, isi); - } - match is_account_owner(account_id, authority) { - Err(err) => deny!(executor, err), - Ok(true) => execute!(executor, isi), - Ok(false) => {} - } - let can_unregister_user_account = tokens::account::CanUnregisterAccount { - account_id: account_id.clone(), - }; - if can_unregister_user_account.is_owned_by(authority) { + if is_genesis(executor) + || match is_account_owner(account_id, authority) { + Err(err) => deny!(executor, err), + Ok(is_account_owner) => is_account_owner, + } + || { + let can_unregister_user_account = tokens::account::CanUnregisterAccount { + account_id: account_id.clone(), + }; + can_unregister_user_account.is_owned_by(authority) + } + { + for (owner_id, permission) in accounts_permission_tokens() { + if is_token_account_associated(&permission, account_id) { + let isi = Revoke::permission(permission, owner_id.clone()); + if let Err(_err) = isi.execute() { + deny!(executor, "Can't revoke associated permission token"); + } + } + } execute!(executor, isi); } - deny!(executor, "Can't unregister another account"); } @@ -478,10 +609,85 @@ pub mod account { "Can't remove value from the metadata of another account" ); } + + fn is_token_account_associated(permission: &PermissionToken, account_id: &AccountId) -> bool { + let Ok(permission) = AnyPermissionToken::try_from(permission.clone()) else { + return false; + }; + match permission { + AnyPermissionToken::CanUnregisterAccount(permission) => { + &permission.account_id == account_id + } + AnyPermissionToken::CanMintUserPublicKeys(permission) => { + &permission.account_id == account_id + } + AnyPermissionToken::CanBurnUserPublicKeys(permission) => { + &permission.account_id 
== account_id + } + AnyPermissionToken::CanMintUserSignatureCheckConditions(permission) => { + &permission.account_id == account_id + } + AnyPermissionToken::CanSetKeyValueInUserAccount(permission) => { + &permission.account_id == account_id + } + AnyPermissionToken::CanRemoveKeyValueInUserAccount(permission) => { + &permission.account_id == account_id + } + AnyPermissionToken::CanBurnUserAsset(permission) => { + permission.asset_id.account_id() == account_id + } + AnyPermissionToken::CanTransferUserAsset(permission) => { + permission.asset_id.account_id() == account_id + } + AnyPermissionToken::CanUnregisterUserAsset(permission) => { + permission.asset_id.account_id() == account_id + } + AnyPermissionToken::CanSetKeyValueInUserAsset(permission) => { + permission.asset_id.account_id() == account_id + } + AnyPermissionToken::CanRemoveKeyValueInUserAsset(permission) => { + permission.asset_id.account_id() == account_id + } + AnyPermissionToken::CanMintUserAsset(permission) => { + permission.asset_id.account_id() == account_id + } + AnyPermissionToken::CanUnregisterUserTrigger(_) + | AnyPermissionToken::CanExecuteUserTrigger(_) + | AnyPermissionToken::CanBurnUserTrigger(_) + | AnyPermissionToken::CanMintUserTrigger(_) + | AnyPermissionToken::CanUnregisterAnyPeer(_) + | AnyPermissionToken::CanUnregisterDomain(_) + | AnyPermissionToken::CanSetKeyValueInDomain(_) + | AnyPermissionToken::CanRemoveKeyValueInDomain(_) + | AnyPermissionToken::CanRegisterAccountInDomain(_) + | AnyPermissionToken::CanRegisterAssetDefinitionInDomain(_) + | AnyPermissionToken::CanUnregisterAssetDefinition(_) + | AnyPermissionToken::CanSetKeyValueInAssetDefinition(_) + | AnyPermissionToken::CanRemoveKeyValueInAssetDefinition(_) + | AnyPermissionToken::CanRegisterAssetWithDefinition(_) + | AnyPermissionToken::CanUnregisterAssetWithDefinition(_) + | AnyPermissionToken::CanBurnAssetWithDefinition(_) + | AnyPermissionToken::CanMintAssetWithDefinition(_) + | AnyPermissionToken::CanTransferAssetWithDefinition(_) + | AnyPermissionToken::CanGrantPermissionToCreateParameters(_) + | AnyPermissionToken::CanRevokePermissionToCreateParameters(_) + | AnyPermissionToken::CanCreateParameters(_) + | AnyPermissionToken::CanGrantPermissionToSetParameters(_) + | AnyPermissionToken::CanRevokePermissionToSetParameters(_) + | AnyPermissionToken::CanSetParameters(_) + | AnyPermissionToken::CanUnregisterAnyRole(_) + | AnyPermissionToken::CanUpgradeExecutor(_) => false, + } + } } pub mod asset_definition { - use permission::{account::is_account_owner, asset_definition::is_asset_definition_owner}; + use iroha_smart_contract::data_model::{asset::AssetDefinitionId, permission::PermissionToken}; + use permission::{ + account::is_account_owner, accounts_permission_tokens, + asset_definition::is_asset_definition_owner, + }; + use tokens::AnyPermissionToken; use super::*; @@ -519,22 +725,29 @@ pub mod asset_definition { ) { let asset_definition_id = isi.object_id(); - if is_genesis(executor) { - execute!(executor, isi); - } - match is_asset_definition_owner(asset_definition_id, authority) { - Err(err) => deny!(executor, err), - Ok(true) => execute!(executor, isi), - Ok(false) => {} - } - let can_unregister_asset_definition_token = - tokens::asset_definition::CanUnregisterAssetDefinition { - asset_definition_id: asset_definition_id.clone(), - }; - if can_unregister_asset_definition_token.is_owned_by(authority) { + if is_genesis(executor) + || match is_asset_definition_owner(asset_definition_id, authority) { + Err(err) => deny!(executor, err), + 
Ok(is_asset_definition_owner) => is_asset_definition_owner, + } + || { + let can_unregister_asset_definition_token = + tokens::asset_definition::CanUnregisterAssetDefinition { + asset_definition_id: asset_definition_id.clone(), + }; + can_unregister_asset_definition_token.is_owned_by(authority) + } + { + for (owner_id, permission) in accounts_permission_tokens() { + if is_token_asset_definition_associated(&permission, asset_definition_id) { + let isi = Revoke::permission(permission, owner_id.clone()); + if let Err(_err) = isi.execute() { + deny!(executor, "Can't revoke associated permission token"); + } + } + } execute!(executor, isi); } - deny!( executor, "Can't unregister asset definition in a domain owned by another account" @@ -626,6 +839,83 @@ pub mod asset_definition { "Can't remove value from the asset definition metadata created by another account" ); } + + fn is_token_asset_definition_associated( + permission: &PermissionToken, + asset_definition_id: &AssetDefinitionId, + ) -> bool { + let Ok(permission) = AnyPermissionToken::try_from(permission.clone()) else { + return false; + }; + match permission { + AnyPermissionToken::CanUnregisterAssetDefinition(permission) => { + &permission.asset_definition_id == asset_definition_id + } + AnyPermissionToken::CanSetKeyValueInAssetDefinition(permission) => { + &permission.asset_definition_id == asset_definition_id + } + AnyPermissionToken::CanRemoveKeyValueInAssetDefinition(permission) => { + &permission.asset_definition_id == asset_definition_id + } + AnyPermissionToken::CanRegisterAssetWithDefinition(permission) => { + &permission.asset_definition_id == asset_definition_id + } + AnyPermissionToken::CanUnregisterAssetWithDefinition(permission) => { + &permission.asset_definition_id == asset_definition_id + } + AnyPermissionToken::CanBurnAssetWithDefinition(permission) => { + &permission.asset_definition_id == asset_definition_id + } + AnyPermissionToken::CanMintAssetWithDefinition(permission) => { + &permission.asset_definition_id == asset_definition_id + } + AnyPermissionToken::CanTransferAssetWithDefinition(permission) => { + &permission.asset_definition_id == asset_definition_id + } + AnyPermissionToken::CanBurnUserAsset(permission) => { + permission.asset_id.definition_id() == asset_definition_id + } + AnyPermissionToken::CanTransferUserAsset(permission) => { + permission.asset_id.definition_id() == asset_definition_id + } + AnyPermissionToken::CanUnregisterUserAsset(permission) => { + permission.asset_id.definition_id() == asset_definition_id + } + AnyPermissionToken::CanSetKeyValueInUserAsset(permission) => { + permission.asset_id.definition_id() == asset_definition_id + } + AnyPermissionToken::CanRemoveKeyValueInUserAsset(permission) => { + permission.asset_id.definition_id() == asset_definition_id + } + AnyPermissionToken::CanMintUserAsset(permission) => { + permission.asset_id.definition_id() == asset_definition_id + } + AnyPermissionToken::CanUnregisterAccount(_) + | AnyPermissionToken::CanMintUserPublicKeys(_) + | AnyPermissionToken::CanBurnUserPublicKeys(_) + | AnyPermissionToken::CanMintUserSignatureCheckConditions(_) + | AnyPermissionToken::CanSetKeyValueInUserAccount(_) + | AnyPermissionToken::CanRemoveKeyValueInUserAccount(_) + | AnyPermissionToken::CanUnregisterUserTrigger(_) + | AnyPermissionToken::CanExecuteUserTrigger(_) + | AnyPermissionToken::CanBurnUserTrigger(_) + | AnyPermissionToken::CanMintUserTrigger(_) + | AnyPermissionToken::CanUnregisterAnyPeer(_) + | AnyPermissionToken::CanUnregisterDomain(_) + | 
AnyPermissionToken::CanSetKeyValueInDomain(_) + | AnyPermissionToken::CanRemoveKeyValueInDomain(_) + | AnyPermissionToken::CanRegisterAccountInDomain(_) + | AnyPermissionToken::CanRegisterAssetDefinitionInDomain(_) + | AnyPermissionToken::CanGrantPermissionToCreateParameters(_) + | AnyPermissionToken::CanRevokePermissionToCreateParameters(_) + | AnyPermissionToken::CanCreateParameters(_) + | AnyPermissionToken::CanGrantPermissionToSetParameters(_) + | AnyPermissionToken::CanRevokePermissionToSetParameters(_) + | AnyPermissionToken::CanSetParameters(_) + | AnyPermissionToken::CanUnregisterAnyRole(_) + | AnyPermissionToken::CanUpgradeExecutor(_) => false, + } + } } pub mod asset { @@ -986,6 +1276,7 @@ pub mod parameter { pub mod role { use iroha_smart_contract::data_model::role::Role; + use role::tokens::AnyPermissionToken; use super::*; @@ -1002,29 +1293,27 @@ pub mod role { let role = Role::try_from(find_role_query_res).unwrap(); let mut unknown_tokens = Vec::new(); - for token in role.permissions() { - macro_rules! visit_internal { - ($token:ident) => { - if !is_genesis($executor) { - if let Err(error) = permission::ValidateGrantRevoke::$method( - &$token, - $authority, - $executor.block_height(), - ) - { - deny!($executor, error); - } + if !is_genesis($executor) { + for token in role.permissions() { + if let Ok(token) = AnyPermissionToken::try_from(token.clone()) { + if let Err(error) = permission::ValidateGrantRevoke::$method( + &token, + $authority, + $executor.block_height(), + ) { + deny!($executor, error); } - continue; - }; - } + } - tokens::map_token!(token => visit_internal); - unknown_tokens.push(token); + unknown_tokens.push(token); + } } - assert!(unknown_tokens.is_empty(), "Role contains unknown permission tokens: {unknown_tokens:?}"); + assert!( + unknown_tokens.is_empty(), + "Role contains unknown permission tokens: {unknown_tokens:?}" + ); execute!($executor, $isi) }; } @@ -1043,15 +1332,12 @@ pub mod role { for token in role.permissions() { iroha_smart_contract::debug!(&format!("Checking `{token:?}`")); - macro_rules! 
try_from_token { - ($token:ident) => { - let token = PermissionToken::from($token); - new_role = new_role.add_permission(token); - continue; - }; + if let Ok(any_token) = AnyPermissionToken::try_from(token.clone()) { + let token = PermissionToken::from(any_token); + new_role = new_role.add_permission(token); + continue; } - tokens::map_token!(token => try_from_token); unknown_tokens.push(token); } @@ -1102,7 +1388,9 @@ pub mod role { } pub mod trigger { - use permission::trigger::is_trigger_owner; + use iroha_smart_contract::data_model::{permission::PermissionToken, trigger::TriggerId}; + use permission::{accounts_permission_tokens, trigger::is_trigger_owner}; + use tokens::AnyPermissionToken; use super::*; @@ -1121,21 +1409,28 @@ pub mod trigger { ) { let trigger_id = isi.object_id(); - if is_genesis(executor) { - execute!(executor, isi); - } - match is_trigger_owner(trigger_id, authority) { - Err(err) => deny!(executor, err), - Ok(true) => execute!(executor, isi), - Ok(false) => {} - } - let can_unregister_user_trigger_token = tokens::trigger::CanUnregisterUserTrigger { - trigger_id: trigger_id.clone(), - }; - if can_unregister_user_trigger_token.is_owned_by(authority) { + if is_genesis(executor) + || match is_trigger_owner(trigger_id, authority) { + Err(err) => deny!(executor, err), + Ok(is_trigger_owner) => is_trigger_owner, + } + || { + let can_unregister_user_trigger_token = tokens::trigger::CanUnregisterUserTrigger { + trigger_id: trigger_id.clone(), + }; + can_unregister_user_trigger_token.is_owned_by(authority) + } + { + for (owner_id, permission) in accounts_permission_tokens() { + if is_token_trigger_associated(&permission, trigger_id) { + let isi = Revoke::permission(permission, owner_id.clone()); + if let Err(_err) = isi.execute() { + deny!(executor, "Can't revoke associated permission token"); + } + } + } execute!(executor, isi); } - deny!( executor, "Can't unregister trigger owned by another account" @@ -1222,9 +1517,65 @@ pub mod trigger { deny!(executor, "Can't execute trigger owned by another account"); } + + fn is_token_trigger_associated(permission: &PermissionToken, trigger_id: &TriggerId) -> bool { + let Ok(permission) = AnyPermissionToken::try_from(permission.clone()) else { + return false; + }; + match permission { + AnyPermissionToken::CanUnregisterUserTrigger(permission) => { + &permission.trigger_id == trigger_id + } + AnyPermissionToken::CanExecuteUserTrigger(permission) => { + &permission.trigger_id == trigger_id + } + AnyPermissionToken::CanBurnUserTrigger(permission) => { + &permission.trigger_id == trigger_id + } + AnyPermissionToken::CanMintUserTrigger(permission) => { + &permission.trigger_id == trigger_id + } + AnyPermissionToken::CanUnregisterAnyPeer(_) + | AnyPermissionToken::CanUnregisterDomain(_) + | AnyPermissionToken::CanSetKeyValueInDomain(_) + | AnyPermissionToken::CanRemoveKeyValueInDomain(_) + | AnyPermissionToken::CanRegisterAccountInDomain(_) + | AnyPermissionToken::CanRegisterAssetDefinitionInDomain(_) + | AnyPermissionToken::CanUnregisterAccount(_) + | AnyPermissionToken::CanMintUserPublicKeys(_) + | AnyPermissionToken::CanBurnUserPublicKeys(_) + | AnyPermissionToken::CanMintUserSignatureCheckConditions(_) + | AnyPermissionToken::CanSetKeyValueInUserAccount(_) + | AnyPermissionToken::CanRemoveKeyValueInUserAccount(_) + | AnyPermissionToken::CanUnregisterAssetDefinition(_) + | AnyPermissionToken::CanSetKeyValueInAssetDefinition(_) + | AnyPermissionToken::CanRemoveKeyValueInAssetDefinition(_) + | 
AnyPermissionToken::CanRegisterAssetWithDefinition(_) + | AnyPermissionToken::CanUnregisterAssetWithDefinition(_) + | AnyPermissionToken::CanUnregisterUserAsset(_) + | AnyPermissionToken::CanBurnAssetWithDefinition(_) + | AnyPermissionToken::CanBurnUserAsset(_) + | AnyPermissionToken::CanMintAssetWithDefinition(_) + | AnyPermissionToken::CanTransferAssetWithDefinition(_) + | AnyPermissionToken::CanTransferUserAsset(_) + | AnyPermissionToken::CanSetKeyValueInUserAsset(_) + | AnyPermissionToken::CanRemoveKeyValueInUserAsset(_) + | AnyPermissionToken::CanMintUserAsset(_) + | AnyPermissionToken::CanGrantPermissionToCreateParameters(_) + | AnyPermissionToken::CanRevokePermissionToCreateParameters(_) + | AnyPermissionToken::CanCreateParameters(_) + | AnyPermissionToken::CanGrantPermissionToSetParameters(_) + | AnyPermissionToken::CanRevokePermissionToSetParameters(_) + | AnyPermissionToken::CanSetParameters(_) + | AnyPermissionToken::CanUnregisterAnyRole(_) + | AnyPermissionToken::CanUpgradeExecutor(_) => false, + } + } } pub mod permission_token { + use tokens::AnyPermissionToken; + use super::*; macro_rules! impl_validate { @@ -1233,26 +1584,22 @@ pub mod permission_token { let token = $isi.object().clone(); let account_id = $isi.destination_id().clone(); - macro_rules! visit_internal { - ($token:ident) => { - let token = PermissionToken::from($token.clone()); - let isi = <$isi_type>::permission(token, account_id); - if is_genesis($executor) { - execute!($executor, isi); - } - if let Err(error) = permission::ValidateGrantRevoke::$method( - &$token, - $authority, - $executor.block_height(), - ) { - deny!($executor, error); - } - + if let Ok(any_token) = AnyPermissionToken::try_from(token.clone()) { + let token = PermissionToken::from(any_token.clone()); + let isi = <$isi_type>::permission(token, account_id); + if is_genesis($executor) { execute!($executor, isi); - }; - } + } + if let Err(error) = permission::ValidateGrantRevoke::$method( + &any_token, + $authority, + $executor.block_height(), + ) { + deny!($executor, error); + } - tokens::map_token!(token => visit_internal); + execute!($executor, isi); + } deny!( $executor, diff --git a/smart_contract/executor/src/default/tokens.rs b/smart_contract/executor/src/default/tokens.rs index 3deb532f5eb..6e90f0fa6aa 100644 --- a/smart_contract/executor/src/default/tokens.rs +++ b/smart_contract/executor/src/default/tokens.rs @@ -29,27 +29,64 @@ use crate::permission::{self, Token as _}; /// ``` macro_rules! declare_tokens { ($($($token_path:ident ::)+ { $token_ty:ident }),+ $(,)?) => { - macro_rules! map_token { - ($token:ident => $callback:ident) => { - match $token.definition_id().as_ref() { $( + macro_rules! 
map_token_type { + ($callback:ident) => { $( + $callback!($($token_path::)+$token_ty); )+ + }; + } + + /// Enum with every default token + #[allow(clippy::enum_variant_names)] + #[derive(Clone)] + pub(crate) enum AnyPermissionToken { + $( + $token_ty($($token_path::)+$token_ty), + )* + } + + impl TryFrom<$crate::data_model::permission::PermissionToken> for AnyPermissionToken { + type Error = $crate::permission::PermissionTokenConversionError; + + fn try_from(token: $crate::data_model::permission::PermissionToken) -> Result { + match token.definition_id().as_ref() { $( stringify!($token_ty) => { - if let Ok(token) = <$($token_path::)+$token_ty>::try_from($token.clone()) { - $callback!(token); - } + let token = <$($token_path::)+$token_ty>::try_from(token)?; + Ok(Self::$token_ty(token)) } )+ - _ => {} + _ => Err(Self::Error::Id(token.definition_id().clone())) } + } + } - }; + impl From for $crate::data_model::permission::PermissionToken { + fn from(token: AnyPermissionToken) -> Self { + match token { + $( + AnyPermissionToken::$token_ty(token) => Self::from(token), + )* + } + } } - macro_rules! map_token_type { - ($callback:ident) => { $( - $callback!($($token_path::)+$token_ty); )+ - }; + impl $crate::permission::ValidateGrantRevoke for AnyPermissionToken { + fn validate_grant(&self, authority: &AccountId, block_height: u64) -> Result { + match self { + $( + AnyPermissionToken::$token_ty(token) => token.validate_grant(authority, block_height), + )* + } + + } + + fn validate_revoke(&self, authority: &AccountId, block_height: u64) -> Result { + match self { + $( + AnyPermissionToken::$token_ty(token) => token.validate_revoke(authority, block_height), + )* + } + } } - pub(crate) use map_token; pub(crate) use map_token_type; }; } diff --git a/smart_contract/executor/src/lib.rs b/smart_contract/executor/src/lib.rs index ef953f78f14..0610ff22b7a 100644 --- a/smart_contract/executor/src/lib.rs +++ b/smart_contract/executor/src/lib.rs @@ -15,7 +15,7 @@ pub use iroha_smart_contract as smart_contract; pub use iroha_smart_contract_utils::{debug, encode_with_length_prefix}; #[cfg(not(test))] use iroha_smart_contract_utils::{decode_with_length_prefix_from_raw, encode_and_execute}; -pub use smart_contract::{data_model, parse}; +pub use smart_contract::{data_model, parse, stub_getrandom}; pub mod default; pub mod permission; diff --git a/smart_contract/executor/src/permission.rs b/smart_contract/executor/src/permission.rs index e08040fe76a..d733d0c57c9 100644 --- a/smart_contract/executor/src/permission.rs +++ b/smart_contract/executor/src/permission.rs @@ -3,7 +3,7 @@ use alloc::borrow::ToOwned as _; use iroha_schema::IntoSchema; -use iroha_smart_contract::QueryOutputCursor; +use iroha_smart_contract::{data_model::permission::PermissionToken, QueryOutputCursor}; use iroha_smart_contract_utils::debug::DebugExpectExt as _; use serde::{de::DeserializeOwned, Serialize}; @@ -338,3 +338,20 @@ impl From<&T> for OnlyGenesis { Self } } + +/// Iterator over all accounts and their permission tokens +pub(crate) fn accounts_permission_tokens() -> impl Iterator { + FindAllAccounts + .execute() + .dbg_expect("failed to query all accounts") + .into_iter() + .map(|account| account.dbg_expect("failed to retrieve account")) + .flat_map(|account| { + FindPermissionTokensByAccountId::new(account.id().clone()) + .execute() + .dbg_expect("failed to query permission token for account") + .into_iter() + .map(|token| token.dbg_expect("failed to retrieve permission token")) + .map(move |token| (account.id().clone(), token)) + }) +}
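
The `accounts_permission_tokens` helper added just above closes a real gap: when a domain, account, asset definition, or trigger is unregistered, every permission token that still references it is now revoked first (see the `visit_unregister_*` hunks earlier in this diff), so no account is left holding a token for an object that no longer exists. A standalone miniature of that sweep, with plain strings standing in for account ids and tokens:

```rust
/// Miniature of the revoke-before-unregister sweep: `tokens` plays the
/// role of `accounts_permission_tokens()`, and the predicate plays the
/// role of the `is_token_*_associated` helpers added in this diff.
fn revoke_associated(
    tokens: Vec<(String, String)>, // (owner account, token) pairs
    is_associated: impl Fn(&str) -> bool,
) -> Vec<(String, String)> {
    tokens
        .into_iter()
        .filter(|(_, token)| !is_associated(token)) // "revoke" = drop
        .collect()
}

fn main() {
    let tokens = vec![
        ("alice".to_owned(), "CanUnregisterDomain(wonderland)".to_owned()),
        ("bob".to_owned(), "CanUnregisterDomain(chess)".to_owned()),
    ];
    // Unregistering `wonderland` must strip alice's now-dangling token.
    let left = revoke_associated(tokens, |t| t.contains("wonderland"));
    assert_eq!(left.len(), 1);
}
```
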
diff --git a/smart_contract/src/lib.rs b/smart_contract/src/lib.rs index b338ef1f6dc..005b2c69338 100644 --- a/smart_contract/src/lib.rs +++ b/smart_contract/src/lib.rs @@ -19,7 +19,7 @@ use derive_more::Display; pub use iroha_data_model as data_model; use iroha_macro::error::ErrorTryFromEnum; pub use iroha_smart_contract_derive::main; -pub use iroha_smart_contract_utils::{debug, log}; +pub use iroha_smart_contract_utils::{debug, error, info, log, warn}; use iroha_smart_contract_utils::{ debug::DebugExpectExt as _, decode_with_length_prefix_from_raw, encode_and_execute, }; @@ -43,6 +43,39 @@ unsafe extern "C" fn _iroha_smart_contract_dealloc(offset: *mut u8, len: usize) let _box = Box::from_raw(core::slice::from_raw_parts_mut(offset, len)); } +/// Stub for [`getrandom::getrandom()`] for Iroha smart contracts. +/// Prints a log message with [`error!`] and panics. +/// +/// Required in order for crates like `iroha_crypto` to compile. Should never be called. +/// +/// # Panics +/// +/// Always panics with [`unimplemented!()`]. +/// +/// # Errors +/// +/// No errors, always panics. +/// +/// # Example +/// +/// ``` +/// // Cargo.toml +/// // getrandom = { version = "0.2", features = ["custom"] } +/// +/// getrandom::register_custom_getrandom!(iroha_smart_contract::stub_getrandom); +/// ``` +#[cfg(not(test))] +pub fn stub_getrandom(_dest: &mut [u8]) -> Result<(), getrandom::Error> { + const ERROR_MESSAGE: &str = + "`getrandom()` is not implemented. To provide your custom function \ + see https://docs.rs/getrandom/latest/getrandom/macro.register_custom_getrandom.html. \ + Be aware that your function must give the same result on different peers at the same execution round, + and keep in mind the consequences of a poorly implemented random function."; + + error!(ERROR_MESSAGE); + unimplemented!("{ERROR_MESSAGE}") +} + /// Macro to parse literal as a type. Panics if failed. /// /// # Example /// @@ -147,8 +180,9 @@ pub trait ExecuteQueryOnHost: Sized { fn execute(self) -> Result, ValidationFail>; } -impl ExecuteQueryOnHost for Q +impl ExecuteQueryOnHost for Q where + Q: Query + Encode, Q::Output: DecodeAll, >::Error: core::fmt::Debug, { @@ -193,8 +227,9 @@ where } } -impl ExecuteQueryOnHost for QueryRequest +impl ExecuteQueryOnHost for QueryRequest where + Q: Query + Encode, Q::Output: DecodeAll, >::Error: core::fmt::Debug, { @@ -404,7 +439,6 @@ mod host { pub(super) fn execute_instruction(ptr: *const u8, len: usize) -> *const u8; /// Get payload for smart contract `main()` entrypoint. - /// /// # Warning /// /// This function does transfer ownership of the result to the caller diff --git a/smart_contract/trigger/src/lib.rs b/smart_contract/trigger/src/lib.rs index 0edf23d5dd4..413f6f2be77 100644 --- a/smart_contract/trigger/src/lib.rs +++ b/smart_contract/trigger/src/lib.rs @@ -9,7 +9,7 @@ pub use iroha_smart_contract_utils::debug; #[cfg(not(test))] use iroha_smart_contract_utils::decode_with_length_prefix_from_raw; pub use iroha_trigger_derive::main; -pub use smart_contract::data_model; +pub use smart_contract::{data_model, stub_getrandom};
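
Re-exporting `stub_getrandom` from the trigger crate lets WASM authors satisfy the `getrandom` link requirement in one line. The snippet below mirrors the doc example in the `smart_contract/src/lib.rs` hunk above; `register_custom_getrandom!` is the real registration macro from `getrandom` 0.2, and the stub deliberately panics because any genuinely random source would make peers diverge at the same execution round:

```rust
// In a smart contract or trigger crate that (transitively) depends on
// `getrandom` with the `custom` feature enabled:
getrandom::register_custom_getrandom!(iroha_smart_contract::stub_getrandom);
```
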
pub mod log { //! WASM logging utilities diff --git a/tools/kagami/src/crypto.rs b/tools/kagami/src/crypto.rs index 22c8c262e68..7d5b2038ab4 100644 --- a/tools/kagami/src/crypto.rs +++ b/tools/kagami/src/crypto.rs @@ -56,7 +56,7 @@ impl RunArgs for Args { let key_pair = self.key_pair()?; writeln!(writer, "{}", &key_pair.public_key())?; writeln!(writer, "{}", &key_pair.private_key())?; - writeln!(writer, "{}", &key_pair.public_key().digest_function())?; + writeln!(writer, "{}", &key_pair.public_key().algorithm())?; } else { let key_pair = self.key_pair()?; writeln!( @@ -67,7 +67,7 @@ impl RunArgs for Args { writeln!( writer, "Private key ({}): \"{}\"", - &key_pair.public_key().digest_function(), + &key_pair.public_key().algorithm(), &key_pair.private_key() )?; } @@ -78,24 +78,23 @@ impl Args { fn key_pair(self) -> color_eyre::Result { let algorithm = self.algorithm.0; - let config = KeyGenConfiguration::default().with_algorithm(algorithm); - let key_pair = match (self.seed, self.private_key) { - (None, None) => KeyPair::generate_with_configuration(config), + let configuration = match (self.seed, self.private_key) { + (None, None) => KeyGenConfiguration::from_random(), (None, Some(private_key_hex)) => { let private_key = PrivateKey::from_hex(algorithm, private_key_hex.as_ref()) .wrap_err("Failed to decode private key")?; - KeyPair::generate_with_configuration(config.use_private_key(private_key)) + KeyGenConfiguration::from_private_key(private_key) } (Some(seed), None) => { let seed: Vec = seed.as_bytes().into(); - KeyPair::generate_with_configuration(config.use_seed(seed)) + KeyGenConfiguration::from_seed(seed) } _ => unreachable!("Clap group invariant"), - } - .wrap_err("Failed to generate key pair")?; + }; - Ok(key_pair) + KeyPair::generate_with_configuration(configuration.with_algorithm(algorithm)) + .wrap_err("Failed to generate key pair") } } diff --git a/tools/swarm/Cargo.toml b/tools/swarm/Cargo.toml index bdc30322ae1..da057384bb0 100644 --- a/tools/swarm/Cargo.toml +++ b/tools/swarm/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -iroha_crypto.workspace = true +iroha_crypto = { workspace = true, default-features = true } iroha_data_model.workspace = true iroha_primitives.workspace = true color-eyre.workspace = true
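
The `kagami crypto` rewrite above is the visible face of a `KeyGenConfiguration` API change: the old mutating builder (`default().use_seed(..)`, `.use_private_key(..)`) gives way to explicit constructors. A sketch of the resulting flow, with constructor and method names as they appear throughout this diff (the wrapper function and the exact error type are illustrative assumptions):

```rust
use iroha_crypto::{Algorithm, KeyGenConfiguration, KeyPair, PrivateKey};

/// Illustrative wrapper: pick the configuration source, then generate.
fn make_key_pair(
    algorithm: Algorithm,
    seed: Option<Vec<u8>>,
    private_key: Option<PrivateKey>,
) -> Result<KeyPair, iroha_crypto::error::Error> {
    let configuration = match (seed, private_key) {
        (None, None) => KeyGenConfiguration::from_random(),
        (Some(seed), None) => KeyGenConfiguration::from_seed(seed),
        (None, Some(private_key)) => KeyGenConfiguration::from_private_key(private_key),
        (Some(_), Some(_)) => unreachable!("callers pass at most one source"),
    };
    KeyPair::generate_with_configuration(configuration.with_algorithm(algorithm))
}
```
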
diff --git a/tools/swarm/src/cli.rs b/tools/swarm/src/cli.rs index f7185e9519c..fc51fec01be 100644 --- a/tools/swarm/src/cli.rs +++ b/tools/swarm/src/cli.rs @@ -10,6 +10,14 @@ pub struct Cli { /// The Unicode `seed` string for deterministic key-generation. #[arg(long, short)] pub seed: Option, + /// Includes a health check configuration for each service in the Docker Compose output. + /// + /// The health checks use predefined settings. + /// + /// For more details on health check configurations in Docker Compose files, visit: + /// https://docs.docker.com/compose/compose-file/compose-file-v3/#healthcheck + #[arg(long)] + pub health_check: bool, /// Re-create the target file if it already exists. #[arg(long)] pub force: bool, diff --git a/tools/swarm/src/compose.rs b/tools/swarm/src/compose.rs index d6db4acb1d8..edc8b972d46 100644 --- a/tools/swarm/src/compose.rs +++ b/tools/swarm/src/compose.rs @@ -12,9 +12,12 @@ use iroha_crypto::{ error::Error as IrohaCryptoError, KeyGenConfiguration, KeyPair, PrivateKey, PublicKey, }; use iroha_data_model::{prelude::PeerId, ChainId}; -use iroha_primitives::addr::SocketAddr; +use iroha_primitives::addr::{socket_addr, SocketAddr}; use peer_generator::Peer; -use serde::{ser::Error as _, Serialize, Serializer}; +use serde::{ + ser::{Error as _, SerializeMap}, + Serialize, Serializer, +}; use crate::{cli::SourceParsed, util::AbsolutePath}; @@ -88,6 +91,17 @@ impl Serialize for PlatformArchitecture { } } +pub struct DockerComposeServiceBuilder { + chain_id: ChainId, + peer: Peer, + source: ServiceSource, + volumes: Vec<(String, String)>, + trusted_peers: BTreeSet, + genesis_public_key: PublicKey, + genesis_private_key: Option, + health_check: bool, +} + #[derive(Serialize, Debug)] pub struct DockerComposeService { #[serde(flatten)] @@ -99,18 +113,53 @@ pub struct DockerComposeService { init: AlwaysTrue, #[serde(skip_serializing_if = "ServiceCommand::is_none")] command: ServiceCommand, + #[serde(skip_serializing_if = "Option::is_none")] + healthcheck: Option, } -impl DockerComposeService { +impl DockerComposeServiceBuilder { pub fn new( chain_id: ChainId, - peer: &Peer, + peer: Peer, source: ServiceSource, volumes: Vec<(String, String)>, trusted_peers: BTreeSet, genesis_public_key: PublicKey, - genesis_private_key: Option, ) -> Self { + Self { + chain_id, + peer, + source, + volumes, + trusted_peers, + genesis_public_key, + genesis_private_key: None, + health_check: false, + } + } + + pub fn set_health_check(mut self, flag: bool) -> Self { + self.health_check = flag; + self + } + + pub fn submit_genesis_with(mut self, private_key: PrivateKey) -> Self { + self.genesis_private_key = Some(private_key); + self + } + + pub fn build(self) -> DockerComposeService { + let Self { + chain_id, + peer, + source, + volumes, + trusted_peers, + genesis_public_key, + genesis_private_key, + health_check, + } = self; + let ports = vec![ PairColon(peer.port_p2p, peer.port_p2p), PairColon(peer.port_api, peer.port_api), @@ -128,11 +177,11 @@ impl DockerComposeService { genesis_public_key, genesis_private_key, key_pair: peer.key_pair.clone(), - p2p_addr: peer.addr(peer.port_p2p), - api_addr: peer.addr(peer.port_api), + p2p_addr: socket_addr!(0.0.0.0:peer.port_p2p), + api_addr: socket_addr!(0.0.0.0:peer.port_api), }; - Self { + DockerComposeService { source, platform: PlatformArchitecture, command, @@ -140,6 +189,9 @@ impl DockerComposeService { volumes: volumes.into_iter().map(|(a, b)| PairColon(a, b)).collect(), ports, environment: compact_env.into(), + healthcheck: health_check.then_some(HealthCheck { + port: peer.port_api, + }), } } } @@ -180,6 +232,43 @@ impl Serialize for ServiceCommand { } } +/// Serializes as an Iroha health check according to the + [spec](https://docs.docker.com/compose/compose-file/compose-file-v3/#healthcheck).
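
Why the hand-written `Serialize` impl for the `HealthCheck` struct that follows: Compose expects five sibling keys, four of which are fixed constants here; only the probed port varies per peer. The probe counts committed blocks via the `/status/blocks` endpoint, so a peer only turns healthy once genesis has been committed. A standalone sketch of the emitted mapping (plain string formatting instead of serde; values mirror the constants below):

```rust
/// Renders the same five-entry mapping the custom `Serialize` impl
/// emits; only the API port varies per peer.
fn health_check_yaml(api_port: u16) -> String {
    format!(
        "healthcheck:\n  \
         test: test $(curl -s http://127.0.0.1:{api_port}/status/blocks) -gt 0\n  \
         interval: 2s\n  timeout: 1s\n  retries: 30\n  start_period: 4s"
    )
}

fn main() {
    // Matches the `iroha0` service in the expected YAML further down.
    println!("{}", health_check_yaml(8080));
}
```
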
+#[derive(Debug)] +struct HealthCheck { + #[allow(dead_code)] + port: u16, +} + +const HEALTH_CHECK_INTERVAL: &str = "2s"; // half of default pipeline time + +const HEALTH_CHECK_TIMEOUT: &str = "1s"; // status request usually resolves immediately + +const HEALTH_CHECK_RETRIES: u8 = 30u8; // try within one minute given the interval + +const HEALTH_CHECK_START_PERIOD: &str = "4s"; // default pipeline time + +impl Serialize for HealthCheck { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut map = serializer.serialize_map(Some(5))?; + map.serialize_entry( + "test", + &format!( + "test $(curl -s http://127.0.0.1:{}/status/blocks) -gt 0", + self.port + ), + )?; + map.serialize_entry("interval", HEALTH_CHECK_INTERVAL)?; + map.serialize_entry("timeout", HEALTH_CHECK_TIMEOUT)?; + map.serialize_entry("retries", &HEALTH_CHECK_RETRIES)?; + map.serialize_entry("start_period", HEALTH_CHECK_START_PERIOD)?; + map.end() + } +} + /// Serializes as `"{0}:{1}"` #[derive(derive_more::Display, Debug)] #[display(fmt = "{_0}:{_1}")] @@ -296,6 +385,7 @@ pub struct DockerComposeBuilder<'a> { pub peers: NonZeroU16, /// Crypto seed to use for keys generation pub seed: Option<&'a [u8]>, + pub health_check: bool, } impl DockerComposeBuilder<'_> { @@ -327,16 +417,15 @@ impl DockerComposeBuilder<'_> { DIR_CONFIG_IN_DOCKER.to_owned(), )]; - let trusted_peers: BTreeSet = - peers.values().map(peer_generator::Peer::id).collect(); + let trusted_peers: BTreeSet = peers.values().map(Peer::id_as_a_service).collect(); let mut peers_iter = peers.iter(); let first_peer_service = { let (name, peer) = peers_iter.next().expect("There is non-zero count of peers"); - let service = DockerComposeService::new( + let service = DockerComposeServiceBuilder::new( chain_id.clone(), - peer, + peer.clone(), service_source.clone(), volumes.clone(), trusted_peers @@ -345,17 +434,19 @@ impl DockerComposeBuilder<'_> { .cloned() .collect(), genesis_key_pair.public_key().clone(), - Some(genesis_key_pair.private_key().clone()), - ); + ) + .submit_genesis_with(genesis_key_pair.private_key().clone()) + .set_health_check(self.health_check) + .build(); (name.clone(), service) }; let services = peers_iter .map(|(name, peer)| { - let service = DockerComposeService::new( + let service = DockerComposeServiceBuilder::new( chain_id.clone(), - peer, + peer.clone(), service_source.clone(), volumes.clone(), trusted_peers @@ -366,8 +457,9 @@ impl DockerComposeBuilder<'_> { .cloned() .collect(), genesis_key_pair.public_key().clone(), - None, - ); + ) + .set_health_check(self.health_check) + .build(); (name.clone(), service) }) @@ -391,12 +483,10 @@ fn generate_key_pair( base_seed: Option<&[u8]>, additional_seed: &[u8], ) -> color_eyre::Result { - let cfg = base_seed - .map(|base| { - let seed: Vec<_> = base.iter().chain(additional_seed).copied().collect(); - KeyGenConfiguration::default().use_seed(seed) - }) - .unwrap_or_default(); + let cfg = base_seed.map_or_else(KeyGenConfiguration::from_random, |base| { + let seed: Vec<_> = base.iter().chain(additional_seed).copied().collect(); + KeyGenConfiguration::from_seed(seed) + }); KeyPair::generate_with_configuration(cfg) } @@ -413,6 +503,7 @@ mod peer_generator { const BASE_PORT_API: u16 = 8080; const BASE_SERVICE_NAME: &'_ str = "iroha"; + #[derive(Clone)] pub struct Peer { pub name: String, pub port_p2p: u16, @@ -421,15 +512,15 @@ mod peer_generator { } impl Peer { - pub fn id(&self) -> PeerId { - PeerId::new(&self.addr(self.port_p2p), self.key_pair.public_key()) - } - - pub fn 
addr(&self, port: u16) -> SocketAddr { - SocketAddr::Host(SocketAddrHost { + /// [`PeerId`] with an address containing service name as a host, therefore reachable + /// from other Docker Compose services. + pub fn id_as_a_service(&self) -> PeerId { + let address = SocketAddr::Host(SocketAddrHost { host: self.name.clone().into(), - port, - }) + port: self.port_p2p, + }); + + PeerId::new(address.clone(), self.key_pair.public_key().clone()) } } @@ -615,10 +706,11 @@ mod tests { let mut map = BTreeMap::new(); let chain_id = ChainId::new("00000000-0000-0000-0000-000000000000"); - let key_pair = KeyPair::generate_with_configuration( - KeyGenConfiguration::default().use_seed(vec![1, 5, 1, 2, 2, 3, 4, 1, 2, 3]), - ) - .unwrap(); + let key_pair = + KeyPair::generate_with_configuration(KeyGenConfiguration::from_seed(vec![ + 1, 5, 1, 2, 2, 3, 4, 1, 2, 3, + ])) + .unwrap(); map.insert( "iroha0".to_owned(), @@ -646,6 +738,7 @@ mod tests { )], init: AlwaysTrue, command: ServiceCommand::SubmitGenesis, + healthcheck: None, }, ); @@ -686,10 +779,9 @@ mod tests { fn empty_genesis_public_key_is_skipped_in_env() { let chain_id = ChainId::new("00000000-0000-0000-0000-000000000000"); - let key_pair = KeyPair::generate_with_configuration( - KeyGenConfiguration::default().use_seed(vec![0, 1, 2]), - ) - .unwrap(); + let key_pair = + KeyPair::generate_with_configuration(KeyGenConfiguration::from_seed(vec![0, 1, 2])) + .unwrap(); let env: FullPeerEnv = CompactPeerEnv { chain_id, @@ -716,6 +808,7 @@ mod tests { } #[test] + #[allow(clippy::too_many_lines)] fn generate_peers_deterministically() { let root = Path::new("/"); let seed = Some(b"iroha".to_vec()); @@ -732,6 +825,7 @@ mod tests { path: AbsolutePath::from_virtual(&PathBuf::from("/test/iroha-cloned"), root), }, seed, + health_check: true, } .build() .expect("should build with no errors"); @@ -748,8 +842,8 @@ mod tests { IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120F0321EB4139163C35F88BF78520FF7071499D7F4E79854550028A196C7B49E13 IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"5f8d1291bf6b762ee748a87182345d135fd167062857aa4f20ba39f25e74c4b0f0321eb4139163c35f88bf78520ff7071499d7f4e79854550028a196c7b49e13"}' - TORII_P2P_ADDR: iroha0:1337 - TORII_API_URL: iroha0:8080 + TORII_P2P_ADDR: 0.0.0.0:1337 + TORII_API_URL: 0.0.0.0:8080 IROHA_GENESIS_PUBLIC_KEY: ed01203420F48A9EEB12513B8EB7DAF71979CE80A1013F5F341C10DCDA4F6AA19F97A9 IROHA_GENESIS_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"5a6d5f06a90d29ad906e2f6ea8b41b4ef187849d0d397081a4a15ffcbe71e7c73420f48a9eeb12513b8eb7daf71979ce80a1013f5f341c10dcda4f6aa19f97a9"}' IROHA_GENESIS_FILE: /config/genesis.json @@ -761,6 +855,12 @@ mod tests { - ./config:/config init: true command: iroha --submit-genesis + healthcheck: + test: test $(curl -s http://127.0.0.1:8080/status/blocks) -gt 0 + interval: 2s + timeout: 1s + retries: 30 + start_period: 4s iroha1: build: ./iroha-cloned platform: linux/amd64 @@ -769,8 +869,8 @@ mod tests { IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120A88554AA5C86D28D0EEBEC497235664433E807881CD31E12A1AF6C4D8B0F026C IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"8d34d2c6a699c61e7a9d5aabbbd07629029dfb4f9a0800d65aa6570113edb465a88554aa5c86d28d0eebec497235664433e807881cd31e12a1af6c4d8b0f026c"}' - TORII_P2P_ADDR: iroha1:1338 - TORII_API_URL: iroha1:8081 + TORII_P2P_ADDR: 0.0.0.0:1338 + TORII_API_URL: 0.0.0.0:8081 IROHA_GENESIS_PUBLIC_KEY: ed01203420F48A9EEB12513B8EB7DAF71979CE80A1013F5F341C10DCDA4F6AA19F97A9 SUMERAGI_TRUSTED_PEERS: 
'[{"address":"iroha2:1339","public_key":"ed0120312C1B7B5DE23D366ADCF23CD6DB92CE18B2AA283C7D9F5033B969C2DC2B92F4"},{"address":"iroha3:1340","public_key":"ed0120854457B2E3D6082181DA73DC01C1E6F93A72D0C45268DC8845755287E98A5DEE"},{"address":"iroha0:1337","public_key":"ed0120F0321EB4139163C35F88BF78520FF7071499D7F4E79854550028A196C7B49E13"}]' ports: @@ -779,6 +879,12 @@ mod tests { volumes: - ./config:/config init: true + healthcheck: + test: test $(curl -s http://127.0.0.1:8081/status/blocks) -gt 0 + interval: 2s + timeout: 1s + retries: 30 + start_period: 4s iroha2: build: ./iroha-cloned platform: linux/amd64 @@ -787,8 +893,8 @@ mod tests { IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120312C1B7B5DE23D366ADCF23CD6DB92CE18B2AA283C7D9F5033B969C2DC2B92F4 IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"cf4515a82289f312868027568c0da0ee3f0fde7fef1b69deb47b19fde7cbc169312c1b7b5de23d366adcf23cd6db92ce18b2aa283c7d9f5033b969c2dc2b92f4"}' - TORII_P2P_ADDR: iroha2:1339 - TORII_API_URL: iroha2:8082 + TORII_P2P_ADDR: 0.0.0.0:1339 + TORII_API_URL: 0.0.0.0:8082 IROHA_GENESIS_PUBLIC_KEY: ed01203420F48A9EEB12513B8EB7DAF71979CE80A1013F5F341C10DCDA4F6AA19F97A9 SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha3:1340","public_key":"ed0120854457B2E3D6082181DA73DC01C1E6F93A72D0C45268DC8845755287E98A5DEE"},{"address":"iroha1:1338","public_key":"ed0120A88554AA5C86D28D0EEBEC497235664433E807881CD31E12A1AF6C4D8B0F026C"},{"address":"iroha0:1337","public_key":"ed0120F0321EB4139163C35F88BF78520FF7071499D7F4E79854550028A196C7B49E13"}]' ports: @@ -797,6 +903,12 @@ mod tests { volumes: - ./config:/config init: true + healthcheck: + test: test $(curl -s http://127.0.0.1:8082/status/blocks) -gt 0 + interval: 2s + timeout: 1s + retries: 30 + start_period: 4s iroha3: build: ./iroha-cloned platform: linux/amd64 @@ -805,8 +917,8 @@ mod tests { IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120854457B2E3D6082181DA73DC01C1E6F93A72D0C45268DC8845755287E98A5DEE IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"ab0e99c2b845b4ac7b3e88d25a860793c7eb600a25c66c75cba0bae91e955aa6854457b2e3d6082181da73dc01c1e6f93a72d0c45268dc8845755287e98a5dee"}' - TORII_P2P_ADDR: iroha3:1340 - TORII_API_URL: iroha3:8083 + TORII_P2P_ADDR: 0.0.0.0:1340 + TORII_API_URL: 0.0.0.0:8083 IROHA_GENESIS_PUBLIC_KEY: ed01203420F48A9EEB12513B8EB7DAF71979CE80A1013F5F341C10DCDA4F6AA19F97A9 SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha2:1339","public_key":"ed0120312C1B7B5DE23D366ADCF23CD6DB92CE18B2AA283C7D9F5033B969C2DC2B92F4"},{"address":"iroha1:1338","public_key":"ed0120A88554AA5C86D28D0EEBEC497235664433E807881CD31E12A1AF6C4D8B0F026C"},{"address":"iroha0:1337","public_key":"ed0120F0321EB4139163C35F88BF78520FF7071499D7F4E79854550028A196C7B49E13"}]' ports: @@ -815,6 +927,12 @@ mod tests { volumes: - ./config:/config init: true + healthcheck: + test: test $(curl -s http://127.0.0.1:8083/status/blocks) -gt 0 + interval: 2s + timeout: 1s + retries: 30 + start_period: 4s "#]]; expected.assert_eq(&yaml); } diff --git a/tools/swarm/src/main.rs b/tools/swarm/src/main.rs index 18e83f64be9..5616b626c63 100644 --- a/tools/swarm/src/main.rs +++ b/tools/swarm/src/main.rs @@ -23,6 +23,7 @@ fn main() -> Result<()> { source: image_source, outfile: target_file_raw, config_dir: config_dir_raw, + health_check, } = Cli::parse(); let seed = seed.map(String::into_bytes); @@ -52,6 +53,7 @@ fn main() -> Result<()> { image_source, peers, seed, + health_check, } .build_and_write(banner_enabled)?; diff --git a/torii/Cargo.toml b/torii/Cargo.toml index 
index e363f9f5e4a..aa9359f97f1 100644
--- a/torii/Cargo.toml
+++ b/torii/Cargo.toml
@@ -20,6 +20,8 @@ workspace = true
 [features]
 # Enables Telemetry (i.e. Status, Metrics, and API Version) endpoints
 telemetry = ["iroha_telemetry", "iroha_core/telemetry", "serde_json"]
+# Enables profiling endpoint
+profiling = ["pprof"]
 # Enables Data Model Schema endpoint
 schema = ["iroha_schema_gen"]
 
@@ -46,3 +48,5 @@ serde = { workspace = true, features = ["derive"] }
 serde_json = { workspace = true, optional = true }
 async-trait = { workspace = true }
 parity-scale-codec = { workspace = true, features = ["derive"] }
+# TODO: switch to the original crate once the fix is merged (https://github.com/tikv/pprof-rs/pull/241)
+pprof = { git = "https://github.com/Erigara/pprof-rs", branch = "fix_pointer_align", optional = true, default-features = false, features = ["protobuf-codec", "frame-pointer", "cpp"] }

diff --git a/torii/src/lib.rs b/torii/src/lib.rs
index cdf4a46537e..68507798239 100644
--- a/torii/src/lib.rs
+++ b/torii/src/lib.rs
@@ -90,17 +90,11 @@ impl Torii {
             .and_then(|| async { Ok::<_, Infallible>(routing::handle_health()) });
 
         let get_router = warp::get().and(
-            endpoint3(
-                routing::handle_pending_transactions,
-                warp::path(uri::PENDING_TRANSACTIONS)
-                    .and(add_state!(self.queue, self.sumeragi,))
-                    .and(routing::paginate()),
-            )
-            .or(warp::path(uri::CONFIGURATION)
+            warp::path(uri::CONFIGURATION)
                 .and(add_state!(self.kiso))
                 .and_then(|kiso| async move {
                     Ok::<_, Infallible>(WarpResult(routing::handle_get_configuration(kiso).await))
-                })),
+                }),
         );
 
         #[cfg(feature = "telemetry")]
@@ -133,6 +127,30 @@ impl Torii {
         let get_router = get_router.or(warp::path(uri::SCHEMA)
             .and_then(|| async { Ok::<_, Infallible>(routing::handle_schema().await) }));
 
+        #[cfg(feature = "profiling")]
+        let get_router = {
+            // `warp` panics if there is a `/` in the string given to the `warp::path` filter.
+            // The path filter has to be boxed to have a single uniform type during iteration.
+            let profile_router_path = uri::PROFILE
+                .split('/')
+                .skip_while(|p| p.is_empty())
+                .fold(warp::any().boxed(), |path_filter, path| {
+                    path_filter.and(warp::path(path)).boxed()
+                });
+
+            let profiling_lock = std::sync::Arc::new(tokio::sync::Mutex::new(()));
+            get_router.or(profile_router_path
+                .and(warp::query::<routing::profiling::ProfileParams>())
+                .and_then(move |params| {
+                    let profiling_lock = Arc::clone(&profiling_lock);
+                    async move {
+                        Ok::<_, Infallible>(
+                            routing::profiling::handle_profile(params, profiling_lock).await,
+                        )
+                    }
+                }))
+        };
+
         let post_router = warp::post()
             .and(
                 endpoint4(
 
                 ))
                 .and(body::versioned()),
             )
+            .or(endpoint3(
+                routing::handle_pending_transactions,
+                warp::path(uri::MATCHING_PENDING_TRANSACTIONS)
+                    .and(add_state!(self.queue, self.sumeragi))
+                    .and(body::versioned()),
+            ))
             .or(endpoint3(
                 routing::handle_queries,
                 warp::path(uri::QUERY)
@@ -279,6 +303,9 @@ pub enum Error {
     #[cfg(feature = "telemetry")]
     /// Failed to get Prometheus metrics
     Prometheus(#[source] eyre::Report),
+    #[cfg(feature = "profiling")]
+    /// Failed to get pprof profile
+    Pprof(#[source] eyre::Report),
     #[cfg(feature = "telemetry")]
     /// Failed to get status
     StatusFailure(#[source] eyre::Report),
@@ -315,6 +342,8 @@ impl Error {
             },
             #[cfg(feature = "telemetry")]
             Prometheus(_) | StatusFailure(_) => StatusCode::INTERNAL_SERVER_ERROR,
+            #[cfg(feature = "profiling")]
+            Pprof(_) => StatusCode::INTERNAL_SERVER_ERROR,
             ConfigurationFailure(_) => StatusCode::INTERNAL_SERVER_ERROR,
         }
     }
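As an aside, the boxed-fold trick in the hunk above generalizes to any multi-segment route. A minimal, self-contained sketch under assumed `warp` and `tokio` dependencies (the helper name and the port are illustrative, not from this diff):

```rust
use warp::{filters::BoxedFilter, Filter};

/// Matches a multi-segment path such as "debug/pprof/profile".
/// `warp::path` panics when given a string containing '/', so the path is
/// split into segments; boxing gives the fold accumulator one uniform type.
fn multi_segment_path(path: &'static str) -> BoxedFilter<()> {
    path.split('/')
        .skip_while(|segment| segment.is_empty())
        .fold(warp::any().boxed(), |filter, segment| {
            filter.and(warp::path(segment)).boxed()
        })
}

#[tokio::main]
async fn main() {
    // GET /debug/pprof/profile  ->  200 "ok"
    let route = multi_segment_path("debug/pprof/profile").map(|| "ok");
    warp::serve(route).run(([127, 0, 0, 1], 3030)).await;
}
```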
diff --git a/torii/src/routing.rs b/torii/src/routing.rs
index d909c7faec6..cc5a7ab590e 100644
--- a/torii/src/routing.rs
+++ b/torii/src/routing.rs
@@ -10,8 +10,7 @@ use eyre::{eyre, WrapErr};
 use futures::TryStreamExt;
 use iroha_config::client_api::ConfigurationDTO;
 use iroha_core::{
-    query::{pagination::Paginate, store::LiveQueryStoreHandle},
-    smartcontracts::query::ValidQueryRequest,
+    query::store::LiveQueryStoreHandle, smartcontracts::query::ValidQueryRequest,
     sumeragi::SumeragiHandle,
 };
 use iroha_data_model::{
@@ -24,6 +23,7 @@ use iroha_data_model::{
         cursor::ForwardCursor, http, sorting::Sorting, Pagination, QueryRequest,
         QueryWithParameters,
     },
+    transaction::TransactionPayload,
     BatchedResponse,
 };
 #[cfg(feature = "telemetry")]
@@ -146,20 +146,34 @@ pub async fn handle_schema() -> Json {
     reply::json(&iroha_schema_gen::build_schemas())
 }
 
+/// Check if two transactions are the same, comparing their contents but excluding the creation time.
+fn transaction_payload_eq_excluding_creation_time(
+    first: &TransactionPayload,
+    second: &TransactionPayload,
+) -> bool {
+    first.authority() == second.authority()
+        && first.instructions() == second.instructions()
+        && first.time_to_live() == second.time_to_live()
+        && first.metadata().eq(second.metadata())
+}
+
 #[iroha_futures::telemetry_future]
 pub async fn handle_pending_transactions(
     queue: Arc<Queue>,
     sumeragi: SumeragiHandle,
-    pagination: Pagination,
+    transaction: SignedTransaction,
 ) -> Result<Scale<Vec<SignedTransaction>>> {
     let query_response = sumeragi.apply_wsv(|wsv| {
         queue
             .all_transactions(wsv)
             .map(Into::into)
-            .paginate(pagination)
-            .collect::<Vec<_>>()
-        // TODO:
-        //.batched(fetch_size)
+            .filter(|current_transaction: &SignedTransaction| {
+                transaction_payload_eq_excluding_creation_time(
+                    current_transaction.payload(),
+                    transaction.payload(),
+                )
+            })
+            .collect()
     });
 
     Ok(Scale(query_response))
 }
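To make the matching rule concrete, here is a toy, self-contained model of the comparison; the struct and field names are illustrative stand-ins, not the real `iroha_data_model` types:

```rust
// Illustrative stand-in for the rule implemented above.
#[derive(Clone)]
struct Payload {
    authority: String,
    instructions: Vec<String>,
    time_to_live_ms: Option<u64>,
    creation_time_ms: u64,
}

// Mirrors transaction_payload_eq_excluding_creation_time: every field except
// the creation timestamp takes part in the comparison.
fn eq_excluding_creation_time(a: &Payload, b: &Payload) -> bool {
    a.authority == b.authority
        && a.instructions == b.instructions
        && a.time_to_live_ms == b.time_to_live_ms
}

fn main() {
    let submitted = Payload {
        authority: "alice@wonderland".to_owned(),
        instructions: vec!["Mint 1 rose".to_owned()],
        time_to_live_ms: Some(100_000),
        creation_time_ms: 1_700_000_000_000,
    };
    // The same content rebuilt later: only the timestamp differs.
    let rebuilt = Payload {
        creation_time_ms: 1_700_000_000_500,
        ..submitted.clone()
    };
    assert!(eq_excluding_creation_time(&submitted, &rebuilt));
}
```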
@@ -372,3 +386,81 @@ pub fn handle_status(
         Ok(reply)
     }
 }
+
+#[cfg(feature = "profiling")]
+pub mod profiling {
+    use std::num::{NonZeroU16, NonZeroU64};
+
+    use pprof::protos::Message;
+    use serde::{Deserialize, Serialize};
+
+    use super::*;
+
+    /// Query params used to configure profile gathering
+    #[derive(Serialize, Deserialize, Clone, Copy)]
+    pub struct ProfileParams {
+        /// How often to sample Iroha
+        #[serde(default = "ProfileParams::default_frequency")]
+        frequency: NonZeroU16,
+        /// How long to sample Iroha
+        #[serde(default = "ProfileParams::default_seconds")]
+        seconds: NonZeroU64,
+    }
+
+    impl ProfileParams {
+        fn default_frequency() -> NonZeroU16 {
+            NonZeroU16::new(99).unwrap()
+        }
+
+        fn default_seconds() -> NonZeroU64 {
+            NonZeroU64::new(10).unwrap()
+        }
+    }
+
+    /// Serve pprof protobuf profiles
+    pub async fn handle_profile(
+        ProfileParams { frequency, seconds }: ProfileParams,
+        profiling_lock: std::sync::Arc<tokio::sync::Mutex<()>>,
+    ) -> Result<Vec<u8>> {
+        match profiling_lock.try_lock() {
+            Ok(_guard) => {
+                let mut body = Vec::new();
+                {
+                    // Create the profiler guard
+                    let guard = pprof::ProfilerGuardBuilder::default()
+                        .frequency(frequency.get() as i32)
+                        .blocklist(&["libc", "libgcc", "pthread", "vdso"])
+                        .build()
+                        .map_err(|e| {
+                            Error::Pprof(eyre::eyre!(
+                                "pprof::ProfilerGuardBuilder::build failed: {}",
+                                e
+                            ))
+                        })?;
+
+                    // Collect profiles for the requested number of seconds
+                    tokio::time::sleep(tokio::time::Duration::from_secs(seconds.get())).await;
+
+                    let report = guard
+                        .report()
+                        .build()
+                        .map_err(|e| Error::Pprof(eyre::eyre!("failed to generate a report: {}", e)))?;
+
+                    let profile = report.pprof().map_err(|e| {
+                        Error::Pprof(eyre::eyre!("failed to generate a pprof profile from the report: {}", e))
+                    })?;
+
+                    profile.write_to_vec(&mut body).map_err(|e| {
+                        Error::Pprof(eyre::eyre!("failed to encode the pprof profile into bytes: {}", e))
+                    })?;
+                }
+
+                Ok(body)
+            }
+            Err(_) => {
+                // A profile is already being collected; return an error
+                Err(Error::Pprof(eyre::eyre!("profiling is already running")))
+            }
+        }
+    }
+}
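For completeness, a hypothetical client-side sketch of exercising the new profiling endpoint. Everything here is an assumption for illustration: the mount point of `uri::PROFILE` is not shown in this diff, and `reqwest`/`tokio` are assumed dependencies:

```rust
// Hypothetical client: fetch a 10-second CPU profile sampled at 99 Hz from a
// torii built with the `profiling` feature. The path /debug/pprof/profile is
// a guess; substitute whatever `uri::PROFILE` resolves to in your build.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let url = "http://127.0.0.1:8080/debug/pprof/profile?frequency=99&seconds=10";
    let bytes = reqwest::get(url).await?.bytes().await?;
    // The response is a protobuf-encoded pprof profile; save it for analysis
    // with pprof-compatible tooling.
    std::fs::write("profile.pb", &bytes)?;
    Ok(())
}
```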