diff --git a/.github/workflows/iroha2-dev-pr.yml b/.github/workflows/iroha2-dev-pr.yml index 1d5526cff59..7846ef54715 100644 --- a/.github/workflows/iroha2-dev-pr.yml +++ b/.github/workflows/iroha2-dev-pr.yml @@ -28,12 +28,6 @@ jobs: - name: Check genesis.json if: always() run: ./scripts/tests/consistency.sh genesis - - name: Check client/config.json - if: always() - run: ./scripts/tests/consistency.sh client - - name: Check peer/config.json - if: always() - run: ./scripts/tests/consistency.sh peer - name: Check schema.json if: always() run: ./scripts/tests/consistency.sh schema @@ -144,11 +138,10 @@ jobs: - uses: Swatinem/rust-cache@v2 - name: Build binaries run: | - cargo build --bin iroha_client_cli - cargo build --bin kagami - cargo build --bin iroha + cargo build -p iroha_client_cli -p kagami -p iroha - name: Setup test Iroha 2 environment on the bare metal run: | + pip3 install -r scripts/requirements.txt --no-input --break-system-packages ./scripts/test_env.py setup - name: Mark binaries as executable run: | @@ -159,6 +152,10 @@ jobs: poetry install - name: Run client cli tests working-directory: client_cli/pytests + env: + # prepared by `test_env.py` + CLIENT_CLI_BINARY: ../../test/iroha_client_cli + CLIENT_CLI_CONFIG: ../../test/client.toml run: | poetry run pytest - name: Cleanup test environment diff --git a/.github/workflows/iroha2-release-pr.yml b/.github/workflows/iroha2-release-pr.yml index cd1a94b8623..99067c687fb 100644 --- a/.github/workflows/iroha2-release-pr.yml +++ b/.github/workflows/iroha2-release-pr.yml @@ -36,6 +36,7 @@ jobs: cargo build --bin iroha - name: Setup test Iroha 2 environment on bare metal run: | + pip3 install -r scripts/requirements.txt --no-input --break-system-packages ./scripts/test_env.py setup - name: Mark binaries as executable run: | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5ef9c16a5ec..854f2302c4d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -222,7 +222,7 @@ Follow these commit guidelines: - To run the source-code based tests, execute [`cargo test`](https://doc.rust-lang.org/cargo/commands/cargo-test.html) in the Iroha root. Note that this is a long process. - To run benchmarks, execute [`cargo bench`](https://doc.rust-lang.org/cargo/commands/cargo-bench.html) from the Iroha root. To help debug benchmark outputs, set the `debug_assertions` environment variable like so: `RUSTFLAGS="--cfg debug_assertions" cargo bench`. - If you are working on a particular component, be mindful that when you run `cargo test` in a [workspace](https://doc.rust-lang.org/cargo/reference/workspaces.html), it will only run the tests for that workspace, which usually doesn't include any [integration tests](https://www.testingxperts.com/blog/what-is-integration-testing). -- If you want to test your changes on a minimal network, the provided [`docker-compose.yml`](docker-compose.yml) creates a network of 4 Iroha peers in docker containers that can be used to test consensus and asset propagation-related logic. We recommend interacting with that network using either [`iroha-python`](https://github.com/hyperledger/iroha-python), or the included `iroha_client_cli`. +- If you want to test your changes on a minimal network, the provided [`docker-compose.yml`](configs/swarm/docker-compose.yml) creates a network of 4 Iroha peers in docker containers that can be used to test consensus and asset propagation-related logic. 
We recommend interacting with that network using either [`iroha-python`](https://github.com/hyperledger/iroha-python), or the included `iroha_client_cli`. - Do not remove failing tests. Even tests that are ignored will be run in our pipeline eventually. - If possible, please benchmark your code both before and after making your changes, as a significant performance regression can break existing users' installations. diff --git a/Cargo.lock b/Cargo.lock index 4b76a0de150..d2bad0789f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -622,6 +622,7 @@ dependencies = [ "iana-time-zone", "js-sys", "num-traits", + "serde", "wasm-bindgen", "windows-targets 0.48.5", ] @@ -1031,20 +1032,6 @@ dependencies = [ "itertools 0.10.5", ] -[[package]] -name = "crossbeam" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" -dependencies = [ - "cfg-if", - "crossbeam-channel", - "crossbeam-deque", - "crossbeam-epoch", - "crossbeam-queue", - "crossbeam-utils", -] - [[package]] name = "crossbeam-channel" version = "0.5.9" @@ -1302,6 +1289,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc" dependencies = [ "powerfmt", + "serde", ] [[package]] @@ -2626,6 +2614,7 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", + "serde", ] [[package]] @@ -2685,15 +2674,16 @@ dependencies = [ "iroha_telemetry", "iroha_torii", "iroha_wasm_builder", + "json5", "once_cell", "owo-colors", "path-absolutize", - "serde_json", "serial_test", "supports-color 2.1.0", "tempfile", "thread-local-panic-hook", "tokio", + "toml 0.8.8", "tracing", "vergen", ] @@ -2720,18 +2710,22 @@ dependencies = [ "iroha_logger", "iroha_primitives", "iroha_telemetry", + "iroha_torii_const", "iroha_version", "iroha_wasm_builder", + "merge", "once_cell", "parity-scale-codec", "rand", "serde", "serde_json", + "serde_with", "tempfile", "test_network", "thiserror", "tokio", "tokio-tungstenite", + "toml 0.8.8", "tracing-flame", "tracing-subscriber", "tungstenite", @@ -2764,21 +2758,27 @@ dependencies = [ "displaydoc", "expect-test", "eyre", + "hex", "iroha_config_base", "iroha_crypto", "iroha_data_model", "iroha_genesis", "iroha_primitives", "json5", + "merge", + "nonzero_ext", "once_cell", "proptest", "serde", "serde_json", + "serde_with", "stacker", "strum 0.25.0", "thiserror", + "toml 0.8.8", "tracing", "tracing-subscriber", + "trybuild", "url", ] @@ -2786,27 +2786,15 @@ dependencies = [ name = "iroha_config_base" version = "2.0.0-pre-rc.20" dependencies = [ - "crossbeam", - "displaydoc", + "derive_more", + "drop_bomb", "eyre", - "iroha_config_derive", - "iroha_crypto", - "json5", - "parking_lot", + "merge", + "num-traits", "serde", - "serde_json", + "serde_with", "thiserror", -] - -[[package]] -name = "iroha_config_derive" -version = "2.0.0-pre-rc.20" -dependencies = [ - "iroha_macro_utils", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", + "toml 0.8.8", ] [[package]] @@ -3235,6 +3223,7 @@ dependencies = [ "pathdiff", "serde", "serde_json", + "serde_with", "serde_yaml", ] @@ -3294,6 +3283,7 @@ dependencies = [ "iroha_primitives", "iroha_schema_gen", "iroha_telemetry", + "iroha_torii_const", "iroha_torii_derive", "iroha_version", "parity-scale-codec", @@ -3305,6 +3295,13 @@ dependencies = [ "warp", ] +[[package]] +name = "iroha_torii_const" +version = "2.0.0-pre-rc.20" 
+dependencies = [ + "iroha_primitives", +] + [[package]] name = "iroha_torii_derive" version = "2.0.0-pre-rc.20" @@ -3747,6 +3744,28 @@ dependencies = [ "autocfg", ] +[[package]] +name = "merge" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10bbef93abb1da61525bbc45eeaff6473a41907d19f8f9aa5168d214e10693e9" +dependencies = [ + "merge_derive", + "num-traits", +] + +[[package]] +name = "merge_derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "209d075476da2e63b4b29e72a2ef627b840589588e71400a25e3565c4f849d07" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "mime" version = "0.3.17" @@ -3862,6 +3881,12 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nonzero_ext" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -4337,12 +4362,11 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro-crate" -version = "2.0.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97dc5fea232fc28d2f597b37c4876b348a40e33f3b02cc975c8d006d78d94b1a" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" dependencies = [ - "toml_datetime", - "toml_edit", + "toml_edit 0.20.2", ] [[package]] @@ -4929,6 +4953,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -4947,8 +4980,15 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64cd236ccc1b7a29e7e2739f27c0b2dd199804abc4290e32f59f3b68d6405c23" dependencies = [ + "base64", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.1.0", "serde", + "serde_json", "serde_with_macros", + "time", ] [[package]] @@ -5658,11 +5698,26 @@ dependencies = [ "serde", ] +[[package]] +name = "toml" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.21.0", +] + [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +dependencies = [ + "serde", +] [[package]] name = "toml_edit" @@ -5675,6 +5730,19 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" +dependencies = [ + "indexmap 2.1.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" version = "0.10.2" @@ -6331,7 +6399,7 @@ dependencies = [ "serde", "serde_derive", "sha2", - "toml", + "toml 0.5.11", "windows-sys 0.48.0", "zstd", ] diff --git a/Cargo.toml b/Cargo.toml index b961c3d7966..7764075fca4 100644 --- 
a/Cargo.toml +++ b/Cargo.toml @@ -18,6 +18,7 @@ iroha = { path = "cli" } iroha_dsl = { version = "=2.0.0-pre-rc.20", path = "dsl" } iroha_torii = { version = "=2.0.0-pre-rc.20", path = "torii" } iroha_torii_derive = { version = "=2.0.0-pre-rc.20", path = "torii/derive" } +iroha_torii_const = { version = "=2.0.0-pre-rc.20", path = "torii/const" } iroha_macro_utils = { version = "=2.0.0-pre-rc.20", path = "macro/utils" } iroha_telemetry = { version = "=2.0.0-pre-rc.20", path = "telemetry" } iroha_telemetry_derive = { version = "=2.0.0-pre-rc.20", path = "telemetry/derive" } @@ -30,7 +31,6 @@ iroha_data_model_derive = { version = "=2.0.0-pre-rc.20", path = "data_model/der iroha_client = { version = "=2.0.0-pre-rc.20", path = "client" } iroha_config = { version = "=2.0.0-pre-rc.20", path = "config" } iroha_config_base = { version = "=2.0.0-pre-rc.20", path = "config/base" } -iroha_config_derive = { version = "=2.0.0-pre-rc.20", path = "config/base/derive" } iroha_schema_gen = { version = "=2.0.0-pre-rc.20", path = "schema/gen" } iroha_schema = { version = "=2.0.0-pre-rc.20", path = "schema", default-features = false } iroha_schema_derive = { version = "=2.0.0-pre-rc.20", path = "schema/derive" } @@ -65,6 +65,7 @@ syn2 = { package = "syn", version = "2.0.38", default-features = false } quote = "1.0.33" manyhow = { version = "0.8.1", features = ["darling"] } darling = "0.20.3" +drop_bomb = "0.1.5" futures = { version = "0.3.28", default-features = false } tokio = "1.33.0" @@ -88,6 +89,7 @@ impls = "1.0.3" base64 = { version = "0.21.4", default-features = false } hex = { version = "0.4.3", default-features = false } +nonzero_ext = "0.3.0" fixnum = { version = "0.9.2", default-features = false } url = "2.4.1" @@ -132,6 +134,7 @@ serde_yaml = "0.9.25" serde_with = { version = "3.3.0", default-features = false } parity-scale-codec = { version = "3.6.5", default-features = false } json5 = "0.4.1" +toml = "0.8.8" [workspace.lints] rustdoc.private_doc_tests = "deny" @@ -206,7 +209,6 @@ members = [ "client_cli", "config", "config/base", - "config/base/derive", "core", "core/test_network", "crypto", @@ -242,6 +244,7 @@ members = [ "tools/wasm_test_runner", "torii", "torii/derive", + "torii/const", "version", "version/derive", "wasm_codec", diff --git a/Dockerfile b/Dockerfile index 5b78c51c074..fbc66fc751c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -29,6 +29,7 @@ ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER=/x86_64-linux-musl-native/bin/ # builder stage WORKDIR /iroha COPY . . +# FIXME: shouldn't it only build `iroha`, `iroha_client_cli`, and `kagami`? 
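+# (If so, a per-package build mirroring the CI change in iroha2-dev-pr.yml above, e.g. `cargo build -p iroha -p iroha_client_cli -p kagami --target x86_64-unknown-linux-musl --profile deploy`, would be one option.)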
RUN cargo build --target x86_64-unknown-linux-musl --profile deploy @@ -39,9 +40,12 @@ ARG STORAGE=/storage ARG TARGET_DIR=/iroha/target/x86_64-unknown-linux-musl/deploy ENV BIN_PATH=/usr/local/bin/ ENV CONFIG_DIR=/config + +# FIXME: these are obsolete ENV IROHA2_CONFIG_PATH=$CONFIG_DIR/config.json ENV IROHA2_GENESIS_PATH=$CONFIG_DIR/genesis.json ENV KURA_BLOCK_STORE_PATH=$STORAGE + ENV WASM_DIRECTORY=/app/.cache/wasmtime ENV USER=iroha ENV UID=1001 diff --git a/README.md b/README.md index d89ee642b3e..c1306066ac8 100644 --- a/README.md +++ b/README.md @@ -124,9 +124,7 @@ docker compose up With the `docker-compose` instance running, use [Iroha Client CLI](./client_cli/README.md): ```bash -cp configs/client/config.json target/debug/config.json -cd target/debug -./iroha_client_cli --help +cargo run --bin iroha_client_cli -- --config ./configs/swarm/client.toml ``` ## Integration @@ -166,12 +164,7 @@ A brief overview on how to configure and maintain an Iroha instance: There is a set of configuration parameters that could be passed either through a configuration file or environment variables. ```shell -# look for `config.json` or `config.json5` (won't fail if files are not found) -iroha - -# Override default config path through CLI or ENV -iroha --config /path/to/config.json -IROHA_CONFIG=/path/to/config.json iroha +iroha --config /path/to/config.toml ``` **Note:** detailed configuration reference is [work in progress](https://github.com/hyperledger/iroha-2-docs/issues/392). @@ -207,11 +200,7 @@ The details of the `Health` endpoint can be found in the [API Reference > Torii Iroha can produce both JSON-formatted as well as `prometheus`-readable metrics at the `status` and `metrics` endpoints respectively. -The [`prometheus`](https://prometheus.io/docs/introduction/overview/) monitoring system is the de-facto standard for monitoring long-running services such as an Iroha peer. In order to get started, [install `prometheus`](https://prometheus.io/docs/introduction/first_steps/) and execute the following in the project root: - -``` -prometheus --config.file=configs/prometheus.yml -``` +The [`prometheus`](https://prometheus.io/docs/introduction/overview/) monitoring system is the de-facto standard for monitoring long-running services such as an Iroha peer. In order to get started, [install `prometheus`](https://prometheus.io/docs/introduction/first_steps/) and use `configs/prometheus.template.yml` for configuration. ### Storage diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 3d744eded61..4ab631e22e2 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -65,7 +65,8 @@ thread-local-panic-hook = { version = "0.1.0", optional = true } [dev-dependencies] serial_test = "2.0.0" tempfile = { workspace = true } -serde_json = { workspace = true } +toml = { workspace = true } +json5 = { workspace = true } futures = { workspace = true } path-absolutize = { workspace = true } assertables = "7" diff --git a/cli/README.md b/cli/README.md index 87f1c1aed06..5ba8d269b39 100644 --- a/cli/README.md +++ b/cli/README.md @@ -82,17 +82,20 @@ You may deploy Iroha as a [native binary](#native-binary) or by using [Docker](#docker). ### Native binary + + 1. Prepare a deployment environment.
If you plan on running the `iroha` peer binary from the directory `deploy`, copy `config.json` and `genesis.json`: ```bash - cp ./target/release/iroha - cp ./configs/peer/config.json deploy - cp ./configs/peer/genesis.json deploy + # FIXME + # cp ./target/release/iroha + # cp ./configs/peer/config.json deploy + # cp ./configs/peer/genesis.json deploy ``` -2. Make necessary edits to `config.json` and `genesis.json`, such as: +2. Make the necessary edits to `config.json` and `genesis.json`, such as: - Generate new key pairs and add their values to `genesis.json`) - Adjust the port values for your initial set of trusted peers @@ -111,7 +114,7 @@ You may deploy Iroha as a [native binary](#native-binary) or by using [Docker](# ### Docker -We provide a sample configuration for Docker in [`docker-compose.yml`](../docker-compose.yml). We highly recommend that you adjust the `config.json` to include a set of new key pairs. +We provide a sample configuration for Docker in [`docker-compose.yml`](../configs/swarm/docker-compose.yml). We highly recommend that you adjust the `config.json` to include a set of new key pairs. [Generate the keys](#generating-keys) and put them into `services.*.environment` in `docker-compose.yml`. Don't forget to update the public keys of `TRUSTED_PEERS`. diff --git a/cli/src/lib.rs b/cli/src/lib.rs index afb4f53e538..5dbf5318efd 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -6,16 +6,10 @@ //! should be constructed externally: (see `main.rs`). #[cfg(debug_assertions)] use core::sync::atomic::{AtomicBool, Ordering}; -use std::{path::PathBuf, sync::Arc}; +use std::{path::Path, sync::Arc}; use color_eyre::eyre::{eyre, Result, WrapErr}; -use iroha_config::{ - base::proxy::{LoadFromDisk, LoadFromEnv, Override}, - genesis::ParsedConfiguration as ParsedGenesisConfiguration, - iroha::{Configuration, ConfigurationProxy}, - path::Path, - telemetry::Configuration as TelemetryConfiguration, -}; +use iroha_config::parameters::{actual::Root as Config, user::CliContext}; use iroha_core::{ block_sync::{BlockSynchronizer, BlockSynchronizerHandle}, gossiper::{TransactionGossiper, TransactionGossiperHandle}, @@ -28,11 +22,10 @@ use iroha_core::{ smartcontracts::isi::Registrable as _, snapshot::{try_read_snapshot, SnapshotMaker, SnapshotMakerHandle}, sumeragi::{SumeragiHandle, SumeragiStartArgs}, - tx::PeerId, IrohaNetwork, }; use iroha_data_model::prelude::*; -use iroha_genesis::GenesisNetwork; +use iroha_genesis::{GenesisNetwork, RawGenesisBlock}; use iroha_logger::actor::LoggerHandle; use iroha_torii::Torii; use tokio::{ @@ -201,28 +194,29 @@ impl Iroha { #[allow(clippy::too_many_lines)] #[iroha_logger::log(name = "init", skip_all)] // This is actually easier to understand as a linear sequence of init statements. 
pub async fn new( - config: Configuration, + config: Config, genesis: Option<GenesisNetwork>, logger: LoggerHandle, ) -> Result<Self> { - let listen_addr = config.torii.p2p_addr.clone(); - let network = IrohaNetwork::start(listen_addr, config.sumeragi.key_pair.clone()) - .await - .wrap_err("Unable to start P2P-network")?; + let network = IrohaNetwork::start( + config.common.p2p_address.clone(), + config.common.key_pair.clone(), + ) + .await + .wrap_err("Unable to start P2P-network")?; let (events_sender, _) = broadcast::channel(10000); let world = World::with( - [genesis_domain(config.genesis.public_key.clone())], - config.sumeragi.trusted_peers.peers.clone(), + [genesis_domain(config.genesis.public_key().clone())], + config.sumeragi.trusted_peers.clone(), ); let kura = Kura::new(&config.kura)?; - let live_query_store_handle = - LiveQueryStore::from_configuration(config.live_query_store).start(); + let live_query_store_handle = LiveQueryStore::from_config(config.live_query_store).start(); let block_count = kura.init()?; let wsv = try_read_snapshot( - &config.snapshot.dir_path, + &config.snapshot.store_dir, &kura, live_query_store_handle.clone(), block_count, @@ -230,8 +224,8 @@ impl Iroha { ) .map_or_else( |error| { iroha_logger::warn!(%error, "Failed to load wsv from snapshot, creating empty wsv"); - WorldStateView::from_configuration( - *config.wsv, + WorldStateView::from_config( + config.chain_wide, world, Arc::clone(&kura), live_query_store_handle.clone(), @@ -246,8 +240,8 @@ impl Iroha { }, ); - let queue = Arc::new(Queue::from_configuration(&config.queue)); - match Self::start_telemetry(&logger, &config.telemetry).await? { + let queue = Arc::new(Queue::from_config(config.queue)); + match Self::start_telemetry(&logger, &config).await? { TelemetryStartStatus::Started => iroha_logger::info!("Telemetry started"), TelemetryStartStatus::NotStarted => iroha_logger::warn!("Telemetry not started"), }; @@ -255,8 +249,8 @@ impl Iroha { let kura_thread_handler = Kura::start(Arc::clone(&kura)); let start_args = SumeragiStartArgs { - chain_id: config.chain_id.clone(), - configuration: config.sumeragi.clone(), + sumeragi_config: config.sumeragi.clone(), + common_config: config.common.clone(), events_sender: events_sender.clone(), wsv, queue: Arc::clone(&queue), @@ -270,18 +264,18 @@ impl Iroha { .await .expect("Failed to join task with Sumeragi start"); - let block_sync = BlockSynchronizer::from_configuration( + let block_sync = BlockSynchronizer::from_config( &config.block_sync, sumeragi.clone(), Arc::clone(&kura), - PeerId::new(config.torii.p2p_addr.clone(), config.public_key.clone()), + config.common.peer_id(), network.clone(), ) .start(); - let gossiper = TransactionGossiper::from_configuration( - config.chain_id.clone(), - &config.sumeragi, + let gossiper = TransactionGossiper::from_config( + config.common.chain_id.clone(), + config.transaction_gossiper, network.clone(), Arc::clone(&queue), sumeragi.clone(), @@ -304,15 +298,14 @@ impl Iroha { } .start(); - let snapshot_maker = - SnapshotMaker::from_configuration(&config.snapshot, sumeragi.clone()).start(); + let snapshot_maker = SnapshotMaker::from_config(&config.snapshot, sumeragi.clone()).start(); let kiso = KisoHandle::new(config.clone()); let torii = Torii::new( - config.chain_id, + config.common.chain_id.clone(), kiso.clone(), - &config.torii, + config.torii, Arc::clone(&queue), events_sender, Arc::clone(&notify_shutdown), @@ -321,7 +314,7 @@ impl Iroha { Arc::clone(&kura), ); - Self::spawn_configuration_updates_broadcasting(kiso.clone(), logger.clone()); + Self::spawn_config_updates_broadcasting(kiso.clone(), logger.clone()); Self::start_listening_signal(Arc::clone(&notify_shutdown))?; @@ -376,30 +369,27 @@ #[cfg(feature = "telemetry")] async fn start_telemetry( logger: &LoggerHandle, - config: &TelemetryConfiguration, + config: &Config, ) -> Result<TelemetryStartStatus> { - #[allow(unused)] - let (config_for_regular, config_for_dev) = config.parse(); - #[cfg(feature = "dev-telemetry")] { - if let Some(config) = config_for_dev { + if let Some(config) = &config.dev_telemetry { let receiver = logger .subscribe_on_telemetry(iroha_logger::telemetry::Channel::Future) .await .wrap_err("Failed to subscribe on telemetry")?; - let _handle = iroha_telemetry::dev::start(config, receiver) + let _handle = iroha_telemetry::dev::start(config.clone(), receiver) .await .wrap_err("Failed to setup telemetry for futures")?; } } - if let Some(config) = config_for_regular { + if let Some(config) = &config.telemetry { let receiver = logger .subscribe_on_telemetry(iroha_logger::telemetry::Channel::Regular) .await .wrap_err("Failed to subscribe on telemetry")?; - let _handle = iroha_telemetry::ws::start(config, receiver) + let _handle = iroha_telemetry::ws::start(config.clone(), receiver) .await .wrap_err("Failed to setup telemetry for websocket communication")?; @@ -412,7 +402,7 @@ impl Iroha { #[cfg(not(feature = "telemetry"))] async fn start_telemetry( _logger: &LoggerHandle, - _config: &TelemetryConfiguration, + _config: &Config, ) -> Result<TelemetryStartStatus> { Ok(TelemetryStartStatus::NotStarted) } @@ -448,7 +438,7 @@ impl Iroha { /// Spawns a task which subscribes on updates from configuration actor /// and broadcasts them further to interested actors. This way, neither config actor nor other ones know /// about each other, achieving loose coupling of code and system. - fn spawn_configuration_updates_broadcasting( + fn spawn_config_updates_broadcasting( kiso: KisoHandle, logger: LoggerHandle, ) -> task::JoinHandle<()> { @@ -498,103 +488,26 @@ fn genesis_domain(public_key: PublicKey) -> Domain { domain } -macro_rules! mutate_nested_option { - ($obj:expr, self, $func:expr) => { - $obj.as_mut().map($func) - }; - ($obj:expr, $field:ident, $func:expr) => { - $obj.$field.as_mut().map($func) - }; - ($obj:expr, [$field:ident, $($rest:tt)+], $func:expr) => { - $obj.$field.as_mut().map(|x| { - mutate_nested_option!(x, [$($rest)+], $func) - }) - }; - ($obj:tt, [$field:tt], $func:expr) => { - mutate_nested_option!($obj, $field, $func) - }; -} - -/// Read and parse Iroha configuration and genesis block. -/// -/// The pipeline of configuration reading is as follows: -/// -/// 1. Construct a layer with default values -/// 2. If [`Path`] resolves, construct a layer from the file and merge it into the previous one -/// 3. Construct a layer from ENV vars and merge it into the previous one -/// 4. Check whether the final layer contains the complete configuration -/// -/// After reading it, this function ensures validity of genesis configuration and constructs the -/// [`GenesisNetwork`] according to it. +/// Read configuration and then a genesis block if specified. /// /// # Errors -/// - If provided user configuration is invalid or incomplete -/// - If genesis config is invalid +/// - If failed to read the config +/// - If failed to load the genesis block +/// - If failed to build a genesis network -pub fn read_config( - path: &Path, +pub fn read_config_and_genesis<P: AsRef<Path>>( + path: Option<P>, submit_genesis: bool, -) -> Result<(Configuration, Option<GenesisNetwork>)> { - let config = ConfigurationProxy::default(); - - let config = if let Some(actual_config_path) = path - .try_resolve() - .wrap_err("Failed to resolve configuration file")? - { - let mut cfg = config.override_with(ConfigurationProxy::from_path(&*actual_config_path)); - let config_dir = actual_config_path - .parent() - .expect("If config file was read, than it should have a parent. It is a bug."); - - // careful here: `genesis.file` might be a path relative to the config file. - // we need to resolve it before proceeding - // TODO: move this logic into `iroha_config` - // https://github.com/hyperledger/iroha/issues/4161 - let join_to_config_dir = |x: &mut PathBuf| { - *x = config_dir.join(&x); - }; - mutate_nested_option!(cfg, [genesis, file, self], join_to_config_dir); - mutate_nested_option!(cfg, [snapshot, dir_path], join_to_config_dir); - mutate_nested_option!(cfg, [kura, block_store_path], join_to_config_dir); - mutate_nested_option!(cfg, [telemetry, file, self], join_to_config_dir); +) -> Result<(Config, Option<GenesisNetwork>)> { + use iroha_config::parameters::actual::Genesis; - cfg - } else { - config - }; + let config = Config::load(path, CliContext { submit_genesis }) + .wrap_err("failed to load configuration")?; - // it is not chained to the previous expressions so that config proxy from env is evaluated - // after reading a file - let config = config.override_with( - ConfigurationProxy::from_std_env().wrap_err("Failed to build configuration from env")?, - ); + let genesis = if let Genesis::Full { key_pair, file } = &config.genesis { + let raw_block = RawGenesisBlock::from_path(file)?; - let config = config - .build() - .wrap_err("Failed to finalize configuration")?; - - // TODO: move validation logic below to `iroha_config` - - if !submit_genesis && config.sumeragi.trusted_peers.peers.len() < 2 { - return Err(eyre!("\ - The network consists from this one peer only (`sumeragi.trusted_peers` is less than 2). \ - Since `--submit-genesis` is not set, there is no way to receive the genesis block. \ - Either provide the genesis by setting `--submit-genesis` argument, `genesis.private_key`, \ - and `genesis.file` configuration parameters, or increase the number of trusted peers in \ - the network using `sumeragi.trusted_peers` configuration parameter. - ")); - } - - let genesis = if let ParsedGenesisConfiguration::Full { - key_pair, - raw_block, - } = config - .genesis - .clone() - .parse(submit_genesis) - .wrap_err("Invalid genesis configuration")?
- { Some( - GenesisNetwork::new(raw_block, &config.chain_id, &key_pair) + GenesisNetwork::new(raw_block, &config.common.chain_id, key_pair) .wrap_err("Failed to construct the genesis")?, ) } else { @@ -637,6 +550,7 @@ mod tests { mod config_integration { use assertables::{assert_contains, assert_contains_as_result}; + use iroha_config::parameters::user::RootPartial as PartialUserConfig; use iroha_crypto::KeyPair; use iroha_genesis::{ExecutorMode, ExecutorPath}; use iroha_primitives::addr::socket_addr; @@ -644,24 +558,20 @@ mod tests { use super::*; - fn config_factory() -> ConfigurationProxy { - let key_pair = KeyPair::generate(); + fn config_factory() -> PartialUserConfig { + let (pubkey, privkey) = KeyPair::generate().into(); - let mut base = ConfigurationProxy { - chain_id: Some(ChainId::new("0")), + let mut base = PartialUserConfig::default(); - public_key: Some(key_pair.public_key().clone()), - private_key: Some(key_pair.private_key().clone()), + base.chain_id.set(ChainId::from("0")); + base.public_key.set(pubkey.clone()); + base.private_key.set(privkey.clone()); + base.network.address.set(socket_addr!(127.0.0.1:1337)); - ..ConfigurationProxy::default() - }; - let genesis = base.genesis.as_mut().unwrap(); - genesis.private_key = Some(Some(key_pair.private_key().clone())); - genesis.public_key = Some(key_pair.public_key().clone()); + base.genesis.public_key.set(pubkey); + base.genesis.private_key.set(privkey); - let torii = base.torii.as_mut().unwrap(); - torii.p2p_addr = Some(socket_addr!(127.0.0.1:1337)); - torii.api_url = Some(socket_addr!(127.0.0.1:1337)); + base.torii.address.set(socket_addr!(127.0.0.1:8080)); base } @@ -676,28 +586,28 @@ mod tests { let config = { let mut cfg = config_factory(); - cfg.genesis.as_mut().unwrap().file = Some(Some("./genesis/gen.json".into())); - cfg.kura.as_mut().unwrap().block_store_path = Some("../storage".into()); - cfg.snapshot.as_mut().unwrap().dir_path = Some("../snapshots".into()); - cfg.telemetry.as_mut().unwrap().file = Some(Some("../logs/telemetry".into())); - cfg + cfg.genesis.file.set("./genesis/gen.json".into()); + cfg.kura.store_dir.set("../storage".into()); + cfg.snapshot.store_dir.set("../snapshots".into()); + cfg.telemetry.dev.out_file.set("../logs/telemetry".into()); + toml::Value::try_from(cfg)? 
}; let dir = tempfile::tempdir()?; let genesis_path = dir.path().join("config/genesis/gen.json"); let executor_path = dir.path().join("config/genesis/executor.wasm"); - let config_path = dir.path().join("config/config.json5"); + let config_path = dir.path().join("config/config.toml"); std::fs::create_dir(dir.path().join("config"))?; std::fs::create_dir(dir.path().join("config/genesis"))?; - std::fs::write(config_path, serde_json::to_string(&config)?)?; - std::fs::write(genesis_path, serde_json::to_string(&genesis)?)?; + std::fs::write(config_path, toml::to_string(&config)?)?; + std::fs::write(genesis_path, json5::to_string(&genesis)?)?; std::fs::write(executor_path, "")?; - let config_path = Path::default(dir.path().join("config/config")); + let config_path = dir.path().join("config/config.toml"); // When - let (config, genesis) = read_config(&config_path, true)?; + let (config, genesis) = read_config_and_genesis(Some(config_path), true)?; // Then @@ -705,15 +615,19 @@ mod tests { assert!(genesis.is_some()); assert_eq!( - config.kura.block_store_path.absolutize()?, + config.kura.store_dir.absolutize()?, dir.path().join("storage") ); assert_eq!( - config.snapshot.dir_path.absolutize()?, + config.snapshot.store_dir.absolutize()?, dir.path().join("snapshots") ); assert_eq!( - config.telemetry.file.expect("Should be set").absolutize()?, + config + .dev_telemetry + .expect("dev telemetry should be set") + .out_file + .absolutize()?, dir.path().join("logs/telemetry") ); @@ -730,28 +644,22 @@ mod tests { let config = { let mut cfg = config_factory(); - cfg.genesis.as_mut().unwrap().file = Some(Some("./genesis.json".into())); - cfg + cfg.genesis.file.set("./genesis.json".into()); + toml::Value::try_from(cfg)? }; let dir = tempfile::tempdir()?; - std::fs::write( - dir.path().join("config.json"), - serde_json::to_string(&config)?, - )?; - std::fs::write( - dir.path().join("genesis.json"), - serde_json::to_string(&genesis)?, - )?; + std::fs::write(dir.path().join("config.toml"), toml::to_string(&config)?)?; + std::fs::write(dir.path().join("genesis.json"), json5::to_string(&genesis)?)?; std::fs::write(dir.path().join("executor.wasm"), "")?; - let config_path = Path::user_provided(dir.path().join("config.json"))?; + let config_path = dir.path().join("config.toml"); // When & Then - let report = read_config(&config_path, false).unwrap_err(); + let report = read_config_and_genesis(Some(config_path), false).unwrap_err(); assert_contains!( - format!("{report}"), + format!("{report:#}"), "The network consists from this one peer only" ); diff --git a/cli/src/main.rs b/cli/src/main.rs index 14ef7a587d5..34c7909ef9d 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,11 +1,8 @@ //! Iroha peer command-line interface. -use std::env; +use std::{env, path::PathBuf}; use clap::Parser; use color_eyre::eyre::Result; -use iroha_config::path::Path; - -const DEFAULT_CONFIG_PATH: &str = "config"; fn is_colouring_supported() -> bool { supports_color::on(supports_color::Stream::Stdout).is_some() @@ -19,22 +16,9 @@ fn default_terminal_colors_str() -> clap::builder::OsStr { #[derive(Parser, Debug)] #[command(name = "iroha", version = concat!("version=", env!("CARGO_PKG_VERSION"), " git_commit_sha=", env!("VERGEN_GIT_SHA")), author)] struct Args { - /// Path to the configuration file, defaults to `config.json`/`config.json5` - /// - /// Supported extensions are `.json` and `.json5`. By default, Iroha looks for a - /// `config` file with one of the supported extensions in the current working directory. 
- /// If the default config file is not found, Iroha will rely on default values and environment - /// variables. However, if the config path is set explicitly with this argument and the file - /// is not found, Iroha will exit with an error. - #[arg( - long, - short, - env("IROHA_CONFIG"), - value_name("PATH"), - value_parser(Path::user_provided_str), - value_hint(clap::ValueHint::FilePath) - )] - config: Option<Path>, + /// Path to the configuration file + #[arg(long, short, value_name("PATH"), value_hint(clap::ValueHint::FilePath))] + config: Option<PathBuf>, /// Whether to enable ANSI colored output or not /// /// By default, Iroha determines whether the terminal supports colors or not. @@ -73,11 +57,7 @@ async fn main() -> Result<()> { color_eyre::install()?; } - let config_path = args - .config - .unwrap_or_else(|| Path::default(DEFAULT_CONFIG_PATH)); - - let (config, genesis) = iroha::read_config(&config_path, args.submit_genesis)?; + let (config, genesis) = iroha::read_config_and_genesis(args.config, args.submit_genesis)?; let logger = iroha_logger::init_global(&config.logger, args.terminal_colors)?; iroha_logger::info!( @@ -100,8 +80,6 @@ async fn main() -> Result<()> { #[cfg(test)] mod tests { - use assertables::{assert_contains, assert_contains_as_result}; - use super::*; #[test] fn default_args() -> Result<()> { let args = Args::try_parse_from(["test"])?; - assert_eq!(args.config, None); assert_eq!(args.terminal_colors, is_colouring_supported()); assert_eq!(args.submit_genesis, false); @@ -139,21 +116,14 @@ fn user_provided_config_path_works() -> Result<()> { let args = Args::try_parse_from(["test", "--config", "/home/custom/file.json"])?; - assert_eq!( - args.config, - Some(Path::user_provided("/home/custom/file.json").unwrap()) - ); + assert_eq!(args.config, Some(PathBuf::from("/home/custom/file.json"))); Ok(()) } #[test] - fn user_cannot_provide_invalid_extension() { - let err = Args::try_parse_from(["test", "--config", "file.toml"]) - .expect_err("Should not allow TOML"); - - let formatted = format!("{err}"); - assert_contains!(formatted, "invalid value 'file.toml' for '--config"); - assert_contains!(formatted, "unsupported file extension `toml`"); + fn user_can_provide_any_extension() { + let _args = Args::try_parse_from(["test", "--config", "file.toml.but.not"]) .expect("should allow doing this as well"); } } diff --git a/cli/src/samples.rs b/cli/src/samples.rs index 0a4c13870b2..35fd25da53e 100644 --- a/cli/src/samples.rs +++ b/cli/src/samples.rs @@ -1,14 +1,23 @@ //! This module contains the sample configurations used for testing and benchmarking throughout Iroha.
-use std::{collections::HashSet, path::Path, str::FromStr}; +use std::{collections::HashSet, path::Path, str::FromStr, time::Duration}; use iroha_config::{ - iroha::{Configuration, ConfigurationProxy}, - sumeragi::TrustedPeers, - torii::{uri::DEFAULT_API_ADDR, DEFAULT_TORII_P2P_ADDR}, + base::{HumanDuration, UnwrapPartial}, + parameters::{ + actual::Root as Config, + user::{CliContext, RootPartial as UserConfig}, + }, }; use iroha_crypto::{KeyPair, PublicKey}; use iroha_data_model::{peer::PeerId, prelude::*, ChainId}; -use iroha_primitives::unique_vec::UniqueVec; +use iroha_primitives::{ + addr::{socket_addr, SocketAddr}, + unique_vec::UniqueVec, +}; + +// FIXME: move to a global test-related place, re-use everywhere else +const DEFAULT_P2P_ADDR: SocketAddr = socket_addr!(127.0.0.1:1337); +const DEFAULT_TORII_ADDR: SocketAddr = socket_addr!(127.0.0.1:8080); /// Get sample trusted peers. The public key must be the same as `configuration.public_key` /// @@ -33,57 +42,57 @@ pub fn get_trusted_peers(public_key: Option<&PublicKey>) -> HashSet<PeerId> { .map(|(a, k)| PeerId::new(a.parse().expect("Valid"), PublicKey::from_str(k).unwrap())) .collect(); if let Some(pubkey) = public_key { - trusted_peers.insert(PeerId::new(DEFAULT_TORII_P2P_ADDR.clone(), pubkey.clone())); + trusted_peers.insert(PeerId { + address: DEFAULT_P2P_ADDR.clone(), + public_key: pubkey.clone(), + }); } trusted_peers } #[allow(clippy::implicit_hasher)] -/// Get a sample Iroha configuration proxy. Trusted peers must be +/// Get a sample Iroha configuration on user-layer level. Trusted peers must be /// specified in this function, including the current peer. Use [`get_trusted_peers`] /// to populate `trusted_peers` if in doubt. Almost equivalent to the [`get_config`] /// function, except the proxy is left unbuilt. /// /// # Panics /// - when [`KeyPair`] generation fails (rare case).
-pub fn get_config_proxy( - peers: UniqueVec<PeerId>, +pub fn get_user_config( + peers: &UniqueVec<PeerId>, chain_id: Option<ChainId>, key_pair: Option<KeyPair>, -) -> ConfigurationProxy { - let chain_id = chain_id.unwrap_or_else(|| ChainId::new("0")); +) -> UserConfig { + let chain_id = chain_id.unwrap_or_else(|| ChainId::from("0")); let (public_key, private_key) = key_pair.unwrap_or_else(KeyPair::generate).into(); iroha_logger::info!(%public_key); - ConfigurationProxy { - chain_id: Some(chain_id), - public_key: Some(public_key.clone()), - private_key: Some(private_key.clone()), - sumeragi: Some(Box::new(iroha_config::sumeragi::ConfigurationProxy { - max_transactions_in_block: Some(2), - trusted_peers: Some(TrustedPeers { peers }), - ..iroha_config::sumeragi::ConfigurationProxy::default() - })), - torii: Some(Box::new(iroha_config::torii::ConfigurationProxy { - p2p_addr: Some(DEFAULT_TORII_P2P_ADDR.clone()), - api_url: Some(DEFAULT_API_ADDR.clone()), - ..iroha_config::torii::ConfigurationProxy::default() - })), - block_sync: Some(iroha_config::block_sync::ConfigurationProxy { - block_batch_size: Some(1), - gossip_period_ms: Some(500), - ..iroha_config::block_sync::ConfigurationProxy::default() - }), - queue: Some(iroha_config::queue::ConfigurationProxy { - ..iroha_config::queue::ConfigurationProxy::default() - }), - genesis: Some(Box::new(iroha_config::genesis::ConfigurationProxy { - private_key: Some(Some(private_key)), - public_key: Some(public_key), - file: Some(Some("./genesis.json".into())), - })), - ..ConfigurationProxy::default() - } + + let mut config = UserConfig::new(); + + config.chain_id.set(chain_id); + config.public_key.set(public_key.clone()); + config.private_key.set(private_key.clone()); + config.network.address.set(DEFAULT_P2P_ADDR); + config + .chain_wide + .max_transactions_in_block + .set(2.try_into().unwrap()); + config.sumeragi.trusted_peers.set(peers.to_vec()); + config.torii.address.set(DEFAULT_TORII_ADDR); + config + .network + .block_gossip_max_size + .set(1.try_into().unwrap()); + config + .network + .block_gossip_period + .set(HumanDuration(Duration::from_millis(500))); + config.genesis.private_key.set(private_key); + config.genesis.public_key.set(public_key); + config.genesis.file.set("./genesis.json".into()); + + config } #[allow(clippy::implicit_hasher)] @@ -94,13 +103,17 @@ pub fn get_config_proxy( /// # Panics /// - when [`KeyPair`] generation fails (rare case). pub fn get_config( - trusted_peers: UniqueVec<PeerId>, + trusted_peers: &UniqueVec<PeerId>, chain_id: Option<ChainId>, key_pair: Option<KeyPair>, -) -> Configuration { - get_config_proxy(trusted_peers, chain_id, key_pair) - .build() - .expect("Iroha config should build as all required fields were provided") +) -> Config { + get_user_config(trusted_peers, chain_id, key_pair) + .unwrap_partial() + .expect("config should build as all required fields were provided") + .parse(CliContext { + submit_genesis: true, + }) + .expect("config should finalize as the input is semantically valid (or there is a bug)") } /// Construct executor from path.
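(Editorial aside, not part of the patch: based on the call sites updated later in this diff, e.g. in `client/benches/torii.rs`, the reworked sample API is used roughly as sketched below; `peer`, `get_chain_id`, and `get_key_pair` are test-utility names taken from those hunks.)

```rust
// Hedged sketch of the new sample-config flow. Trusted peers are now passed
// by reference, and the genesis key pair comes from the parsed config
// instead of being extracted by hand.
let configuration = get_config(
    &unique_vec![peer.id.clone()],
    Some(get_chain_id()),
    Some(get_key_pair()),
);
let genesis_key_pair = configuration
    .genesis
    .key_pair()
    .expect("sample config sets both genesis keys");
```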
diff --git a/client/Cargo.toml b/client/Cargo.toml index 5a3aba4aadb..1d38df505b9 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -54,6 +54,7 @@ iroha_data_model = { workspace = true, features = ["http"] } iroha_primitives = { workspace = true } iroha_logger = { workspace = true } iroha_telemetry = { workspace = true } +iroha_torii_const = { workspace = true } iroha_version = { workspace = true, features = ["http"] } attohttpc = { version = "0.26.1", default-features = false } @@ -62,6 +63,7 @@ http = "0.2.9" url = { workspace = true } rand = { workspace = true } serde = { workspace = true, features = ["derive"] } +serde_with = { workspace = true } serde_json = { workspace = true } base64 = { workspace = true } thiserror = { workspace = true } @@ -72,6 +74,8 @@ tokio = { workspace = true, features = ["rt"] } tokio-tungstenite = { workspace = true } tungstenite = { workspace = true } futures-util = "0.3.28" +merge = "0.1.0" +toml = { workspace = true } [dev-dependencies] iroha_wasm_builder = { workspace = true } diff --git a/client/README.md b/client/README.md index 10073589a11..c15f7c9b6ad 100644 --- a/client/README.md +++ b/client/README.md @@ -16,15 +16,9 @@ Follow the [Iroha 2 tutorial](https://hyperledger.github.io/iroha-2-docs/guide/r Add the following to the manifest file of your Rust project: ```toml -iroha_client = { git = "https://github.com/hyperledger/iroha/", branch="iroha2-dev" } +iroha_client = { git = "https://github.com/hyperledger/iroha", branch = "iroha2-dev" } ``` ## Examples -```rust -let configuration = - &Configuration::from_path("config.json").expect("Failed to load configuration."); -let mut iroha_client = Client::new(configuration); -``` - We highly recommend looking at the sample [`iroha_client_cli`](../client_cli) implementation binary as well as our [tutorial](https://hyperledger.github.io/iroha-2-docs/guide/rust.html) for more examples and explanations. diff --git a/client/benches/torii.rs b/client/benches/torii.rs index dd503e3c396..669fcc0c917 100644 --- a/client/benches/torii.rs +++ b/client/benches/torii.rs @@ -17,23 +17,12 @@ use tokio::runtime::Runtime; const MINIMUM_SUCCESS_REQUEST_RATIO: f32 = 0.9; -// assumes that config is having a complete genesis key pair -fn get_genesis_key_pair(config: &iroha_config::iroha::Configuration) -> KeyPair { - if let (public_key, Some(private_key)) = - (&config.genesis.public_key, &config.genesis.private_key) - { - KeyPair::new(public_key.clone(), private_key.clone()).expect("Should be valid") - } else { - panic!("Cannot get genesis key pair from the config. 
Probably a bug.") - } -} - fn query_requests(criterion: &mut Criterion) { let mut peer = ::new().expect("Failed to create peer"); let chain_id = get_chain_id(); let configuration = get_config( - unique_vec![peer.id.clone()], + &unique_vec![peer.id.clone()], Some(chain_id.clone()), Some(get_key_pair()), ); @@ -52,12 +41,15 @@ fn query_requests(criterion: &mut Criterion) { ) .build(), &chain_id, - &get_genesis_key_pair(&configuration), + configuration + .genesis + .key_pair() + .expect("genesis config should be full, probably a bug"), ) .expect("genesis creation failed"); let builder = PeerBuilder::new() - .with_configuration(configuration) + .with_config(configuration) .with_into_genesis(genesis); rt.block_on(builder.start_with_peer(&mut peer)); @@ -81,12 +73,13 @@ fn query_requests(criterion: &mut Criterion) { quantity, AssetId::new(asset_definition_id, account_id.clone()), ); - let mut client_config = - iroha_client::samples::get_client_config(get_chain_id(), &get_key_pair()); - - client_config.torii_api_url = format!("http://{}", peer.api_address).parse().unwrap(); + let client_config = iroha_client::samples::get_client_config( + get_chain_id(), + get_key_pair(), + format!("http://{}", peer.api_address).parse().unwrap(), + ); - let iroha_client = Client::new(&client_config).expect("Invalid client configuration"); + let iroha_client = Client::new(client_config); thread::sleep(std::time::Duration::from_millis(5000)); let instructions: [InstructionBox; 4] = [ @@ -139,7 +132,7 @@ fn instruction_submits(criterion: &mut Criterion) { let chain_id = get_chain_id(); let configuration = get_config( - unique_vec![peer.id.clone()], + &unique_vec![peer.id.clone()], Some(chain_id.clone()), Some(get_key_pair()), ); @@ -148,7 +141,7 @@ fn instruction_submits(criterion: &mut Criterion) { .domain("wonderland".parse().expect("Valid")) .account( "alice".parse().expect("Valid"), - configuration.public_key.clone(), + configuration.common.key_pair.public_key().clone(), ) .finish_domain() .executor( @@ -156,11 +149,14 @@ fn instruction_submits(criterion: &mut Criterion) { ) .build(), &chain_id, - &get_genesis_key_pair(&configuration), + configuration + .genesis + .key_pair() + .expect("config should be full; probably a bug"), ) .expect("failed to create genesis"); let builder = PeerBuilder::new() - .with_configuration(configuration) + .with_config(configuration) .with_into_genesis(genesis); rt.block_on(builder.start_with_peer(&mut peer)); let mut group = criterion.benchmark_group("instruction-requests"); @@ -170,10 +166,12 @@ fn instruction_submits(criterion: &mut Criterion) { let (public_key, _) = KeyPair::generate().into(); let create_account = Register::account(Account::new(account_id.clone(), [public_key])).into(); let asset_definition_id = AssetDefinitionId::new(domain_id, "xor".parse().expect("Valid")); - let mut client_config = - iroha_client::samples::get_client_config(get_chain_id(), &get_key_pair()); - client_config.torii_api_url = format!("http://{}", peer.api_address).parse().unwrap(); - let iroha_client = Client::new(&client_config).expect("Invalid client configuration"); + let client_config = iroha_client::samples::get_client_config( + get_chain_id(), + get_key_pair(), + format!("http://{}", peer.api_address).parse().unwrap(), + ); + let iroha_client = Client::new(client_config); thread::sleep(std::time::Duration::from_millis(5000)); let _ = iroha_client .submit_all([create_domain, create_account]) diff --git a/client/benches/tps/utils.rs b/client/benches/tps/utils.rs index 
3901d11b266..f078481269b 100644 --- a/client/benches/tps/utils.rs +++ b/client/benches/tps/utils.rs @@ -196,7 +196,7 @@ impl MeasurerUnit { /// Spawn who periodically submits transactions fn spawn_transaction_submitter(&self, shutdown_signal: mpsc::Receiver<()>) -> JoinHandle<()> { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let submitter = self.client.clone(); let interval_us_per_tx = self.config.interval_us_per_tx; diff --git a/client/examples/million_accounts_genesis.rs b/client/examples/million_accounts_genesis.rs index 737e9236246..c618caf700b 100644 --- a/client/examples/million_accounts_genesis.rs +++ b/client/examples/million_accounts_genesis.rs @@ -2,7 +2,7 @@ use std::{thread, time::Duration}; use iroha::samples::{construct_executor, get_config}; -use iroha_client::{crypto::KeyPair, data_model::prelude::*}; +use iroha_client::data_model::prelude::*; use iroha_data_model::isi::InstructionBox; use iroha_genesis::{GenesisNetwork, RawGenesisBlock, RawGenesisBlockBuilder}; use iroha_primitives::unique_vec; @@ -40,28 +40,24 @@ fn main_genesis() { let chain_id = get_chain_id(); let configuration = get_config( - unique_vec![peer.id.clone()], + &unique_vec![peer.id.clone()], Some(chain_id.clone()), Some(get_key_pair()), ); let rt = Runtime::test(); - let genesis = GenesisNetwork::new(generate_genesis(1_000_000_u32), &chain_id, &{ - let private_key = configuration + let genesis = GenesisNetwork::new( + generate_genesis(1_000_000_u32), + &chain_id, + configuration .genesis - .private_key - .as_ref() - .expect("Should be from get_config"); - KeyPair::new( - configuration.genesis.public_key.clone(), - private_key.clone(), - ) - .expect("Should be a valid key pair") - }) + .key_pair() + .expect("should be available in the config; probably a bug"), + ) .expect("genesis creation failed"); let builder = PeerBuilder::new() .with_into_genesis(genesis) - .with_configuration(configuration); + .with_config(configuration); // This only submits the genesis. It doesn't check if the accounts // are created, because that check is 1) not needed for what the diff --git a/client/examples/tutorial.rs b/client/examples/tutorial.rs index b83a2665ae3..bec8227f6a5 100644 --- a/client/examples/tutorial.rs +++ b/client/examples/tutorial.rs @@ -1,50 +1,34 @@ //! This file contains examples from the Rust tutorial. //! 
-use std::fs::File; use eyre::{Error, WrapErr}; -use iroha_client::config::Configuration; +use iroha_client::config::Config; // #region rust_config_crates // #endregion rust_config_crates fn main() { // #region rust_config_load - let config_loc = "../configs/client/config.json"; - let file = File::open(config_loc) - .wrap_err("Unable to load the configuration file at `.....`") - .expect("Config file is loading normally."); - let config: Configuration = serde_json::from_reader(file) - .wrap_err("Failed to parse `../configs/client/config.json`") - .expect("Verified in tests"); + let config = Config::load("../configs/swarm/client.toml").unwrap(); // #endregion rust_config_load // Your code goes hereā€¦ - json_config_client_test(&config) - .expect("JSON config client example is expected to work correctly"); - domain_registration_test(&config) + domain_registration_test(config.clone()) .expect("Domain registration example is expected to work correctly"); account_definition_test().expect("Account definition example is expected to work correctly"); - account_registration_test(&config) + account_registration_test(config.clone()) .expect("Account registration example is expected to work correctly"); - asset_registration_test(&config) + asset_registration_test(config.clone()) .expect("Asset registration example is expected to work correctly"); - asset_minting_test(&config).expect("Asset minting example is expected to work correctly"); - asset_burning_test(&config).expect("Asset burning example is expected to work correctly"); + asset_minting_test(config.clone()) + .expect("Asset minting example is expected to work correctly"); + asset_burning_test(config.clone()) + .expect("Asset burning example is expected to work correctly"); // output_visualising_test(&config).expect(msg: "Visualising outputs example is expected to work correctly"); println!("Success!"); } -fn json_config_client_test(config: &Configuration) -> Result<(), Error> { - use iroha_client::client::Client; - - // Initialise a client with a provided config - let _current_client: Client = Client::new(config)?; - - Ok(()) -} - -fn domain_registration_test(config: &Configuration) -> Result<(), Error> { +fn domain_registration_test(config: Config) -> Result<(), Error> { // #region domain_register_example_crates use iroha_client::{ client::Client, @@ -67,7 +51,7 @@ fn domain_registration_test(config: &Configuration) -> Result<(), Error> { // #region rust_client_create // Create an Iroha client - let iroha_client: Client = Client::new(config)?; + let iroha_client = Client::new(config); // #endregion rust_client_create // #region domain_register_example_prepare_tx @@ -108,7 +92,7 @@ fn account_definition_test() -> Result<(), Error> { Ok(()) } -fn account_registration_test(config: &Configuration) -> Result<(), Error> { +fn account_registration_test(config: Config) -> Result<(), Error> { // #region register_account_crates use iroha_client::{ client::Client, @@ -121,7 +105,7 @@ fn account_registration_test(config: &Configuration) -> Result<(), Error> { // #endregion register_account_crates // Create an Iroha client - let iroha_client: Client = Client::new(config)?; + let iroha_client = Client::new(config); // #region register_account_create // Create an AccountId instance by providing the account and domain name @@ -156,7 +140,7 @@ fn account_registration_test(config: &Configuration) -> Result<(), Error> { Ok(()) } -fn asset_registration_test(config: &Configuration) -> Result<(), Error> { +fn asset_registration_test(config: Config) -> Result<(), 
Error> { // #region register_asset_crates use std::str::FromStr as _; @@ -169,7 +153,7 @@ fn asset_registration_test(config: &Configuration) -> Result<(), Error> { // #endregion register_asset_crates // Create an Iroha client - let iroha_client: Client = Client::new(config)?; + let iroha_client = Client::new(config); // #region register_asset_create_asset // Create an asset @@ -206,7 +190,7 @@ fn asset_registration_test(config: &Configuration) -> Result<(), Error> { Ok(()) } -fn asset_minting_test(config: &Configuration) -> Result<(), Error> { +fn asset_minting_test(config: Config) -> Result<(), Error> { // #region mint_asset_crates use std::str::FromStr; @@ -217,7 +201,7 @@ fn asset_minting_test(config: &Configuration) -> Result<(), Error> { // #endregion mint_asset_crates // Create an Iroha client - let iroha_client: Client = Client::new(config)?; + let iroha_client = Client::new(config); // Define the instances of an Asset and Account // #region mint_asset_define_asset_account @@ -257,7 +241,7 @@ fn asset_minting_test(config: &Configuration) -> Result<(), Error> { Ok(()) } -fn asset_burning_test(config: &Configuration) -> Result<(), Error> { +fn asset_burning_test(config: Config) -> Result<(), Error> { // #region burn_asset_crates use std::str::FromStr; @@ -268,7 +252,7 @@ fn asset_burning_test(config: &Configuration) -> Result<(), Error> { // #endregion burn_asset_crates // Create an Iroha client - let iroha_client: Client = Client::new(config)?; + let iroha_client = Client::new(config); // #region burn_asset_define_asset_account // Define the instances of an Asset and Account diff --git a/client/src/client.rs b/client/src/client.rs index 3dcbbc44635..fadfcafef36 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -13,8 +13,10 @@ use derive_more::{DebugCustom, Display}; use eyre::{eyre, Result, WrapErr}; use futures_util::StreamExt; use http_default::{AsyncWebSocketStream, WebSocketStream}; +pub use iroha_config::client_api::ConfigDTO; use iroha_logger::prelude::*; use iroha_telemetry::metrics::Status; +use iroha_torii_const::uri as torii_uri; use iroha_version::prelude::*; use parity_scale_codec::DecodeAll; use rand::Rng; @@ -22,7 +24,7 @@ use url::Url; use self::{blocks_api::AsyncBlockStream, events_api::AsyncEventStream}; use crate::{ - config::{api::ConfigurationDTO, Configuration}, + config::Config, crypto::{HashOf, KeyPair}, data_model::{ block::SignedBlock, @@ -361,7 +363,7 @@ pub struct QueryRequest { impl QueryRequest { #[cfg(test)] fn dummy() -> Self { - let torii_url = crate::config::torii::DEFAULT_API_ADDR; + let torii_url = torii_uri::DEFAULT_API_ADDR; Self { torii_url: format!("http://{torii_url}").parse().unwrap(), @@ -380,9 +382,7 @@ impl QueryRequest { fn assemble(self) -> DefaultRequestBuilder { let builder = DefaultRequestBuilder::new( HttpMethod::POST, - self.torii_url - .join(crate::config::torii::QUERY) - .expect("Valid URI"), + self.torii_url.join(torii_uri::QUERY).expect("Valid URI"), ) .headers(self.headers); @@ -402,49 +402,45 @@ impl QueryRequest { /// Representation of `Iroha` client. 
 impl Client {
     /// Constructor for client from configuration
-    ///
-    /// # Errors
-    /// If configuration isn't valid (e.g public/private keys don't match)
     #[inline]
-    pub fn new(configuration: &Configuration) -> Result<Self> {
+    pub fn new(configuration: Config) -> Self {
         Self::with_headers(configuration, HashMap::new())
     }

     /// Constructor for client from configuration and headers
     ///
-    /// *Authorization* header will be added, if `login` and `password` fields are presented
-    ///
-    /// # Errors
-    /// If configuration isn't valid (e.g public/private keys don't match)
+    /// *Authorization* header will be added if `basic_auth` is present
     #[inline]
     pub fn with_headers(
-        configuration: &Configuration,
+        Config {
+            chain_id,
+            account_id,
+            torii_api_url,
+            key_pair,
+            basic_auth,
+            transaction_add_nonce,
+            transaction_ttl,
+            transaction_status_timeout,
+        }: Config,
         mut headers: HashMap<String, String>,
-    ) -> Result<Self> {
-        if let Some(basic_auth) = &configuration.basic_auth {
+    ) -> Self {
+        if let Some(basic_auth) = basic_auth {
             let credentials = format!("{}:{}", basic_auth.web_login, basic_auth.password);
             let engine = base64::engine::general_purpose::STANDARD;
             let encoded = base64::engine::Engine::encode(&engine, credentials);
             headers.insert(String::from("Authorization"), format!("Basic {encoded}"));
         }

-        Ok(Self {
-            chain_id: configuration.chain_id.clone(),
-            torii_url: configuration.torii_api_url.clone(),
-            key_pair: KeyPair::new(
-                configuration.public_key.clone(),
-                configuration.private_key.clone(),
-            )?,
-            transaction_ttl: configuration
-                .transaction_time_to_live_ms
-                .map(|ttl| Duration::from_millis(ttl.into())),
-            transaction_status_timeout: Duration::from_millis(
-                configuration.transaction_status_timeout_ms,
-            ),
-            account_id: configuration.account_id.clone(),
+        Self {
+            chain_id,
+            torii_url: torii_api_url,
+            key_pair,
+            transaction_ttl: Some(transaction_ttl),
+            transaction_status_timeout,
+            account_id,
             headers,
-            add_transaction_nonce: configuration.add_transaction_nonce,
-        })
+            add_transaction_nonce: transaction_add_nonce,
+        }
     }
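Pulled out of `with_headers` above: the Basic auth header is just `base64("login:password")`, built with the same `base64` engine calls the diff uses. A standalone sketch (the helper name is ours):

```rust
/// Returns the (name, value) header pair that `with_headers` inserts
/// when `basic_auth` is present.
fn basic_auth_header(web_login: &str, password: &str) -> (String, String) {
    let credentials = format!("{web_login}:{password}");
    let engine = base64::engine::general_purpose::STANDARD;
    let encoded = base64::engine::Engine::encode(&engine, credentials);
    ("Authorization".to_owned(), format!("Basic {encoded}"))
}
```

For `("mad_hatter", "ilovetea")` this yields `Basic bWFkX2hhdHRlcjppbG92ZXRlYQ==`, matching the `ENCRYPTED_CREDENTIALS` constant in the tests below.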
     /// Builds transaction out of supplied instructions or wasm.
@@ -668,7 +664,7 @@ impl Client {
         B::new(
             HttpMethod::POST,
             self.torii_url
-                .join(crate::config::torii::TRANSACTION)
+                .join(torii_uri::TRANSACTION)
                 .expect("Valid URI"),
         )
         .headers(self.headers.clone())
@@ -936,7 +932,7 @@ impl Client {
             event_filter,
             self.headers.clone(),
             self.torii_url
-                .join(crate::config::torii::SUBSCRIPTION)
+                .join(torii_uri::SUBSCRIPTION)
                 .expect("Valid URI"),
         )
     }
@@ -972,7 +968,7 @@ impl Client {
             height,
             self.headers.clone(),
             self.torii_url
-                .join(crate::config::torii::BLOCKS_STREAM)
+                .join(torii_uri::BLOCKS_STREAM)
                 .expect("Valid URI"),
         )
     }
@@ -990,7 +986,7 @@ impl Client {
     ) -> Result<Vec<SignedTransaction>> {
         let url = self
             .torii_url
-            .join(crate::config::torii::MATCHING_PENDING_TRANSACTIONS)
+            .join(torii_uri::MATCHING_PENDING_TRANSACTIONS)
             .expect("Valid URI");
         let body = transaction.encode();

@@ -1025,11 +1021,11 @@ impl Client {
     ///
     /// # Errors
     /// Fails if sending request or decoding fails
-    pub fn get_config(&self) -> Result<ConfigurationDTO> {
+    pub fn get_config(&self) -> Result<ConfigDTO> {
         let resp = DefaultRequestBuilder::new(
             HttpMethod::GET,
             self.torii_url
-                .join(crate::config::torii::CONFIGURATION)
+                .join(torii_uri::CONFIGURATION)
                 .expect("Valid URI"),
         )
         .headers(&self.headers)
@@ -1051,11 +1047,11 @@ impl Client {
     ///
     /// # Errors
     /// If sending request or decoding fails
-    pub fn set_config(&self, dto: ConfigurationDTO) -> Result<()> {
+    pub fn set_config(&self, dto: ConfigDTO) -> Result<()> {
         let body = serde_json::to_vec(&dto).wrap_err(format!("Failed to serialize {dto:?}"))?;
         let url = self
             .torii_url
-            .join(crate::config::torii::CONFIGURATION)
+            .join(torii_uri::CONFIGURATION)
             .expect("Valid URI");
         let resp = DefaultRequestBuilder::new(HttpMethod::POST, url)
             .headers(&self.headers)
@@ -1094,9 +1090,7 @@ impl Client {
     pub fn prepare_status_request<B: RequestBuilder>(&self) -> B {
         B::new(
             HttpMethod::GET,
-            self.torii_url
-                .join(crate::config::torii::STATUS)
-                .expect("Valid URI"),
+            self.torii_url.join(torii_uri::STATUS).expect("Valid URI"),
         )
         .headers(self.headers.clone())
     }
@@ -1595,33 +1589,34 @@ mod tests {
     use iroha_primitives::small::SmallStr;

     use super::*;
-    use crate::config::{torii::DEFAULT_API_ADDR, BasicAuth, ConfigurationProxy, WebLogin};
+    use crate::config::{BasicAuth, Config, WebLogin};

     const LOGIN: &str = "mad_hatter";
     const PASSWORD: &str = "ilovetea";
     // `mad_hatter:ilovetea` encoded with base64
     const ENCRYPTED_CREDENTIALS: &str = "bWFkX2hhdHRlcjppbG92ZXRlYQ==";

+    fn config_factory() -> Config {
+        Config {
+            chain_id: ChainId::from("0"),
+            key_pair: KeyPair::generate(),
+            account_id: "alice@wonderland"
+                .parse()
+                .expect("This account ID should be valid"),
+            torii_api_url: "http://127.0.0.1:8080".parse().unwrap(),
+            basic_auth: None,
+            transaction_add_nonce: false,
+            transaction_ttl: Duration::from_secs(5),
+            transaction_status_timeout: Duration::from_secs(10),
+        }
+    }
+
     #[test]
     fn txs_same_except_for_nonce_have_different_hashes() {
-        let (public_key, private_key) = KeyPair::generate().into();
-
-        let cfg = ConfigurationProxy {
-            chain_id: Some(ChainId::new("0")),
-            public_key: Some(public_key),
-            private_key: Some(private_key),
-            account_id: Some(
-                "alice@wonderland"
-                    .parse()
-                    .expect("This account ID should be valid"),
-            ),
-            torii_api_url: Some(format!("http://{DEFAULT_API_ADDR}").parse().unwrap()),
-            add_transaction_nonce: Some(true),
-            ..ConfigurationProxy::default()
-        }
-        .build()
-        .expect("Client config should build as all required fields were provided");
-        let client = Client::new(&cfg).expect("Invalid client configuration");
+        let client = Client::new(Config {
+            transaction_add_nonce: true,
+            ..config_factory()
+        });

         let build_transaction =
             || client.build_transaction(Vec::<InstructionBox>::new(), UnlimitedMetadata::new());
@@ -1635,7 +1630,7 @@ mod tests {
             .with_executable(tx1.instructions().clone())
             .with_metadata(tx1.metadata().clone());

-        tx.set_creation_time(tx1.creation_time().as_millis().try_into().unwrap());
+        tx.set_creation_time(tx1.creation_time());
         if let Some(nonce) = tx1.nonce() {
             tx.set_nonce(nonce);
         }
@@ -1650,34 +1645,13 @@ mod tests {

     #[test]
     fn authorization_header() {
-        let basic_auth = BasicAuth {
-            web_login: WebLogin::from_str(LOGIN).expect("Failed to create valid `WebLogin`"),
-            password: SmallStr::from_str(PASSWORD),
-        };
-
-        let cfg = ConfigurationProxy {
-            chain_id: Some(ChainId::new("0")),
-            public_key: Some(
-                "ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0"
-                    .parse()
-                    .expect("Public key not in mulithash format"),
-            ),
-            private_key: Some(crate::crypto::PrivateKey::from_hex(
-                crate::crypto::Algorithm::Ed25519,
-                "9AC47ABF59B356E0BD7DCBBBB4DEC080E302156A48CA907E47CB6AEA1D32719E7233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0"
-            ).expect("Private key not hex encoded")),
-            account_id: Some(
-                "alice@wonderland"
-                    .parse()
-                    .expect("This account ID should be valid"),
-            ),
-            torii_api_url: Some(format!("http://{DEFAULT_API_ADDR}").parse().unwrap()),
-            basic_auth: Some(Some(basic_auth)),
-            ..ConfigurationProxy::default()
-        }
-        .build()
-        .expect("Client config should build as all required fields were provided");
-        let client = Client::new(&cfg).expect("Invalid client configuration");
+        let client = Client::new(Config {
+            basic_auth: Some(BasicAuth {
+                web_login: WebLogin::from_str(LOGIN).expect("Failed to create valid `WebLogin`"),
+                password: SmallStr::from_str(PASSWORD),
+            }),
+            ..config_factory()
+        });

         let value = client
             .headers
diff --git a/client/src/config.rs b/client/src/config.rs
new file mode 100644
index 00000000000..c6010c834ec
--- /dev/null
+++ b/client/src/config.rs
@@ -0,0 +1,122 @@
+//! Module for client-related configuration and structs
+
+use core::str::FromStr;
+use std::{path::Path, time::Duration};
+
+use derive_more::Display;
+use eyre::Result;
+use iroha_config::{
+    base,
+    base::{FromEnv, StdEnv, UnwrapPartial},
+};
+use iroha_crypto::prelude::*;
+use iroha_data_model::{prelude::*, ChainId};
+use iroha_primitives::small::SmallStr;
+use serde::{Deserialize, Serialize};
+use serde_with::{DeserializeFromStr, SerializeDisplay};
+use url::Url;
+
+use crate::config::user::RootPartial;
+
+mod user;
+
+#[allow(missing_docs)]
+pub const DEFAULT_TRANSACTION_TIME_TO_LIVE: Duration = Duration::from_secs(100);
+#[allow(missing_docs)]
+pub const DEFAULT_TRANSACTION_STATUS_TIMEOUT: Duration = Duration::from_secs(15);
+#[allow(missing_docs)]
+pub const DEFAULT_TRANSACTION_NONCE: bool = false;
+
+/// Valid web auth login string. See [`WebLogin::from_str`]
+#[derive(Debug, Display, Clone, PartialEq, Eq, DeserializeFromStr, SerializeDisplay)]
+pub struct WebLogin(SmallStr);
+
+impl FromStr for WebLogin {
+    type Err = eyre::ErrReport;
+
+    /// Validates that the string is a valid web login
+    ///
+    /// # Errors
+    /// Fails if `login` contains the `:` character, which is reserved as the
+    /// login/password separator in HTTP Basic Authentication credentials.
+    fn from_str(login: &str) -> Result<Self, Self::Err> {
+        if login.contains(':') {
+            eyre::bail!("The `:` character in `{login}` is not allowed");
+        }
+
+        Ok(Self(SmallStr::from_str(login)))
+    }
+}
+
+/// Basic Authentication credentials
+#[derive(Clone, Deserialize, Serialize, Debug, PartialEq, Eq)]
+pub struct BasicAuth {
+    /// Login for Basic Authentication
+    pub web_login: WebLogin,
+    /// Password for Basic Authentication
+    pub password: SmallStr,
+}
+
+/// Complete client configuration
+#[derive(Clone, Debug, Serialize)]
+#[allow(missing_docs)]
+pub struct Config {
+    pub chain_id: ChainId,
+    pub account_id: AccountId,
+    pub key_pair: KeyPair,
+    pub basic_auth: Option<BasicAuth>,
+    // FIXME: or use `OnlyHttpUrl` here?
+    pub torii_api_url: Url,
+    pub transaction_ttl: Duration,
+    pub transaction_status_timeout: Duration,
+    pub transaction_add_nonce: bool,
+}
+
+impl Config {
+    /// Loads configuration from a TOML file and merges in values from the environment
+    ///
+    /// # Errors
+    /// - unable to load config from a TOML file
+    /// - the config is invalid
+    pub fn load(path: impl AsRef<Path>) -> std::result::Result<Self, eyre::Report> {
+        let config = RootPartial::from_toml(path)?;
+        let config = config.merge(RootPartial::from_env(&StdEnv)?);
+        Ok(config.unwrap_partial()?.parse()?)
+    }
+}
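`Config::load` above is the whole layered-configuration story of this PR in four lines. The same pipeline, spelled out stage by stage (names are from the diff; error plumbing simplified for illustration):

```rust
// A sketch of `Config::load`'s stages, not an exact re-implementation.
fn load_sketch(path: impl AsRef<std::path::Path>) -> eyre::Result<Config> {
    let file_layer = RootPartial::from_toml(path)?;   // 1. parse the TOML layer
    let env_layer = RootPartial::from_env(&StdEnv)?;  // 2. read the env layer (`TORII_URL`, ...)
    let merged = file_layer.merge(env_layer);         // 3. combine the partial layers
    let complete = merged.unwrap_partial()?;          // 4. error out on missing required fields
    Ok(complete.parse()?)                             // 5. semantic validation -> `Config`
}
```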
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn web_login_ok() {
+        let _ok = WebLogin::from_str("alice").expect("input is valid");
+    }
+
+    #[test]
+    fn web_login_bad() {
+        let _err = WebLogin::from_str("alice:wonderland").expect_err("input has `:`");
+    }
+
+    #[test]
+    fn parse_full_toml_config() {
+        let _: RootPartial = toml::toml! {
+            chain_id = "00000000-0000-0000-0000-000000000000"
+            torii_url = "http://127.0.0.1:8080/"
+
+            [basic_auth]
+            web_login = "mad_hatter"
+            password = "ilovetea"
+
+            [account]
+            id = "alice@wonderland"
+            public_key = "ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0"
+            private_key = { digest_function = "ed25519", payload = "9ac47abf59b356e0bd7dcbbbb4dec080e302156a48ca907e47cb6aea1d32719e7233bfc89dcbd68c19fde6ce6158225298ec1131b6a130d1aeb454c1ab5183c0" }
+
+            [transaction]
+            time_to_live = 100_000
+            status_timeout = 100_000
+            nonce = false
+        }.try_into().unwrap();
+    }
+}
diff --git a/client/src/config/user.rs b/client/src/config/user.rs
new file mode 100644
index 00000000000..30a684e5bac
--- /dev/null
+++ b/client/src/config/user.rs
@@ -0,0 +1,195 @@
+//! User configuration view.
+
+mod boilerplate;
+
+use std::{fs::File, io::Read, path::Path, str::FromStr, time::Duration};
+
+pub use boilerplate::*;
+use eyre::{eyre, Context, Report};
+use iroha_config::base::{Emitter, ErrorsCollection};
+use iroha_crypto::{KeyPair, PrivateKey, PublicKey};
+use iroha_data_model::{account::AccountId, ChainId};
+use merge::Merge;
+use serde_with::DeserializeFromStr;
+use url::Url;
+
+use crate::config::BasicAuth;
+
+impl RootPartial {
+    /// Reads the partial layer from TOML
+    ///
+    /// # Errors
+    /// - File not found
+    /// - File contents are not valid TOML
+    pub fn from_toml(path: impl AsRef<Path>) -> eyre::Result<Self> {
+        let contents = {
+            let mut contents = String::new();
+            File::open(path.as_ref())
+                .wrap_err_with(|| {
+                    eyre!("cannot open file at location `{}`", path.as_ref().display())
+                })?
+                .read_to_string(&mut contents)?;
+            contents
+        };
+        let layer: Self = toml::from_str(&contents).wrap_err("failed to parse toml")?;
+        Ok(layer)
+    }
+
+    /// Merge other into self
+    #[must_use]
+    pub fn merge(mut self, other: Self) -> Self {
+        Merge::merge(&mut self, other);
+        self
+    }
+}
+
+/// Root of the user configuration
+#[derive(Clone, Debug)]
+#[allow(missing_docs)]
+pub struct Root {
+    pub chain_id: ChainId,
+    pub torii_url: OnlyHttpUrl,
+    pub basic_auth: Option<BasicAuth>,
+    pub account: Account,
+    pub transaction: Transaction,
+}
+
+impl Root {
+    /// Validates user configuration for semantic errors and constructs a complete
+    /// [`super::Config`].
+    ///
+    /// # Errors
+    /// If a set of validity errors occurs.
+    pub fn parse(self) -> Result<super::Config, ErrorsCollection<Report>> {
+        let Self {
+            chain_id,
+            torii_url,
+            basic_auth,
+            account:
+                Account {
+                    id: account_id,
+                    public_key,
+                    private_key,
+                },
+            transaction:
+                Transaction {
+                    time_to_live: tx_ttl,
+                    status_timeout: tx_timeout,
+                    nonce: tx_add_nonce,
+                },
+        } = self;
+
+        let mut emitter = Emitter::new();
+
+        // TODO: validate if TTL is too small?
+
+        if tx_timeout > tx_ttl {
+            // TODO:
+            //   would be nice to provide a nice report with spans in the input
+            //   pointing out source data in provided config files
+            // FIXME: explain why it should be smaller
+            emitter.emit(eyre!(
+                "transaction status timeout should be smaller than its time-to-live"
+            ))
+        }
+
+        let key_pair = KeyPair::new(public_key, private_key)
+            .wrap_err("failed to construct a key pair")
+            .map_or_else(
+                |err| {
+                    emitter.emit(err);
+                    None
+                },
+                Some,
+            );
+
+        emitter.finish()?;
+
+        Ok(super::Config {
+            chain_id,
+            account_id,
+            key_pair: key_pair.unwrap(),
+            torii_api_url: torii_url.0,
+            basic_auth,
+            transaction_ttl: tx_ttl,
+            transaction_status_timeout: tx_timeout,
+            transaction_add_nonce: tx_add_nonce,
+        })
+    }
+}
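`Root::parse` leans on `Emitter` to accumulate every validation failure before returning, rather than bailing on the first one. The core shape, as a sketch of the `iroha_config_base` API used above (we assume `finish()` returns `Ok(())` when nothing was emitted, as its `?` usage suggests):

```rust
use std::time::Duration;

use eyre::{eyre, Report};
use iroha_config::base::{Emitter, ErrorsCollection};

fn validate(tx_ttl: Duration, tx_timeout: Duration) -> Result<(), ErrorsCollection<Report>> {
    let mut emitter = Emitter::new();
    if tx_timeout > tx_ttl {
        // Emitting does not return early, so later checks still run and
        // the user sees all configuration problems in a single pass.
        emitter.emit(eyre!(
            "transaction status timeout should be smaller than its time-to-live"
        ));
    }
    // ...further checks may emit more errors here...
    emitter.finish()
}
```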
+
+#[derive(Debug, Clone)]
+#[allow(missing_docs)]
+pub struct Account {
+    pub id: AccountId,
+    pub public_key: PublicKey,
+    pub private_key: PrivateKey,
+}
+
+#[derive(Debug, Clone, Copy)]
+#[allow(missing_docs)]
+pub struct Transaction {
+    pub time_to_live: Duration,
+    pub status_timeout: Duration,
+    pub nonce: bool,
+}
+
+/// A [`Url`] that is only allowed to have the HTTP scheme inside
+#[derive(Debug, Clone, Eq, PartialEq, DeserializeFromStr)]
+pub struct OnlyHttpUrl(Url);
+
+impl FromStr for OnlyHttpUrl {
+    type Err = ParseHttpUrlError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let url = Url::from_str(s)?;
+        if url.scheme() == "http" {
+            Ok(Self(url))
+        } else {
+            Err(ParseHttpUrlError::NotHttp {
+                found: url.scheme().to_owned(),
+            })
+        }
+    }
+}
+
+/// Possible errors that might occur for [`FromStr::from_str`] for [`OnlyHttpUrl`].
+#[derive(Debug, thiserror::Error)]
+pub enum ParseHttpUrlError {
+    /// Unable to parse the url
+    #[error(transparent)]
+    Parse(#[from] url::ParseError),
+    /// Parsed fine, but doesn't contain HTTP
+    #[error("expected `http` scheme, found: `{found}`")]
+    NotHttp {
+        /// What scheme was actually found
+        found: String,
+    },
+}
+
+#[cfg(test)]
+mod tests {
+    use std::collections::HashSet;
+
+    use iroha_config::base::{FromEnv as _, TestEnv};
+
+    use super::*;
+
+    #[test]
+    fn parses_all_envs() {
+        let env = TestEnv::new().set("TORII_URL", "http://localhost:8080");
+
+        let _layer = RootPartial::from_env(&env).expect("should not fail since env is valid");
+
+        assert_eq!(env.unvisited(), HashSet::new())
+    }
+
+    #[test]
+    fn non_http_url_error() {
+        let error = "https://localhost:1123"
+            .parse::<OnlyHttpUrl>()
+            .expect_err("should not allow https");
+
+        assert_eq!(format!("{error}"), "expected `http` scheme, found: `https`");
+    }
+}
diff --git a/client/src/config/user/boilerplate.rs b/client/src/config/user/boilerplate.rs
new file mode 100644
index 00000000000..500b13afecb
--- /dev/null
+++ b/client/src/config/user/boilerplate.rs
@@ -0,0 +1,147 @@
+//! Code to be generated by a proc macro in future
+
+#![allow(missing_docs)]
+
+use std::error::Error;
+
+use iroha_config::base::{
+    Emitter, FromEnv, HumanDuration, Merge, ParseEnvResult, UnwrapPartial, UnwrapPartialResult,
+    UserField,
+};
+use iroha_crypto::{PrivateKey, PublicKey};
+use iroha_data_model::{account::AccountId, ChainId};
+use serde::Deserialize;
+
+use crate::config::{
+    base::{FromEnvResult, ReadEnv},
+    user::{Account, OnlyHttpUrl, Root, Transaction},
+    BasicAuth, DEFAULT_TRANSACTION_NONCE, DEFAULT_TRANSACTION_STATUS_TIMEOUT,
+    DEFAULT_TRANSACTION_TIME_TO_LIVE,
+};
+
+#[derive(Debug, Clone, Deserialize, Eq, PartialEq, Default, Merge)]
+#[serde(deny_unknown_fields, default)]
+pub struct RootPartial {
+    pub chain_id: UserField<ChainId>,
+    pub torii_url: UserField<OnlyHttpUrl>,
+    pub basic_auth: UserField<BasicAuth>,
+    pub account: AccountPartial,
+    pub transaction: TransactionPartial,
+}
+
+impl RootPartial {
+    #[allow(unused)]
+    pub fn new() -> Self {
+        // TODO: gen with macro
+        Self::default()
+    }
+}
+
+impl FromEnv for RootPartial {
+    fn from_env<E: Error, R: ReadEnv<E>>(env: &R) -> FromEnvResult<Self>
+    where
+        Self: Sized,
+    {
+        let mut emitter = Emitter::new();
+
+        let torii_url =
+            ParseEnvResult::parse_simple(&mut emitter, env, "TORII_URL", "torii_url").into();
+
+        emitter.finish()?;
+
+        Ok(Self {
+            chain_id: None.into(),
+            torii_url,
+            basic_auth: None.into(),
+            account: AccountPartial::default(),
+            transaction: TransactionPartial::default(),
+        })
+    }
+}
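Note that this partial reads only `TORII_URL` from the environment, as `from_env` above shows. How an env layer composes with a file layer (using the diff's own `TestEnv` helper; which layer wins is decided by `UserField`'s `Merge` implementation):

```rust
use iroha_config::base::{FromEnv as _, TestEnv};

// `file_layer` is assumed to come from `RootPartial::from_toml` elsewhere.
fn merge_env_over_file(file_layer: RootPartial) -> RootPartial {
    // An env layer carrying just `TORII_URL`, built with the same
    // `TestEnv` helper the module's tests use.
    let env = TestEnv::new().set("TORII_URL", "http://localhost:9090");
    let env_layer = RootPartial::from_env(&env).expect("env is valid");
    file_layer.merge(env_layer)
}
```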
+
+impl UnwrapPartial for RootPartial {
+    type Output = Root;
+
+    fn unwrap_partial(self) -> UnwrapPartialResult<Self::Output> {
+        let mut emitter = Emitter::new();
+
+        if self.chain_id.is_none() {
+            emitter.emit_missing_field("chain_id");
+        }
+        if self.torii_url.is_none() {
+            emitter.emit_missing_field("torii_url");
+        }
+        let account = emitter.try_unwrap_partial(self.account);
+        let transaction = emitter.try_unwrap_partial(self.transaction);
+
+        emitter.finish()?;
+
+        Ok(Root {
+            chain_id: self.chain_id.get().unwrap(),
+            torii_url: self.torii_url.get().unwrap(),
+            basic_auth: self.basic_auth.get(),
+            account: account.unwrap(),
+            transaction: transaction.unwrap(),
+        })
+    }
+}
+
+#[derive(Debug, Clone, Deserialize, Eq, PartialEq, Default, Merge)]
+#[serde(deny_unknown_fields, default)]
+pub struct AccountPartial {
+    pub id: UserField<AccountId>,
+    pub public_key: UserField<PublicKey>,
+    pub private_key: UserField<PrivateKey>,
+}
+
+impl UnwrapPartial for AccountPartial {
+    type Output = Account;
+
+    fn unwrap_partial(self) -> UnwrapPartialResult<Self::Output> {
+        let mut emitter = Emitter::new();
+
+        if self.id.is_none() {
+            emitter.emit_missing_field("account.id");
+        }
+        if self.public_key.is_none() {
+            emitter.emit_missing_field("account.public_key");
+        }
+        if self.private_key.is_none() {
+            emitter.emit_missing_field("account.private_key");
+        }
+
+        emitter.finish()?;
+
+        Ok(Account {
+            id: self.id.get().unwrap(),
+            public_key: self.public_key.get().unwrap(),
+            private_key: self.private_key.get().unwrap(),
+        })
+    }
+}
+
+#[derive(Debug, Clone, Deserialize, Eq, PartialEq, Default, Merge)]
+#[serde(deny_unknown_fields, default)]
+pub struct TransactionPartial {
+    pub time_to_live: UserField<HumanDuration>,
+    pub status_timeout: UserField<HumanDuration>,
+    pub nonce: UserField<bool>,
+}
+
+impl UnwrapPartial for TransactionPartial {
+    type Output = Transaction;
+
+    fn unwrap_partial(self) -> UnwrapPartialResult<Self::Output> {
+        Ok(Transaction {
+            time_to_live: self
+                .time_to_live
+                .get()
+                .map_or(DEFAULT_TRANSACTION_TIME_TO_LIVE, HumanDuration::get),
+            status_timeout: self
+                .status_timeout
+                .get()
+                .map_or(DEFAULT_TRANSACTION_STATUS_TIMEOUT, HumanDuration::get),
+            nonce: self.nonce.get().unwrap_or(DEFAULT_TRANSACTION_NONCE),
+        })
+    }
+}
diff --git a/client/src/lib.rs b/client/src/lib.rs
index 239a6eb5a7f..3ccb8fdb45c 100644
--- a/client/src/lib.rs
+++ b/client/src/lib.rs
@@ -2,6 +2,7 @@

 /// Module with iroha client itself
 pub mod client;
+pub mod config;
 /// Module with general communication primitives like an HTTP request builder.
 pub mod http;
 mod http_default;
@@ -9,41 +10,33 @@ mod query_builder;

 /// Module containing sample configurations for tests and benchmarks.
 pub mod samples {
+    use url::Url;
+
     use crate::{
-        config::{torii::DEFAULT_API_ADDR, Configuration, ConfigurationProxy},
+        config::{
+            Config, DEFAULT_TRANSACTION_NONCE, DEFAULT_TRANSACTION_STATUS_TIMEOUT,
+            DEFAULT_TRANSACTION_TIME_TO_LIVE,
+        },
         crypto::KeyPair,
         data_model::ChainId,
     };

     /// Get sample client configuration.
-    pub fn get_client_config(chain_id: ChainId, key_pair: &KeyPair) -> Configuration {
-        let (public_key, private_key) = key_pair.clone().into();
-        ConfigurationProxy {
-            chain_id: Some(chain_id),
-            public_key: Some(public_key),
-            private_key: Some(private_key),
-            account_id: Some(
-                "alice@wonderland"
-                    .parse()
-                    .expect("This account ID should be valid"),
-            ),
-            torii_api_url: Some(
-                format!("http://{DEFAULT_API_ADDR}")
-                    .parse()
-                    .expect("Should be a valid url"),
-            ),
-            ..ConfigurationProxy::default()
+    pub fn get_client_config(chain_id: ChainId, key_pair: KeyPair, torii_api_url: Url) -> Config {
+        Config {
+            chain_id,
+            key_pair,
+            torii_api_url,
+            account_id: "alice@wonderland"
+                .parse()
+                .expect("This account ID should be valid"),
+            basic_auth: None,
+            transaction_ttl: DEFAULT_TRANSACTION_TIME_TO_LIVE,
+            transaction_status_timeout: DEFAULT_TRANSACTION_STATUS_TIMEOUT,
+            transaction_add_nonce: DEFAULT_TRANSACTION_NONCE,
         }
-        .build()
-        .expect("Client config should build as all required fields were provided")
     }
 }

-pub mod config {
-    //!
Module for client-related configuration and structs - - pub use iroha_config::{client::*, client_api as api, path, torii::uri as torii}; -} - pub use iroha_crypto as crypto; pub use iroha_data_model as data_model; diff --git a/client/tests/integration/add_account.rs b/client/tests/integration/add_account.rs index d46b3bb65af..d7de69d5041 100644 --- a/client/tests/integration/add_account.rs +++ b/client/tests/integration/add_account.rs @@ -2,7 +2,7 @@ use std::thread; use eyre::Result; use iroha_client::{client, data_model::prelude::*}; -use iroha_config::iroha::Configuration; +use iroha_config::parameters::actual::Root as Config; use test_network::*; #[test] @@ -11,7 +11,7 @@ fn client_add_account_with_name_length_more_than_limit_should_not_commit_transac let (_rt, _peer, test_client) = ::new().with_port(10_505).start_with_runtime(); wait_for_genesis_committed(&vec![test_client.clone()], 0); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); let normal_account_id: AccountId = "bob@wonderland".parse().expect("Valid"); let create_account = Register::account(Account::new(normal_account_id.clone(), [])); diff --git a/client/tests/integration/add_domain.rs b/client/tests/integration/add_domain.rs index bb889c25c15..d4cfe89c3b3 100644 --- a/client/tests/integration/add_domain.rs +++ b/client/tests/integration/add_domain.rs @@ -2,7 +2,7 @@ use std::thread; use eyre::Result; use iroha_client::{client, data_model::prelude::*}; -use iroha_config::iroha::Configuration; +use iroha_config::parameters::actual::Root as Config; use test_network::*; #[test] @@ -10,7 +10,7 @@ fn client_add_domain_with_name_length_more_than_limit_should_not_commit_transact { let (_rt, _peer, test_client) = ::new().with_port(10_500).start_with_runtime(); wait_for_genesis_committed(&vec![test_client.clone()], 0); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); // Given diff --git a/client/tests/integration/asset.rs b/client/tests/integration/asset.rs index 31d77cc26ae..7838742fd71 100644 --- a/client/tests/integration/asset.rs +++ b/client/tests/integration/asset.rs @@ -6,7 +6,7 @@ use iroha_client::{ crypto::{KeyPair, PublicKey}, data_model::prelude::*, }; -use iroha_config::iroha::Configuration; +use iroha_config::parameters::actual::Root as Config; use iroha_primitives::fixed::Fixed; use serde_json::json; use test_network::*; @@ -205,7 +205,7 @@ fn client_add_asset_with_decimal_should_increase_asset_amount() -> Result<()> { fn client_add_asset_with_name_length_more_than_limit_should_not_commit_transaction() -> Result<()> { let (_rt, _peer, test_client) = ::new().with_port(10_520).start_with_runtime(); wait_for_genesis_committed(&[test_client.clone()], 0); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); // Given let normal_asset_definition_id = AssetDefinitionId::from_str("xor#wonderland").expect("Valid"); @@ -277,7 +277,7 @@ fn find_rate_and_make_exchange_isi_should_succeed() { alice_id.clone(), ); - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let grant_asset_transfer_tx = TransactionBuilder::new(chain_id, asset_id.account_id().clone()) .with_instructions([allow_alice_to_transfer_asset]) diff --git a/client/tests/integration/asset_propagation.rs b/client/tests/integration/asset_propagation.rs index 6047dea994e..99a834017db 100644 --- a/client/tests/integration/asset_propagation.rs +++ b/client/tests/integration/asset_propagation.rs @@ -9,7 +9,7 
@@ use iroha_client::{ prelude::*, }, }; -use iroha_config::iroha::Configuration; +use iroha_config::parameters::actual::Root as Config; use test_network::*; #[test] @@ -18,7 +18,7 @@ fn client_add_asset_quantity_to_existing_asset_should_increase_asset_amount_on_a // Given let (_rt, network, client) = Network::start_test_with_runtime(4, Some(10_450)); wait_for_genesis_committed(&network.clients(), 0); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); client.submit_all_blocking( ParametersBuilder::new() diff --git a/client/tests/integration/burn_public_keys.rs b/client/tests/integration/burn_public_keys.rs index 2bd40263a08..a50bb6cec0e 100644 --- a/client/tests/integration/burn_public_keys.rs +++ b/client/tests/integration/burn_public_keys.rs @@ -13,7 +13,7 @@ fn submit( HashOf, eyre::Result>, ) { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let tx = if let Some((account_id, keypair)) = submitter { TransactionBuilder::new(chain_id, account_id) diff --git a/client/tests/integration/connected_peers.rs b/client/tests/integration/connected_peers.rs index 6cc1df7cf26..0bf17809514 100644 --- a/client/tests/integration/connected_peers.rs +++ b/client/tests/integration/connected_peers.rs @@ -8,7 +8,7 @@ use iroha_client::{ peer::Peer as DataModelPeer, }, }; -use iroha_config::iroha::Configuration; +use iroha_config::parameters::actual::Root as Config; use iroha_primitives::unique_vec; use rand::{seq::SliceRandom, thread_rng, Rng}; use test_network::*; @@ -29,7 +29,7 @@ fn connected_peers_with_f_1_0_1() -> Result<()> { fn register_new_peer() -> Result<()> { let (_rt, network, _) = Network::start_test_with_runtime(4, Some(11_180)); wait_for_genesis_committed(&network.clients(), 0); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); let mut peer_clients: Vec<_> = Network::peers(&network) .zip(Network::clients(&network)) @@ -38,13 +38,13 @@ fn register_new_peer() -> Result<()> { check_status(&peer_clients, 1); // Start new peer - let mut configuration = Configuration::test(); - configuration.sumeragi.trusted_peers.peers = + let mut configuration = Config::test(); + configuration.sumeragi.trusted_peers = unique_vec![peer_clients.choose(&mut thread_rng()).unwrap().0.id.clone()]; let rt = Runtime::test(); let new_peer = rt.block_on( PeerBuilder::new() - .with_configuration(configuration) + .with_config(configuration) .with_into_genesis(WithGenesis::None) .with_port(11_225) .start(), @@ -75,7 +75,7 @@ fn connected_peers_with_f(faults: u64, start_port: Option) -> Result<()> { start_port, ); wait_for_genesis_committed(&network.clients(), 0); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); let mut peer_clients: Vec<_> = Network::peers(&network) .zip(Network::clients(&network)) diff --git a/client/tests/integration/domain_owner.rs b/client/tests/integration/domain_owner.rs index f36ce73df85..d2901928317 100644 --- a/client/tests/integration/domain_owner.rs +++ b/client/tests/integration/domain_owner.rs @@ -8,7 +8,7 @@ use test_network::*; #[test] fn domain_owner_domain_permissions() -> Result<()> { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let (_rt, _peer, test_client) = ::new().with_port(11_080).start_with_runtime(); wait_for_genesis_committed(&[test_client.clone()], 0); @@ -147,7 +147,7 @@ fn domain_owner_asset_definition_permissions() -> Result<()> { let (_rt, _peer, test_client) = 
::new().with_port(11_085).start_with_runtime(); wait_for_genesis_committed(&[test_client.clone()], 0); - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let kingdom_id: DomainId = "kingdom".parse()?; let bob_id: AccountId = "bob@kingdom".parse()?; let rabbit_id: AccountId = "rabbit@kingdom".parse()?; @@ -212,7 +212,7 @@ fn domain_owner_asset_definition_permissions() -> Result<()> { #[test] fn domain_owner_asset_permissions() -> Result<()> { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let (_rt, _peer, test_client) = ::new().with_port(11_090).start_with_runtime(); wait_for_genesis_committed(&[test_client.clone()], 0); diff --git a/client/tests/integration/events/pipeline.rs b/client/tests/integration/events/pipeline.rs index 89d6c91bf0b..aa546b63153 100644 --- a/client/tests/integration/events/pipeline.rs +++ b/client/tests/integration/events/pipeline.rs @@ -8,7 +8,7 @@ use iroha_client::{ prelude::*, }, }; -use iroha_config::iroha::Configuration; +use iroha_config::parameters::actual::Root as Config; use test_network::*; // Needed to re-enable ignored tests. @@ -41,7 +41,7 @@ fn test_with_instruction_and_status_and_port( Network::start_test_with_runtime(PEER_COUNT.try_into().unwrap(), Some(port)); let clients = network.clients(); wait_for_genesis_committed(&clients, 0); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); client.submit_all_blocking( ParametersBuilder::new() diff --git a/client/tests/integration/multiple_blocks_created.rs b/client/tests/integration/multiple_blocks_created.rs index ccc6c3b020e..1b56abe5590 100644 --- a/client/tests/integration/multiple_blocks_created.rs +++ b/client/tests/integration/multiple_blocks_created.rs @@ -9,7 +9,7 @@ use iroha_client::{ prelude::*, }, }; -use iroha_config::iroha::Configuration; +use iroha_config::parameters::actual::Root as Config; use test_network::*; const N_BLOCKS: usize = 510; @@ -20,7 +20,7 @@ fn long_multiple_blocks_created() -> Result<()> { // Given let (_rt, network, client) = Network::start_test_with_runtime(4, Some(10_965)); wait_for_genesis_committed(&network.clients(), 0); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); client.submit_all_blocking( ParametersBuilder::new() diff --git a/client/tests/integration/multisignature_account.rs b/client/tests/integration/multisignature_account.rs index be7aa0a224f..c057ef04ea1 100644 --- a/client/tests/integration/multisignature_account.rs +++ b/client/tests/integration/multisignature_account.rs @@ -6,14 +6,14 @@ use iroha_client::{ crypto::KeyPair, data_model::prelude::*, }; -use iroha_config::iroha::Configuration; +use iroha_config::parameters::actual::Root as Config; use test_network::*; #[test] fn transaction_signed_by_new_signatory_of_account_should_pass() -> Result<()> { let (_rt, peer, client) = ::new().with_port(10_605).start_with_runtime(); wait_for_genesis_committed(&[client.clone()], 0); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); // Given let account_id: AccountId = "alice@wonderland".parse().expect("Valid"); diff --git a/client/tests/integration/multisignature_transaction.rs b/client/tests/integration/multisignature_transaction.rs index 176e03061bb..66c218e32ff 100644 --- a/client/tests/integration/multisignature_transaction.rs +++ b/client/tests/integration/multisignature_transaction.rs @@ -3,14 +3,14 @@ use std::{str::FromStr as _, thread, time::Duration}; use 
eyre::Result; use iroha_client::{ client::{self, Client, QueryResult}, - config::Configuration as ClientConfiguration, + config::Config as ClientConfig, crypto::KeyPair, data_model::{ parameter::{default::MAX_TRANSACTIONS_IN_BLOCK, ParametersBuilder}, prelude::*, }, }; -use iroha_config::iroha::Configuration; +use iroha_config::parameters::actual::Root as Config; use test_network::*; #[allow(clippy::too_many_lines)] @@ -18,7 +18,7 @@ use test_network::*; fn multisignature_transactions_should_wait_for_all_signatures() -> Result<()> { let (_rt, network, client) = Network::start_test_with_runtime(4, Some(10_945)); wait_for_genesis_committed(&network.clients(), 0); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); client.submit_all_blocking( ParametersBuilder::new() @@ -39,8 +39,8 @@ fn multisignature_transactions_should_wait_for_all_signatures() -> Result<()> { alice_id.clone(), ); - let mut client_configuration = ClientConfiguration::test(&network.genesis.api_address); - let client = Client::new(&client_configuration)?; + let mut client_config = ClientConfig::test(&network.genesis.api_address); + let client = Client::new(client_config.clone()); let instructions: [InstructionBox; 2] = [create_asset.into(), set_signature_condition.into()]; client.submit_all_blocking(instructions)?; @@ -49,24 +49,22 @@ fn multisignature_transactions_should_wait_for_all_signatures() -> Result<()> { let asset_id = AssetId::new(asset_definition_id, alice_id.clone()); let mint_asset = Mint::asset_quantity(quantity, asset_id.clone()); - let (public_key1, private_key1) = alice_key_pair.into(); - client_configuration.account_id = alice_id.clone(); - client_configuration.public_key = public_key1; - client_configuration.private_key = private_key1; - let client = Client::new(&client_configuration)?; + client_config.account_id = alice_id.clone(); + client_config.key_pair = alice_key_pair; + let client = Client::new(client_config.clone()); let instructions = [mint_asset.clone()]; let transaction = client.build_transaction(instructions, UnlimitedMetadata::new()); client.submit_transaction(&client.sign_transaction(transaction))?; thread::sleep(pipeline_time); //Then - client_configuration.torii_api_url = format!( + client_config.torii_api_url = format!( "http://{}", &network.peers.values().last().unwrap().api_address, ) .parse() .unwrap(); - let client_1 = Client::new(&client_configuration).expect("Invalid client configuration"); + let client_1 = Client::new(client_config.clone()); let request = client::asset::by_account_id(alice_id); let assets = client_1 .request(request.clone())? 
@@ -76,10 +74,9 @@ fn multisignature_transactions_should_wait_for_all_signatures() -> Result<()> { 2, // Alice has roses and cabbage from Genesis, but doesn't yet have camomile "Multisignature transaction was committed before all required signatures were added" ); - let (public_key2, private_key2) = key_pair_2.into(); - client_configuration.public_key = public_key2; - client_configuration.private_key = private_key2; - let client_2 = Client::new(&client_configuration)?; + + client_config.key_pair = key_pair_2; + let client_2 = Client::new(client_config); let instructions = [mint_asset]; let transaction = client_2.build_transaction(instructions, UnlimitedMetadata::new()); let transaction = client_2 diff --git a/client/tests/integration/offline_peers.rs b/client/tests/integration/offline_peers.rs index 7627c6ddfbd..d7ce1b1fc57 100644 --- a/client/tests/integration/offline_peers.rs +++ b/client/tests/integration/offline_peers.rs @@ -6,7 +6,7 @@ use iroha_client::{ prelude::*, }, }; -use iroha_config::iroha::Configuration; +use iroha_config::parameters::actual::Root as Config; use iroha_crypto::KeyPair; use iroha_primitives::addr::socket_addr; use test_network::*; @@ -47,7 +47,7 @@ fn register_offline_peer() -> Result<()> { let (_rt, network, client) = Network::start_test_with_runtime(n_peers, Some(11_160)); wait_for_genesis_committed(&network.clients(), 0); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); let peer_clients = Network::clients(&network); check_status(&peer_clients, 1); diff --git a/client/tests/integration/permissions.rs b/client/tests/integration/permissions.rs index 5a9bf6881ee..0d95e964396 100644 --- a/client/tests/integration/permissions.rs +++ b/client/tests/integration/permissions.rs @@ -63,7 +63,7 @@ fn get_assets(iroha_client: &Client, id: &AccountId) -> Vec { #[test] #[ignore = "ignore, more in #2851"] fn permissions_disallow_asset_transfer() { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let (_rt, _peer, iroha_client) = ::new().with_port(10_730).start_with_runtime(); wait_for_genesis_committed(&[iroha_client.clone()], 0); @@ -120,7 +120,7 @@ fn permissions_disallow_asset_transfer() { #[test] #[ignore = "ignore, more in #2851"] fn permissions_disallow_asset_burn() { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let (_rt, _peer, iroha_client) = ::new().with_port(10_735).start_with_runtime(); @@ -195,7 +195,7 @@ fn account_can_query_only_its_own_domain() -> Result<()> { #[test] fn permissions_differ_not_only_by_names() { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let (_rt, _not_drop, client) = ::new().with_port(10_745).start_with_runtime(); @@ -292,7 +292,7 @@ fn permissions_differ_not_only_by_names() { #[test] #[allow(deprecated)] fn stored_vs_granted_token_payload() -> Result<()> { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let (_rt, _peer, iroha_client) = ::new().with_port(10_730).start_with_runtime(); wait_for_genesis_committed(&[iroha_client.clone()], 0); diff --git a/client/tests/integration/restart_peer.rs b/client/tests/integration/restart_peer.rs index 6c62a7bb393..00433722636 100644 --- a/client/tests/integration/restart_peer.rs +++ b/client/tests/integration/restart_peer.rs @@ -5,7 +5,7 @@ use iroha_client::{ client::{self, Client, QueryResult}, data_model::prelude::*, }; -use iroha_config::iroha::Configuration; +use iroha_config::parameters::actual::Root as Config; use rand::{seq::SliceRandom, 
thread_rng, Rng}; use test_network::*; use tokio::runtime::Runtime; @@ -21,7 +21,7 @@ fn restarted_peer_should_have_the_same_asset_amount() -> Result<()> { let (_rt, network, _) = Network::start_test_with_runtime(n_peers, Some(11_205)); wait_for_genesis_committed(&network.clients(), 0); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); let peer_clients = Network::clients(&network); let create_asset = diff --git a/client/tests/integration/roles.rs b/client/tests/integration/roles.rs index f5a3f266f95..f7cc75fdaa4 100644 --- a/client/tests/integration/roles.rs +++ b/client/tests/integration/roles.rs @@ -46,7 +46,7 @@ fn register_role_with_empty_token_params() -> Result<()> { /// @s8sato added: This test represents #2081 case. #[test] fn register_and_grant_role_for_metadata_access() -> Result<()> { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let (_rt, _peer, test_client) = ::new().with_port(10_700).start_with_runtime(); wait_for_genesis_committed(&vec![test_client.clone()], 0); diff --git a/client/tests/integration/triggers/time_trigger.rs b/client/tests/integration/triggers/time_trigger.rs index 9b9c76d3fe6..319467fc24a 100644 --- a/client/tests/integration/triggers/time_trigger.rs +++ b/client/tests/integration/triggers/time_trigger.rs @@ -5,7 +5,7 @@ use iroha_client::{ client::{self, Client, QueryResult}, data_model::{prelude::*, transaction::WasmSmartContract}, }; -use iroha_config::sumeragi::default::DEFAULT_CONSENSUS_ESTIMATION_MS; +use iroha_config::parameters::defaults::chain_wide::DEFAULT_CONSENSUS_ESTIMATION; use iroha_logger::info; use test_network::*; @@ -24,9 +24,9 @@ macro_rules! const_assert { #[test] #[allow(clippy::cast_precision_loss)] fn time_trigger_execution_count_error_should_be_less_than_15_percent() -> Result<()> { - const PERIOD_MS: u64 = 100; + const PERIOD: Duration = Duration::from_millis(100); const ACCEPTABLE_ERROR_PERCENT: u8 = 15; - const_assert!(PERIOD_MS < DEFAULT_CONSENSUS_ESTIMATION_MS); + const_assert!(PERIOD.as_millis() < DEFAULT_CONSENSUS_ESTIMATION.as_millis()); const_assert!(ACCEPTABLE_ERROR_PERCENT <= 100); let (_rt, _peer, mut test_client) = ::new().with_port(10_775).start_with_runtime(); @@ -42,8 +42,7 @@ fn time_trigger_execution_count_error_should_be_less_than_15_percent() -> Result let prev_value = get_asset_value(&mut test_client, asset_id.clone())?; - let schedule = - TimeSchedule::starting_at(start_time).with_period(Duration::from_millis(PERIOD_MS)); + let schedule = TimeSchedule::starting_at(start_time).with_period(PERIOD); let instruction = Mint::asset_quantity(1_u32, asset_id.clone()); let register_trigger = Register::trigger(Trigger::new( "mint_rose".parse()?, @@ -63,10 +62,10 @@ fn time_trigger_execution_count_error_should_be_less_than_15_percent() -> Result Duration::from_secs(1), 3, )?; - std::thread::sleep(Duration::from_millis(DEFAULT_CONSENSUS_ESTIMATION_MS)); + std::thread::sleep(DEFAULT_CONSENSUS_ESTIMATION); let finish_time = current_time(); - let average_count = finish_time.saturating_sub(start_time).as_millis() / u128::from(PERIOD_MS); + let average_count = finish_time.saturating_sub(start_time).as_millis() / PERIOD.as_millis(); let actual_value = get_asset_value(&mut test_client, asset_id)?; let expected_value = prev_value + u32::try_from(average_count)?; @@ -83,7 +82,7 @@ fn time_trigger_execution_count_error_should_be_less_than_15_percent() -> Result #[test] fn change_asset_metadata_after_1_sec() -> Result<()> { - const PERIOD_MS: u64 = 1000; + 
const PERIOD: Duration = Duration::from_secs(1); let (_rt, _peer, mut test_client) = ::new().with_port(10_660).start_with_runtime(); wait_for_genesis_committed(&vec![test_client.clone()], 0); @@ -96,7 +95,7 @@ fn change_asset_metadata_after_1_sec() -> Result<()> { let account_id = AccountId::from_str("alice@wonderland").expect("Valid"); let key = Name::from_str("petal")?; - let schedule = TimeSchedule::starting_at(start_time + Duration::from_millis(PERIOD_MS)); + let schedule = TimeSchedule::starting_at(start_time + PERIOD); let instruction = SetKeyValue::asset_definition(asset_definition_id.clone(), key.clone(), 3_u32.to_value()); let register_trigger = Register::trigger(Trigger::new( @@ -114,7 +113,7 @@ fn change_asset_metadata_after_1_sec() -> Result<()> { &mut test_client, &account_id, Duration::from_secs(1), - usize::try_from(PERIOD_MS / DEFAULT_CONSENSUS_ESTIMATION_MS + 1)?, + usize::try_from(PERIOD.as_millis() / DEFAULT_CONSENSUS_ESTIMATION.as_millis() + 1)?, )?; let value = test_client diff --git a/client/tests/integration/tx_history.rs b/client/tests/integration/tx_history.rs index 1a5fad4192d..c148b3410af 100644 --- a/client/tests/integration/tx_history.rs +++ b/client/tests/integration/tx_history.rs @@ -9,7 +9,7 @@ use iroha_client::{ client::{transaction, QueryResult}, data_model::{prelude::*, query::Pagination}, }; -use iroha_config::iroha::Configuration; +use iroha_config::parameters::actual::Root as Config; use test_network::*; #[ignore = "ignore, more in #2851"] @@ -18,7 +18,7 @@ fn client_has_rejected_and_acepted_txs_should_return_tx_history() -> Result<()> let (_rt, _peer, client) = ::new().with_port(10_715).start_with_runtime(); wait_for_genesis_committed(&vec![client.clone()], 0); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); // Given let account_id = AccountId::from_str("alice@wonderland")?; diff --git a/client/tests/integration/unregister_peer.rs b/client/tests/integration/unregister_peer.rs index fb45c9db8c4..e73112ae920 100644 --- a/client/tests/integration/unregister_peer.rs +++ b/client/tests/integration/unregister_peer.rs @@ -9,7 +9,7 @@ use iroha_client::{ prelude::*, }, }; -use iroha_config::iroha::Configuration; +use iroha_config::parameters::actual::Root as Config; use test_network::*; // Note the test is marked as `unstable`, not the network. @@ -60,7 +60,7 @@ fn check_assets( iroha_client .poll_request_with_period( client::asset::by_account_id(account_id.clone()), - Configuration::block_sync_gossip_time(), + Config::block_sync_gossip_time(), 15, |result| { let assets = result.collect::>>().expect("Valid"); @@ -100,7 +100,7 @@ fn init() -> Result<( AssetDefinitionId, )> { let (rt, network, client) = Network::start_test_with_runtime(4, Some(10_925)); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); iroha_logger::info!("Started"); let parameters = ParametersBuilder::new() .add_parameter(MAX_TRANSACTIONS_IN_BLOCK, 1u32)? 
diff --git a/client/tests/integration/unstable_network.rs b/client/tests/integration/unstable_network.rs index 84d1b2d9762..bfccf0bbad7 100644 --- a/client/tests/integration/unstable_network.rs +++ b/client/tests/integration/unstable_network.rs @@ -5,7 +5,7 @@ use iroha_client::{ client::{self, Client, QueryResult}, data_model::{prelude::*, Level}, }; -use iroha_config::iroha::Configuration; +use iroha_config::parameters::actual::Root as Config; use rand::seq::SliceRandom; use test_network::*; use tokio::runtime::Runtime; @@ -52,8 +52,9 @@ fn unstable_network( let rt = Runtime::test(); // Given let (network, iroha_client) = rt.block_on(async { - let mut configuration = Configuration::test(); - configuration.sumeragi.max_transactions_in_block = MAX_TRANSACTIONS_IN_BLOCK; + let mut configuration = Config::test(); + configuration.chain_wide.max_transactions_in_block = + MAX_TRANSACTIONS_IN_BLOCK.try_into().unwrap(); configuration.logger.level = Level::INFO; #[cfg(debug_assertions)] { @@ -72,7 +73,7 @@ fn unstable_network( }); wait_for_genesis_committed(&network.clients(), n_offline_peers); - let pipeline_time = Configuration::pipeline_time(); + let pipeline_time = Config::pipeline_time(); let account_id: AccountId = "alice@wonderland".parse().expect("Valid"); let asset_definition_id: AssetDefinitionId = "camomile#wonderland".parse().expect("Valid"); @@ -112,7 +113,7 @@ fn unstable_network( iroha_client .poll_request_with_period( client::asset::by_account_id(account_id.clone()), - Configuration::pipeline_time(), + Config::pipeline_time(), 4, |result| { let assets = result.collect::>>().expect("Valid"); diff --git a/client/tests/integration/upgrade.rs b/client/tests/integration/upgrade.rs index ec53fdbbe8b..b7d0f9a1c04 100644 --- a/client/tests/integration/upgrade.rs +++ b/client/tests/integration/upgrade.rs @@ -12,7 +12,7 @@ use test_network::*; #[test] fn executor_upgrade_should_work() -> Result<()> { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let (_rt, _peer, client) = ::new().with_port(10_795).start_with_runtime(); wait_for_genesis_committed(&vec![client.clone()], 0); diff --git a/client_cli/pytests/README.md b/client_cli/pytests/README.md index ad585372c95..f565dc71e26 100644 --- a/client_cli/pytests/README.md +++ b/client_cli/pytests/README.md @@ -54,6 +54,7 @@ The test model has the following structure: ```shell # Must be executed from the repo root: ./scripts/test_env.py setup + # Note: make sure you have installed packages from `./scripts/requirements.txt` ``` By default, this builds `iroha`, `iroha_client_cli`, and `kagami` binaries, and runs four peers with their API exposed through the `8080`-`8083` ports.\ @@ -64,7 +65,8 @@ The test model has the following structure: 3. 
Configure the tests by creating the following `.env` file in _this_ (`/client_cli/pytests/`) directory: ```shell - CLIENT_CLI_DIR=/path/to/iroha_client_cli/with/config.json/dir/ + CLIENT_CLI_BINARY=/path/to/iroha_client_cli + CLIENT_CLI_CONFIG=/path/to/config.toml TORII_API_PORT_MIN=8080 TORII_API_PORT_MAX=8083 ``` @@ -161,7 +163,8 @@ The variables: **Example**: ```shell -CLIENT_CLI_DIR=/path/to/iroha_client_cli/with/config.json/dir/ +CLIENT_CLI_BINARY=/path/to/iroha_client_cli +CLIENT_CLI_CONFIG=/path/to/config.toml TORII_API_PORT_MIN=8080 TORII_API_PORT_MAX=8083 ``` diff --git a/client_cli/pytests/common/settings.py b/client_cli/pytests/common/settings.py index c79aa7765f2..d5de68f753f 100644 --- a/client_cli/pytests/common/settings.py +++ b/client_cli/pytests/common/settings.py @@ -13,10 +13,9 @@ (os.path.dirname (os.path.abspath(__file__)))) -ROOT_DIR = os.environ.get("CLIENT_CLI_DIR", BASE_DIR) -PATH_CONFIG_CLIENT_CLI = os.path.join(ROOT_DIR, "config.json") -CLIENT_CLI_PATH = os.path.join(ROOT_DIR, "iroha_client_cli") +PATH_CONFIG_CLIENT_CLI = os.environ["CLIENT_CLI_CONFIG"] +CLIENT_CLI_PATH = os.environ["CLIENT_CLI_BINARY"] PORT_MIN = int(os.getenv('TORII_API_PORT_MIN', '8080')) PORT_MAX = int(os.getenv('TORII_API_PORT_MAX', '8083')) diff --git a/client_cli/pytests/poetry.lock b/client_cli/pytests/poetry.lock index 0da88839fa3..53341202632 100644 --- a/client_cli/pytests/poetry.lock +++ b/client_cli/pytests/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. [[package]] name = "allure-pytest" @@ -530,13 +530,13 @@ files = [ [[package]] name = "tomlkit" -version = "0.11.8" +version = "0.12.3" description = "Style preserving TOML library" optional = false python-versions = ">=3.7" files = [ - {file = "tomlkit-0.11.8-py3-none-any.whl", hash = "sha256:8c726c4c202bdb148667835f68d68780b9a003a9ec34167b6c673b38eff2a171"}, - {file = "tomlkit-0.11.8.tar.gz", hash = "sha256:9330fc7faa1db67b541b28e62018c17d20be733177d290a13b24c62d1614e0c3"}, + {file = "tomlkit-0.12.3-py3-none-any.whl", hash = "sha256:b0a645a9156dc7cb5d3a1f0d4bab66db287fcb8e0430bdd4664a095ea16414ba"}, + {file = "tomlkit-0.12.3.tar.gz", hash = "sha256:75baf5012d06501f07bee5bf8e801b9f343e7aac5a92581f20f80ce632e6b5a4"}, ] [[package]] @@ -637,4 +637,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "^3.11" -content-hash = "8c3a17410644637cb551ef878cacf0d76e83eded4d32af50e1312f934e24639b" +content-hash = "101321a5a8443974ff254d60b75c4a59c0da6a8b2e2387a8a3f666692a58b834" diff --git a/client_cli/pytests/pyproject.toml b/client_cli/pytests/pyproject.toml index 0fdeaaead5f..707d8df5d4b 100644 --- a/client_cli/pytests/pyproject.toml +++ b/client_cli/pytests/pyproject.toml @@ -11,6 +11,7 @@ faker = "*" allure-python-commons = "*" cryptography = "*" python-dotenv = "*" +tomlkit = "^0.12.3" [tool.poetry.dev-dependencies] pytest = "*" diff --git a/client_cli/pytests/src/client_cli/client_cli.py b/client_cli/pytests/src/client_cli/client_cli.py index cdd33b58c59..dfdc8629ef8 100644 --- a/client_cli/pytests/src/client_cli/client_cli.py +++ b/client_cli/pytests/src/client_cli/client_cli.py @@ -254,19 +254,21 @@ def execute(self, command=None): :return: The current ClientCli object. 
:rtype: ClientCli """ + self.config.randomise_torii_url() if command is None: command = self.command else: command = [self.BASE_PATH] + self.BASE_FLAGS + command.split() allure_command = ' '.join(map(str, command[3:])) print(allure_command) - with allure.step(f'{allure_command} on the {str(self.config.torii_api_port)} peer'): + with allure.step(f'{allure_command} on the {str(self.config.torii_url)} peer'): try: with subprocess.Popen( command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - text=True + text=True, + env=self.config.env ) as process: self.stdout, self.stderr = process.communicate() allure.attach( diff --git a/client_cli/pytests/src/client_cli/configuration.py b/client_cli/pytests/src/client_cli/configuration.py index 2f69e4edc57..a80b0e50202 100644 --- a/client_cli/pytests/src/client_cli/configuration.py +++ b/client_cli/pytests/src/client_cli/configuration.py @@ -2,7 +2,7 @@ This module provides a Config class to manage Iroha network configuration. """ -import json +import tomlkit import os import random from urllib.parse import urlparse @@ -11,8 +11,8 @@ class Config: """ Configuration class to handle Iroha network configuration. The class provides methods for loading - the configuration from a file, updating the TORII_API_URL with a random port number from the specified - range, and accessing the configuration values. + the configuration from a file, accessing the configuration values, and randomising Torii URL + to access different peers. :param port_min: The minimum port number for the TORII_API_URL. :type port_min: int @@ -24,6 +24,7 @@ def __init__(self, port_min, port_max): self.file = None self.port_min = port_min self.port_max = port_max + self._envs = dict() def load(self, path_config_client_cli): """ @@ -36,34 +37,40 @@ def load(self, path_config_client_cli): if not os.path.exists(path_config_client_cli): raise IOError(f"No config file found at {path_config_client_cli}") with open(path_config_client_cli, 'r', encoding='utf-8') as config_file: - self._config = json.load(config_file) + self._config = tomlkit.load(config_file) self.file = path_config_client_cli - def update_torii_api_port(self): + def randomise_torii_url(self): """ - Update the TORII_API_URL configuration value - with a random port number from the specified range. + Update Torii URL. + Note that in order for update to take effect, + `self.env` should be used when executing the client cli. :return: None """ - if self._config is None: - raise ValueError("No configuration loaded. Use load_config(path_config_client_cli) to load the configuration.") - parsed_url = urlparse(self._config['TORII_API_URL']) - new_netloc = parsed_url.hostname + ':' + str(random.randint(self.port_min, self.port_max)) - self._config['TORII_API_URL'] = parsed_url._replace(netloc=new_netloc).geturl() - with open(self.file, 'w', encoding='utf-8') as config_file: - json.dump(self._config, config_file) + parsed_url = urlparse(self._config["torii_url"]) + random_port = random.randint(self.port_min, self.port_max) + self._envs["TORII_URL"] = parsed_url._replace(netloc=f"{parsed_url.hostname}:{random_port}").geturl() @property - def torii_api_port(self): + def torii_url(self): """ - Get the TORII_API_URL configuration value after updating the port number. + Get the Torii URL set in ENV vars. - :return: The updated TORII_API_URL. 
+ :return: Torii URL :rtype: str """ - self.update_torii_api_port() - return self._config['TORII_API_URL'] + return self._envs["TORII_URL"] + + @property + def env(self): + """ + Get the environment variables set to execute the client cli with. + + :return: Dictionary with env vars (mixed with existing OS vars) + :rtype: dict + """ + return {**os.environ, **self._envs} @property def account_id(self): @@ -73,7 +80,7 @@ def account_id(self): :return: The ACCOUNT_ID. :rtype: str """ - return self._config['ACCOUNT_ID'] + return self._config['account']["id"] @property def account_name(self): @@ -103,4 +110,4 @@ def public_key(self): :return: The public key. :rtype: str """ - return self._config['PUBLIC_KEY'].split('ed0120')[1] + return self._config["account"]['public_key'].split('ed0120')[1] diff --git a/client_cli/pytests/src/client_cli/iroha.py b/client_cli/pytests/src/client_cli/iroha.py index 38174fefeac..e62e694dc6f 100644 --- a/client_cli/pytests/src/client_cli/iroha.py +++ b/client_cli/pytests/src/client_cli/iroha.py @@ -51,7 +51,12 @@ def domains(self) -> Dict[str, Dict]: :rtype: List[str] """ self._execute_command('domain') - domains = json.loads(self.stdout) + try: + domains = json.loads(self.stdout) + except json.decoder.JSONDecodeError as e: + print(f"JSON decode error occurred with this input:", self.stdout) + print(f"STDERR:", self.stderr) + raise domains_dict = { domain["id"]: domain for domain in domains } return domains_dict diff --git a/client_cli/src/main.rs b/client_cli/src/main.rs index 80a45b35066..25e8eaf93c0 100644 --- a/client_cli/src/main.rs +++ b/client_cli/src/main.rs @@ -16,10 +16,9 @@ use dialoguer::Confirm; use erased_serde::Serialize; use iroha_client::{ client::{Client, QueryResult}, - config::{path::Path, Configuration as ClientConfiguration, ConfigurationProxy}, + config::Config, data_model::prelude::*, }; -use iroha_config_base::proxy::{LoadFromDisk, LoadFromEnv, Override}; use iroha_primitives::addr::{Ipv4Addr, Ipv6Addr, SocketAddr}; /// Re-usable clap `--metadata ` (`-m`) argument. @@ -90,21 +89,9 @@ impl FromStr for ValueArg { #[derive(clap::Parser, Debug)] #[command(name = "iroha_client_cli", version = concat!("version=", env!("CARGO_PKG_VERSION"), " git_commit_sha=", env!("VERGEN_GIT_SHA")), author)] struct Args { - /// Path to the configuration file, defaults to `config.json`/`config.json5` - /// - /// Supported extensions are `.json` and `.json5`. By default, Iroha Client looks for a - /// `config` file with one of the supported extensions in the current working directory. - /// If the default config file is not found, Iroha will rely on default values and environment - /// variables. However, if the config path is set explicitly with this argument and the file - /// is not found, Iroha Client will exit with an error. 
- #[arg( - short, - long, - value_name("PATH"), - value_hint(clap::ValueHint::FilePath), - value_parser(Path::user_provided_str) - )] - config: Option, + /// Path to the configuration file + #[arg(short, long, value_name("PATH"), value_hint(clap::ValueHint::FilePath))] + config: PathBuf, /// More verbose output #[arg(short, long)] verbose: bool, @@ -146,7 +133,11 @@ enum Subcommand { /// Context inside which command is executed trait RunContext { /// Get access to configuration - fn configuration(&self) -> &ClientConfiguration; + fn configuration(&self) -> &Config; + + fn client_from_config(&self) -> Client { + Client::new(self.configuration().clone()) + } /// Skip check for MST fn skip_mst_check(&self) -> bool; @@ -161,12 +152,12 @@ trait RunContext { struct PrintJsonContext { write: W, - config: ClientConfiguration, + config: Config, skip_mst_check: bool, } impl RunContext for PrintJsonContext { - fn configuration(&self) -> &ClientConfiguration { + fn configuration(&self) -> &Config { &self.config } @@ -204,14 +195,13 @@ impl RunArgs for Subcommand { } } -// TODO: move into config. +// TODO: move into config? const RETRY_COUNT_MST: u32 = 1; const RETRY_IN_MST: Duration = Duration::from_millis(100); -static DEFAULT_CONFIG_PATH: &str = "config"; - fn main() -> Result<()> { color_eyre::install()?; + let Args { config: config_path, subcommand, @@ -219,22 +209,7 @@ fn main() -> Result<()> { skip_mst_check, } = clap::Parser::parse(); - let config = ConfigurationProxy::default(); - let config = if let Some(path) = config_path - .unwrap_or_else(|| Path::default(DEFAULT_CONFIG_PATH)) - .try_resolve() - .wrap_err("Failed to resolve config file")? - { - config.override_with(ConfigurationProxy::from_path(&*path)) - } else { - config - }; - let config = config.override_with( - ConfigurationProxy::from_std_env().wrap_err("Failed to read config from ENV")?, - ); - let config = config - .build() - .wrap_err("Failed to finalize configuration")?; + let config = Config::load(config_path)?; if verbose { eprintln!( @@ -263,7 +238,7 @@ fn submit( metadata: UnlimitedMetadata, context: &mut dyn RunContext, ) -> Result<()> { - let iroha_client = Client::new(context.configuration())?; + let iroha_client = context.client_from_config(); let instructions = instructions.into(); let tx = iroha_client.build_transaction(instructions, metadata); let transactions = if context.skip_mst_check() { @@ -322,7 +297,6 @@ mod filter { } mod events { - use iroha_client::client::Client; use super::*; @@ -349,7 +323,7 @@ mod events { } fn listen(filter: FilterBox, context: &mut dyn RunContext) -> Result<()> { - let iroha_client = Client::new(context.configuration())?; + let iroha_client = context.client_from_config(); eprintln!("Listening to events with filter: {filter:?}"); iroha_client .listen_for_events(filter) @@ -362,8 +336,6 @@ mod events { mod blocks { use std::num::NonZeroU64; - use iroha_client::client::Client; - use super::*; /// Get block stream from iroha peer @@ -381,7 +353,7 @@ mod blocks { } fn listen(height: NonZeroU64, context: &mut dyn RunContext) -> Result<()> { - let iroha_client = Client::new(context.configuration())?; + let iroha_client = context.client_from_config(); eprintln!("Listening to blocks from height: {height}"); iroha_client .listen_for_blocks(height) @@ -446,7 +418,7 @@ mod domain { impl RunArgs for List { fn run(self, context: &mut dyn RunContext) -> Result<()> { - let client = Client::new(context.configuration())?; + let client = context.client_from_config(); let vec = match self { Self::All => 
client @@ -682,7 +654,7 @@ mod account { impl RunArgs for List { fn run(self, context: &mut dyn RunContext) -> Result<()> { - let client = Client::new(context.configuration())?; + let client = context.client_from_config(); let vec = match self { Self::All => client @@ -752,7 +724,7 @@ mod account { impl RunArgs for ListPermissions { fn run(self, context: &mut dyn RunContext) -> Result<()> { - let client = Client::new(context.configuration())?; + let client = context.client_from_config(); let find_all_permissions = FindPermissionTokensByAccountId::new(self.id); let permissions = client .request(find_all_permissions) @@ -765,7 +737,7 @@ mod account { mod asset { use iroha_client::{ - client::{self, asset, Client}, + client::{self, asset}, data_model::{asset::AssetDefinition, name::Name}, }; @@ -939,7 +911,7 @@ mod asset { impl RunArgs for Get { fn run(self, context: &mut dyn RunContext) -> Result<()> { let Self { asset_id } = self; - let iroha_client = Client::new(context.configuration())?; + let iroha_client = context.client_from_config(); let asset = iroha_client .request(asset::by_id(asset_id)) .wrap_err("Failed to get asset.")?; @@ -959,7 +931,7 @@ mod asset { impl RunArgs for List { fn run(self, context: &mut dyn RunContext) -> Result<()> { - let client = Client::new(context.configuration())?; + let client = context.client_from_config(); let vec = match self { Self::All => client @@ -1033,7 +1005,7 @@ mod asset { impl RunArgs for GetKeyValue { fn run(self, context: &mut dyn RunContext) -> Result<()> { let Self { asset_id, key } = self; - let client = Client::new(context.configuration())?; + let client = context.client_from_config(); let find_key_value = FindAssetKeyValueByIdAndKey::new(asset_id, key); let asset = client .request(find_key_value) diff --git a/config/Cargo.toml b/config/Cargo.toml index d6df71128fa..a976320e776 100644 --- a/config/Cargo.toml +++ b/config/Cargo.toml @@ -23,6 +23,7 @@ tracing-subscriber = { workspace = true, features = ["fmt", "ansi"] } url = { workspace = true, features = ["serde"] } serde = { workspace = true, features = ["derive"] } +serde_with = { workspace = true } strum = { workspace = true, features = ["derive"] } serde_json = { workspace = true } json5 = { workspace = true } @@ -31,11 +32,16 @@ displaydoc = { workspace = true } derive_more = { workspace = true } cfg-if = { workspace = true } once_cell = { workspace = true } +nonzero_ext = { workspace = true } +toml = { workspace = true } +merge = "0.1.0" [dev-dependencies] proptest = "1.3.1" stacker = "0.1.15" expect-test = { workspace = true } +trybuild = { workspace = true } +hex = { workspace = true } [features] tokio-console = [] diff --git a/config/base/Cargo.toml b/config/base/Cargo.toml index b11b28ea577..b5b469bc524 100644 --- a/config/base/Cargo.toml +++ b/config/base/Cargo.toml @@ -11,15 +11,14 @@ license.workspace = true workspace = true [dependencies] -iroha_config_derive = { workspace = true } -iroha_crypto = { workspace = true, features = ["std"] } - -serde = { workspace = true, default-features = false, features = ["derive"] } -serde_json = { workspace = true, features = ["alloc"] } -parking_lot = { workspace = true } -json5 = { workspace = true } -thiserror = { workspace = true } -displaydoc = { workspace = true } -crossbeam = { workspace = true } +merge = "0.1.0" +drop_bomb = { workspace = true } +derive_more = { workspace = true, features = ["from", "deref", "deref_mut"] } eyre = { workspace = true } +serde = { workspace = true, features = ["derive"] } +serde_with = { workspace = 
true, features = ["macros", "std"] } +thiserror = { workspace = true } +num-traits = "0.2.17" +[dev-dependencies] +toml = { workspace = true } diff --git a/config/base/derive/Cargo.toml b/config/base/derive/Cargo.toml deleted file mode 100644 index 8aa95845755..00000000000 --- a/config/base/derive/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "iroha_config_derive" - -edition.workspace = true -version.workspace = true -authors.workspace = true - -license.workspace = true - -[lints] -workspace = true - -[lib] -proc-macro = true - -[dependencies] -iroha_macro_utils = { workspace = true } - -syn = { workspace = true, features = ["derive", "parsing", "proc-macro", "clone-impls", "printing"] } -# This is the maximally compressed set of features. Yes we also need "printing". -quote = { workspace = true } -proc-macro2 = { workspace = true } -proc-macro-error = { workspace = true } - diff --git a/config/base/derive/src/lib.rs b/config/base/derive/src/lib.rs deleted file mode 100644 index 0cd24e4e345..00000000000 --- a/config/base/derive/src/lib.rs +++ /dev/null @@ -1,51 +0,0 @@ -//! Contains various configuration related macro definitions. - -use proc_macro::TokenStream; - -pub(crate) mod proxy; -pub(crate) mod utils; -pub(crate) mod view; - -/// Derive for config loading. More details in `iroha_config_base` reexport -#[proc_macro_derive(Override, attributes(config))] -pub fn override_derive(input: TokenStream) -> TokenStream { - let ast = syn::parse_macro_input!(input as utils::StructWithFields); - proxy::impl_override(&ast) -} - -/// Derive for config querying and setting. More details in `iroha_config_base` reexport -#[proc_macro_derive(Builder, attributes(builder))] -pub fn builder_derive(input: TokenStream) -> TokenStream { - let ast = syn::parse_macro_input!(input as utils::StructWithFields); - proxy::impl_build(&ast) -} - -/// Derive for config querying and setting. More details in `iroha_config_base` reexport -#[proc_macro_error::proc_macro_error] -#[proc_macro_derive(LoadFromEnv, attributes(config))] -pub fn load_from_env_derive(input: TokenStream) -> TokenStream { - let ast = syn::parse_macro_input!(input as utils::StructWithFields); - proxy::impl_load_from_env(&ast) -} - -/// Derive for config querying and setting. More details in `iroha_config_base` reexport -#[proc_macro_derive(LoadFromDisk)] -pub fn load_from_disk_derive(input: TokenStream) -> TokenStream { - let ast = syn::parse_macro_input!(input as utils::StructWithFields); - proxy::impl_load_from_disk(&ast) -} - -/// Derive for config querying and setting. More details in `iroha_config_base` reexport -#[proc_macro_derive(Proxy, attributes(config))] -pub fn proxy_derive(input: TokenStream) -> TokenStream { - let ast = syn::parse_macro_input!(input as utils::StructWithFields); - proxy::impl_proxy(ast) -} - -/// Generate view for given struct and convert from type to its view. -/// More details in `iroha_config_base` reexport. 
-#[proc_macro] -pub fn view(input: TokenStream) -> TokenStream { - let ast = syn::parse_macro_input!(input as utils::StructWithFields); - view::impl_view(ast) -} diff --git a/config/base/derive/src/proxy.rs b/config/base/derive/src/proxy.rs deleted file mode 100644 index dafef4c6145..00000000000 --- a/config/base/derive/src/proxy.rs +++ /dev/null @@ -1,324 +0,0 @@ -use proc_macro::TokenStream; -use proc_macro_error::abort; -use quote::{format_ident, quote}; -use syn::{parse_quote, Type, TypePath}; - -use super::utils::{get_inner_type, StructWithFields}; -use crate::utils; - -pub fn impl_proxy(ast: StructWithFields) -> TokenStream { - let parent_name = &ast.ident; - let parent_ty: Type = parse_quote! { #parent_name }; - let proxy_struct = gen_proxy_struct(ast); - let loadenv_derive = quote! { ::iroha_config_base::derive::LoadFromEnv }; - let disk_derive = quote! { ::iroha_config_base::derive::LoadFromDisk }; - let builder_derive = quote! { ::iroha_config_base::derive::Builder }; - let override_derive = quote! { ::iroha_config_base::derive::Override }; - quote! { - /// Proxy configuration structure to be used as an intermediate - /// for configuration loading. Both loading from disk and - /// from env should only be done via this struct, which then - /// builds into its parent [`struct@Configuration`]. - #[derive(Debug, Clone, serde::Serialize, serde::Deserialize, - #builder_derive, - #loadenv_derive, - #disk_derive, - #override_derive - )] - #[builder(parent = #parent_ty)] - #proxy_struct - - } - .into() -} - -pub fn impl_override(ast: &StructWithFields) -> TokenStream { - let override_trait = quote! { ::iroha_config_base::proxy::Override }; - let name = &ast.ident; - let clauses = ast.fields.iter().map(|field| { - let field_name = &field.ident; - if field.has_inner { - let inner_ty = get_inner_type("Option", &field.ty); - quote! { - self.#field_name = match (self.#field_name, other.#field_name) { - (Some(this_field), Some(other_field)) => Some(<#inner_ty as #override_trait>::override_with(this_field, other_field)), - (this_field, None) => this_field, - (None, other_field) => other_field, - }; - } - } else { - quote! { - if let Some(other_field) = other.#field_name { - self.#field_name = Some(other_field) - } - } - } - }); - - quote! { - impl #override_trait for #name { - fn override_with(mut self, other: Self) -> Self { - #(#clauses)* - self - } - } - } - .into() -} - -pub fn impl_load_from_env(ast: &StructWithFields) -> TokenStream { - let env_fetcher_ident = quote! { env_fetcher }; - let fetch_env_trait = quote! { ::iroha_config_base::proxy::FetchEnv }; - let env_trait = quote! { ::iroha_config_base::proxy::LoadFromEnv }; - - let set_field = ast.fields - .iter() - .map(|field| { - let ty = &field.ty; - let as_str_attr = field.has_as_str; - let ident = &field.ident; - let field_env = &field.env_str; - - let inner_ty = if field.has_option { - get_inner_type("Option", ty) - } else { - abort!(ast, "This macro should only be used on `ConfigurationProxy` types, \ - i.e. the types which represent a partially finalised configuration \ - (with some required fields omitted and to be read from other sources). \ - These types' fields have the `Option` type wrapped around each of them.") - }; - let is_string = if let Type::Path(TypePath { path, .. }) = inner_ty { - path.is_ident("String") - } else { - false - }; - let inner = if is_string { - quote! { Ok(var) } - } else if as_str_attr { - quote! 
{{ - let value: ::serde_json::Value = var.into(); - ::json5::from_str(&value.to_string()) - }} - } else { - quote! { ::json5::from_str(&var) } - }; - let mut set_field = quote! { - let #ident = #env_fetcher_ident.fetch(#field_env) - // treating unicode errors the same as variable absence - .ok() - .map(|var| { - #inner.map_err(|err| { - ::iroha_config_base::derive::Error::field_deserialization_from_json5( - // FIXME: specify location precisely - // https://github.com/hyperledger/iroha/issues/3470 - #field_env, - &err - ) - }) - }) - .transpose()?; - }; - if field.has_inner { - let maybe_map_box = gen_maybe_map_box(inner_ty); - set_field.extend(quote! { - let inner_proxy = <#inner_ty as #env_trait>::from_env(#env_fetcher_ident) - #maybe_map_box - ?; - let #ident = if let Some(old_inner) = #ident { - Some(<#inner_ty as ::iroha_config_base::proxy::Override>::override_with(old_inner, inner_proxy)) - } else { - Some(inner_proxy) - }; - }); - } - set_field - }); - - let name = &ast.ident; - let fields = ast - .fields - .iter() - .map(|field| { - let ident = &field.ident; - quote! { #ident } - }) - .collect::>(); - quote! { - impl #env_trait for #name { - type ReturnValue = Result; - fn from_env(#env_fetcher_ident: &F) -> Self::ReturnValue { - #(#set_field)* - let proxy = #name { - #(#fields),* - }; - Ok(proxy) - } - } - } - .into() -} - -pub fn impl_load_from_disk(ast: &StructWithFields) -> TokenStream { - let proxy_name = &ast.ident; - let disk_trait = quote! { ::iroha_config_base::proxy::LoadFromDisk }; - let error_ty = quote! { ::iroha_config_base::derive::Error }; - let disk_err_variant = quote! { ::iroha_config_base::derive::Error::Disk }; - let serde_err_variant = quote! { ::iroha_config_base::derive::Error::Json5 }; - let none_proxy = gen_none_fields_proxy(ast); - quote! { - impl #disk_trait for #proxy_name { - type ReturnValue = Self; - fn from_path + ::std::fmt::Debug + Clone>(path: P) -> Self::ReturnValue { - let mut file = ::std::fs::File::open(path).map_err(#disk_err_variant); - // String has better parsing speed, see [issue](https://github.com/serde-rs/json/issues/160#issuecomment-253446892) - let mut s = String::new(); - let res = file - .and_then(|mut f| { - ::std::io::Read::read_to_string(&mut f, &mut s).map(move |_| s).map_err(#disk_err_variant) - }) - .and_then( - |s| -> ::core::result::Result { - json5::from_str(&s).map_err(#serde_err_variant) - }, - ) - .map_or(#none_proxy, ::std::convert::identity); - res - } - } - }.into() -} - -fn gen_proxy_struct(mut ast: StructWithFields) -> StructWithFields { - // As this changes the field types of the AST, `lvalue_read` - // and `lvalue_write` of its `StructField`s may get desynchronized - ast.fields.iter_mut().for_each(|field| { - // For fields of `Configuration` that have an inner config, the corresponding - // proxy field should have a `..Proxy` type there as well - if field.has_inner { - proxify_field_type(&mut field.ty); - } - let ty = &field.ty; - field.ty = parse_quote! { - Option<#ty> - }; - // - field - .attrs - .retain(|attr| attr.path.is_ident("doc") || attr.path.is_ident("config")); - // Fields that already wrap an option should have a - // custom deserializer so that json `null` becomes - // `Some(None)` and not just `None` - if field.has_option { - let de_helper = stringify! { ::iroha_config_base::proxy::some_option }; - let serde_attr: syn::Attribute = - parse_quote! 
{ #[serde(default, deserialize_with = #de_helper)] }; - field.attrs.push(serde_attr); - } - field.has_option = true; - }); - ast.ident = format_ident!("{}Proxy", ast.ident); - // The only needed struct-level attributes are these - ast.attrs.retain(|attr| { - attr.path.is_ident("config") || attr.path.is_ident("serde") || attr.path.is_ident("cfg") - }); - ast -} - -#[allow(clippy::expect_used)] -pub fn proxify_field_type(field_ty: &mut syn::Type) { - if let Type::Path(path) = field_ty { - let last_segment = path.path.segments.last_mut().expect("Can't be empty"); - if last_segment.ident == "Box" { - let box_generic = utils::extract_box_generic(last_segment); - // Recursion - proxify_field_type(box_generic) - } else { - // TODO: Wouldn't it be better to get it as an associated type? - let new_ident = format_ident!("{}Proxy", last_segment.ident); - last_segment.ident = new_ident; - } - } -} - -pub fn impl_build(ast: &StructWithFields) -> TokenStream { - let checked_fields = gen_none_fields_check(ast); - let proxy_name = &ast.ident; - let parent_ty = utils::get_parent_ty(ast); - let builder_trait = quote! { ::iroha_config_base::proxy::Builder }; - let error_ty = quote! { ::iroha_config_base::derive::Error }; - - quote! { - impl #builder_trait for #proxy_name { - type ReturnValue = Result<#parent_ty, #error_ty>; - fn build(self) -> Self::ReturnValue { - Ok(#parent_ty { - #checked_fields - }) - } - } - } - .into() -} - -/// Helper function to be used in [`impl Builder`]. Verifies that all fields have -/// been initialized. -fn gen_none_fields_check(ast: &StructWithFields) -> proc_macro2::TokenStream { - let checked_fields = ast.fields.iter().map(|field| { - let ident = &field.ident; - let missing_field = quote! { ::iroha_config_base::derive::Error::MissingField }; - if field.has_inner { - let inner_ty = get_inner_type("Option", &field.ty); - let builder_trait = quote! { ::iroha_config_base::proxy::Builder }; - - let maybe_map_box = gen_maybe_map_box(inner_ty); - - quote! { - #ident: <#inner_ty as #builder_trait>::build( - self.#ident.ok_or( - #missing_field{field: stringify!(#ident), message: ""} - )? - ) - #maybe_map_box - ? - } - } else { - quote! { - #ident: self.#ident.ok_or( - #missing_field{field: stringify!(#ident), message: ""} - )? - } - } - }); - quote! { - #(#checked_fields),* - } -} - -fn gen_maybe_map_box(inner_ty: &syn::Type) -> proc_macro2::TokenStream { - if let Type::Path(path) = &inner_ty { - let last_segment = path.path.segments.last().expect("Can't be empty"); - if last_segment.ident == "Box" { - return quote! { - .map(Box::new) - }; - } - } - quote! {} -} - -/// Helper function to be used as an empty fallback for [`impl LoadFromEnv`] or [`impl LoadFromDisk`]. -/// Only meant for proxy types usage. -fn gen_none_fields_proxy(ast: &StructWithFields) -> proc_macro2::TokenStream { - let proxy_name = &ast.ident; - let none_fields = ast.fields.iter().map(|field| { - let ident = &field.ident; - quote! { - #ident: None - } - }); - quote! 
{ - #proxy_name { - #(#none_fields),* - } - } -} diff --git a/config/base/derive/src/utils.rs b/config/base/derive/src/utils.rs deleted file mode 100644 index 36f79a76384..00000000000 --- a/config/base/derive/src/utils.rs +++ /dev/null @@ -1,367 +0,0 @@ -pub use iroha_macro_utils::{attr_struct, AttrParser}; -use proc_macro2::TokenStream; -use quote::{quote, ToTokens}; -use syn::{ - parse::{Parse, ParseStream}, - Attribute, GenericArgument, Ident, LitStr, Meta, NestedMeta, PathArguments, Token, Type, -}; - -/// Keywords used inside `#[view(...)]` and `#[config(...)]` -mod kw { - // config keywords - syn::custom_keyword!(serde_as_str); - syn::custom_keyword!(inner); - syn::custom_keyword!(env_prefix); - // view keywords - syn::custom_keyword!(ignore); - syn::custom_keyword!(into); - // builder keywords - syn::custom_keyword!(parent); -} - -/// Structure to parse `#[view(...)]` attributes. -/// [`Inner`] is responsible for parsing attribute arguments. -pub struct View(std::marker::PhantomData); - -/// Structure to parse `#[config(...)]` attributes. -/// [`Inner`] is responsible for parsing attribute arguments. -struct Config(std::marker::PhantomData); - -/// Structure to parse `#[builder(...)]` attributes. -/// [`Inner`] is responsible for parsing attribute arguments. -struct Builder(std::marker::PhantomData); - -impl AttrParser for View { - const IDENT: &'static str = "view"; -} - -impl AttrParser for Config { - const IDENT: &'static str = "config"; -} - -impl AttrParser for Builder { - const IDENT: &'static str = "builder"; -} - -attr_struct! { - pub struct ViewIgnore { - _kw: kw::ignore, - } -} - -attr_struct! { - pub struct ViewFieldType { - _kw: kw::into, - _eq: Token![=], - ty: Type, - } -} - -attr_struct! { - pub struct ConfigInner { - _kw: kw::inner, - } -} - -attr_struct! { - pub struct ConfigAsStr { - _kw: kw::serde_as_str, - } -} - -attr_struct! { - pub struct ConfigEnvPrefix { - _kw: kw::env_prefix, - _eq: Token![=], - pub prefix: LitStr, - } -} - -attr_struct! { - pub struct BuilderParent { - _kw: kw::parent, - _eq: Token![=], - pub parent: Type, - } -} - -impl From for Type { - fn from(value: ViewFieldType) -> Self { - value.ty - } -} - -#[derive(Clone)] -pub struct StructField { - pub ident: Ident, - pub ty: Type, - pub vis: syn::Visibility, - pub attrs: Vec, - pub env_str: String, - pub has_inner: bool, - pub has_option: bool, - pub has_as_str: bool, - pub lvalue_read: TokenStream, - pub lvalue_write: TokenStream, -} - -impl StructField { - fn from_ast(field: syn::Field, env_prefix: &str) -> Self { - let field_ident = field - .ident - .expect("Already checked for named fields at parsing"); - let (lvalue_read, lvalue_write) = gen_lvalue(&field.ty, &field_ident); - StructField { - has_inner: field - .attrs - .iter() - .any(|attr| Config::::parse(attr).is_ok()), - has_as_str: field - .attrs - .iter() - .any(|attr| Config::::parse(attr).is_ok()), - has_option: is_option_type(&field.ty), - env_str: env_prefix.to_owned() + &field_ident.to_string().to_uppercase(), - attrs: field.attrs, - ident: field_ident, - ty: field.ty, - vis: field.vis, - lvalue_read, - lvalue_write, - } - } -} - -impl ToTokens for StructField { - fn to_tokens(&self, tokens: &mut TokenStream) { - let StructField { - attrs, - ty, - ident, - vis, - .. - } = self; - let stream = quote! 
{ - #(#attrs)* - #vis #ident: #ty - }; - tokens.extend(stream); - } -} - -/// Parsed struct with named fields used in proc macros of this crate -#[derive(Clone)] -pub struct StructWithFields { - pub attrs: Vec, - pub env_prefix: String, - pub vis: syn::Visibility, - _struct_token: Token![struct], - pub ident: Ident, - pub generics: syn::Generics, - pub fields: Vec, - _semi_token: Option, -} - -impl Parse for StructWithFields { - fn parse(input: ParseStream) -> syn::Result { - let attrs = input.call(Attribute::parse_outer)?; - let env_prefix = attrs - .iter() - .map(Config::::parse) - .find_map(Result::ok) - .map(|pref| pref.prefix.value()) - .unwrap_or_default(); - Ok(Self { - attrs, - vis: input.parse()?, - _struct_token: input.parse()?, - ident: input.parse()?, - generics: input.parse()?, - fields: input - .parse::()? - .named - .into_iter() - .map(|field| StructField::from_ast(field, &env_prefix)) - .collect(), - env_prefix, - _semi_token: input.parse()?, - }) - } -} - -impl ToTokens for StructWithFields { - fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { - let StructWithFields { - attrs, - vis, - ident, - generics, - fields, - .. - } = self; - let stream = quote! { - #(#attrs)* - #vis struct #ident #generics { - #(#fields),* - } - }; - tokens.extend(stream); - } -} - -/// Remove attributes with ident [`attr_ident`] from attributes -pub fn remove_attr(attrs: &mut Vec, attr_ident: &str) { - attrs.retain(|attr| !attr.path.is_ident(attr_ident)); -} - -pub fn extract_field_idents(fields: &[StructField]) -> Vec<&Ident> { - fields.iter().map(|field| &field.ident).collect::>() -} - -pub fn extract_field_types(fields: &[StructField]) -> Vec { - fields - .iter() - .map(|field| field.ty.clone()) - .collect::>() -} - -pub fn get_type_argument<'tl>(s: &str, ty: &'tl Type) -> Option<&'tl GenericArgument> { - let Type::Path(path) = ty else { - return None; - }; - let segments = &path.path.segments; - if segments.len() != 1 || segments[0].ident != s { - return None; - } - - if let PathArguments::AngleBracketed(bracketed_arguments) = &segments[0].arguments { - if bracketed_arguments.args.len() == 1 { - return Some(&bracketed_arguments.args[0]); - } - } - None -} - -pub fn get_inner_type<'tl>(outer_ty_ident: &str, ty: &'tl Type) -> &'tl Type { - #[allow(clippy::shadow_unrelated)] - get_type_argument(outer_ty_ident, ty) - .and_then(|ty| { - if let GenericArgument::Type(r#type) = ty { - Some(r#type) - } else { - None - } - }) - .unwrap_or(ty) -} - -pub fn is_arc_rwlock(ty: &Type) -> bool { - let dearced_ty = get_inner_type("Arc", ty); - get_type_argument("RwLock", dearced_ty).is_some() -} - -/// Check if the provided type is of the form [`Option<..>`] -pub fn is_option_type(ty: &Type) -> bool { - get_type_argument("Option", ty).is_some() -} - -/// Remove attributes with ident [`attr_ident`] from struct attributes and field attributes -pub fn remove_attr_from_struct(ast: &mut StructWithFields, attr_ident: &str) { - let StructWithFields { attrs, fields, .. 
} = ast; - for field in fields { - remove_attr(&mut field.attrs, attr_ident); - } - remove_attr(attrs, attr_ident); -} - -/// Keep only derive attributes passed as a second argument in struct attributes and field attributes -pub fn keep_derive_attr(ast: &mut StructWithFields, kept_attrs: &[&str]) { - ast.attrs - .iter_mut() - .filter(|attr| attr.path.is_ident("derive")) - .for_each(|attr| { - let meta = attr - .parse_meta() - .expect("derive macro must be in one of the meta forms"); - if let Meta::List(list) = meta { - let items: Vec = list - .nested - .into_iter() - .filter(|nested| { - if let NestedMeta::Meta(Meta::Path(path)) = nested { - return kept_attrs.iter().any(|kept_attr| path.is_ident(kept_attr)); - } - // Non-nested all kept by default - true - }) - .collect(); - *attr = syn::parse_quote!( - #[derive(#(#items),*)] - ); - } - }); -} - -/// Keep only attributes passed as a second argument in struct attributes and field attributes -pub fn keep_attrs_in_struct(ast: &mut StructWithFields, kept_attrs: &[&str]) { - let StructWithFields { attrs, fields, .. } = ast; - for field in fields { - field.attrs.retain(|attr| { - kept_attrs - .iter() - .any(|kept_attr| attr.path.is_ident(kept_attr)) - }); - } - attrs.retain(|attr| { - kept_attrs - .iter() - .any(|kept_attr| attr.path.is_ident(kept_attr)) - }); -} - -/// Generate lvalue forms for a struct field, taking [`Arc>`] types -/// into account as well. Returns a 2-tuple of read and write forms. -pub fn gen_lvalue(field_ty: &Type, field_ident: &Ident) -> (TokenStream, TokenStream) { - let is_lvalue = is_arc_rwlock(field_ty); - - let lvalue_read = if is_lvalue { - quote! { self.#field_ident.read().await } - } else { - quote! { self.#field_ident } - }; - - let lvalue_write = if is_lvalue { - quote! { self.#field_ident.write().await } - } else { - quote! { self.#field_ident } - }; - - (lvalue_read, lvalue_write) -} - -/// Check if [`StructWithFields`] has `#[builder(parent = ..)]` -pub fn get_parent_ty(ast: &StructWithFields) -> Type { - ast.attrs - .iter() - .find_map(|attr| Builder::::parse(attr).ok()) - .map(|builder| builder.parent) - .expect("Should not be called on structs with no `#[builder(..)]` attribute") -} - -pub fn extract_box_generic(box_seg: &mut syn::PathSegment) -> &mut syn::Type { - let syn::PathArguments::AngleBracketed(generics) = &mut box_seg.arguments else { - panic!("`Box` should have explicit generic"); - }; - - assert!( - generics.args.len() == 1, - "`Box` should have exactly one generic argument" - ); - let syn::GenericArgument::Type(generic_type) = - generics.args.first_mut().expect("Can't be empty") - else { - panic!("`Box` should have type as a generic argument") - }; - - generic_type -} diff --git a/config/base/derive/src/view.rs b/config/base/derive/src/view.rs deleted file mode 100644 index a020c7edc13..00000000000 --- a/config/base/derive/src/view.rs +++ /dev/null @@ -1,183 +0,0 @@ -use gen::*; -use proc_macro::TokenStream; -use quote::{format_ident, quote}; - -use super::utils::{ - extract_field_idents, extract_field_types, remove_attr, remove_attr_from_struct, AttrParser, - StructField, StructWithFields, View, ViewFieldType, ViewIgnore, -}; - -pub fn impl_view(ast: StructWithFields) -> TokenStream { - let original = original_struct(ast.clone()); - let view = view_struct(ast); - let impl_from = impl_from(&original, &view); - let impl_has_view = impl_has_view(&original); - let assertions = assertions(&view); - let out = quote! 
{ - #original - #impl_has_view - #view - #impl_from - #assertions - }; - out.into() -} - -mod gen { - use super::*; - use crate::utils::{keep_attrs_in_struct, keep_derive_attr}; - - pub fn original_struct(mut ast: StructWithFields) -> StructWithFields { - remove_attr_from_struct(&mut ast, "view"); - ast - } - - pub fn view_struct(mut ast: StructWithFields) -> StructWithFields { - // Remove fields with #[view(ignore)] - ast.fields.retain(is_view_field_ignored); - // Change field type to `Type` if it has attribute #[view(into = Type)] - ast.fields.iter_mut().for_each(view_field_change_type); - // Replace doc-string for view - remove_attr(&mut ast.attrs, "doc"); - let view_doc = format!("View for {}", ast.ident); - ast.attrs.push(syn::parse_quote!( - #[doc = #view_doc] - )); - keep_derive_attr( - &mut ast, - &[ - "Clone", - "Debug", - "Deserialize", - "Serialize", - "PartialEq", - "Eq", - ], - ); - keep_attrs_in_struct(&mut ast, &["serde", "doc", "derive", "cfg"]); - ast.ident = format_ident!("{}View", ast.ident); - ast - } - - pub fn impl_from( - original: &StructWithFields, - view: &StructWithFields, - ) -> proc_macro2::TokenStream { - let StructWithFields { - ident: original_ident, - .. - } = original; - let StructWithFields { - generics, - ident: view_ident, - fields, - .. - } = view; - let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); - let field_idents = extract_field_idents(fields); - let field_cfg_attrs = fields - .iter() - .map(|field| { - field - .attrs - .iter() - .filter(|attr| attr.path.is_ident("cfg")) - .collect::>() - }) - .collect::>(); - - let field_froms: Vec<_> = fields - .iter() - .map(|field| { - let field_ident = &field.ident; - if let syn::Type::Path(syn::TypePath { path, .. }) = &field.ty { - let last_segment = path.segments.last().expect("Not empty"); - if last_segment.ident == "Box" { - return quote! { - #field_ident: Box::new(core::convert::From::<_>::from(*#field_ident)), - }; - } - } - quote! { - #field_ident: core::convert::From::<_>::from(#field_ident), - } - }) - .collect(); - - quote! { - impl #impl_generics core::convert::From<#original_ident> for #view_ident #ty_generics #where_clause { - fn from(config: #original_ident) -> Self { - let #original_ident { - #( - #(#field_cfg_attrs)* - #field_idents, - )* - .. - } = config; - Self { - #( - #(#field_cfg_attrs)* - #field_froms - )* - } - } - } - } - } - - pub fn impl_has_view(original: &StructWithFields) -> proc_macro2::TokenStream { - let StructWithFields { - generics, - ident: view_ident, - .. - } = original; - let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); - - quote! { - impl #impl_generics iroha_config_base::view::HasView for #view_ident #ty_generics #where_clause {} - } - } - - pub fn assertions(view: &StructWithFields) -> proc_macro2::TokenStream { - let StructWithFields { fields, .. } = view; - let field_types = extract_field_types(fields); - let messages: Vec = extract_field_idents(fields) - .iter() - .map(|ident| { - format!("Field `{ident}` has it's own view, consider adding attribute #[view(into = ViewType)]") - }) - .collect(); - quote! 
{ - /// Assert that every field of 'View' doesn't implement `HasView` trait - const _: () = { - use iroha_config_base::view::NoView; - #( - const _: () = assert!(!iroha_config_base::view::IsInstanceHasView::<#field_types>::IS_HAS_VIEW, #messages); - )* - }; - } - } -} - -/// Check if [`Field`] has `#[view(ignore)]` -fn is_view_field_ignored(field: &StructField) -> bool { - field - .attrs - .iter() - .map(View::::parse) - .find_map(Result::ok) - .is_none() -} - -/// Change [`Field`] type to `Type` if `#[view(type = Type)]` is present -fn view_field_change_type(field: &mut StructField) { - if let Some(ty) = field - .attrs - .iter() - .map(View::::parse) - .find_map(Result::ok) - .map(ViewFieldType::into) - { - field.ty = ty; - } -} diff --git a/config/base/src/lib.rs b/config/base/src/lib.rs index 7ea61d35ddb..cf898f6ab57 100644 --- a/config/base/src/lib.rs +++ b/config/base/src/lib.rs @@ -1,493 +1,632 @@ -//! Package for managing iroha configuration -use std::{fmt::Debug, path::Path}; +//! Utilities behind Iroha configurations + +use std::{ + borrow::Cow, + cell::RefCell, + collections::{HashMap, HashSet}, + convert::Infallible, + env::VarError, + error::Error, + ffi::OsString, + fmt::{Debug, Display, Formatter}, + ops::Sub, + path::PathBuf, + str::FromStr, + time::Duration, +}; + +use eyre::{eyre, Report, WrapErr}; +pub use merge::Merge; +pub use serde; +use serde::{Deserialize, Serialize}; + +/// [`Duration`], but can parse a human-readable string. +/// TODO: currently deserializes just as [`Duration`] +#[serde_with::serde_as] +#[derive(Debug, Copy, Clone, Deserialize, Serialize, Ord, PartialOrd, Eq, PartialEq)] +pub struct HumanDuration(#[serde_as(as = "serde_with::DurationMilliSeconds")] pub Duration); + +impl HumanDuration { + /// Get the [`Duration`] + pub fn get(self) -> Duration { + self.0 + } +} -use serde::{de::DeserializeOwned, Deserialize, Deserializer, Serialize}; +/// Representation of amount of bytes, parseable from a human-readable string. +#[derive(Debug, Copy, Clone, Deserialize, Serialize)] +pub struct HumanBytes(pub T); -pub mod derive { - //! Derives for configuration entities - /// Generate view for the type and implement conversion `Type -> View`. - /// View contains a subset of the fields that the type has. - /// - /// Works only with structs. - /// - /// ## Container attributes - /// - /// ## Field attributes - /// ### `#[view(ignore)]` - /// Marks fields to ignore when converting to view type. - /// - /// ### `#[view(into = Ty)]` - /// Sets view's field type to Ty. - /// - /// ## Examples - /// - /// ```rust - /// use iroha_config_base::derive::view; - /// - /// view! { - /// #[derive(Default)] - /// struct Structure { - /// #[view(into = u64)] - /// a: u32, - /// // `View` shouldn't have field `b` so we must exclude it. - /// #[view(ignore)] - /// b: u32, - /// } - /// } - /// - /// // Will generate something like - /// // --//-- original struct - /// // struct StructureView { - /// // a: u64, - /// // } - /// // - /// // impl From for StructureView { - /// // fn from(value: Structure) -> Self { - /// // let Structure { - /// // a, - /// // .. 
- /// // } = value; - /// // Self { - /// // a: From::<_>::from(a), - /// // } - /// // } - /// // } - /// - /// - /// let structure = Structure { a: 13, b: 37 }; - /// let view: StructureView = structure.into(); - /// assert_eq!(view.a, 13); - /// ``` - pub use iroha_config_derive::view; - /// Derive macro for implementing the trait - /// [`iroha_config::base::proxy::Builder`](`crate::proxy::Builder`) - /// for config structures. Meant to be used on proxy types only, for - /// details see [`iroha_config::base::derive::Proxy`](`crate::derive::Proxy`). - /// - /// # Container attributes - /// - /// ## `#[builder(parent = ..)]` - /// Takes a target type to build into, e.g. for a `ConfigurationProxy` - /// it would be `Configuration`. - /// - /// # Examples - /// - /// ```rust - /// use iroha_config_base::derive::{Builder, Override, LoadFromEnv}; - /// use iroha_config_base::proxy::Builder as _; - /// - /// // Also need `LoadFromEnv` as it owns the `#[config]` attribute - /// #[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize, LoadFromEnv, Builder)] - /// #[builder(parent = Outer)] - /// struct OuterProxy { #[config(inner)] inner: Option } - /// - /// #[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize, LoadFromEnv, Builder, Override)] - /// #[builder(parent = Inner)] - /// struct InnerProxy { b: Option } - /// - /// #[derive(Debug, PartialEq)] - /// struct Outer { inner: Inner } - /// - /// #[derive(Debug, PartialEq)] - /// struct Inner { b: String } - /// - /// let outer_proxy = OuterProxy { inner: Some(InnerProxy { b: Some("a".to_owned()) })}; - /// - /// let outer = Outer { inner: Inner { b: "a".to_owned() } }; - /// - /// assert_eq!(outer, outer_proxy.build().unwrap()); - /// ``` - pub use iroha_config_derive::Builder; - /// Derive macro for implementing the trait - /// [`iroha_config::base::proxy::LoadFromDisk`](`crate::proxy::LoadFromDisk`) - /// trait for config structures. - /// - /// Meant to be used on proxy types only, for - /// details see [`iroha_config::base::derive::Proxy`](`crate::derive::Proxy`). - /// - /// The trait's only method, `from_path`, - /// deserializes a JSON config at the provided path into the parent proxy structure, - /// leaving it empty in case of any error. - /// - /// The `ReturnValue` associated type can be - /// swapped for anything suitable. Currently, the proxy structure is returned - /// by default. - pub use iroha_config_derive::LoadFromDisk; - /// Derive macro for implementing the - /// [`iroha_config::base::proxy::LoadFromDisk`](`crate::proxy::LoadFromDisk`) - /// trait for config structures. - /// - /// Meant to be used on proxy types only, for - /// details see [`iroha_config::base::derive::Proxy`](`crate::derive::Proxy`). - /// - /// The `ReturnValue` associated type can be - /// swapped for anything suitable. Currently, the proxy structure is returned - /// by default. - /// - /// # Container attributes - /// ## `[config(env_prefix)]` - /// Sets prefix for all the env variables derived from fields in the - /// corresponding structure. 
- /// - /// ### Example - /// - /// ``` rust - /// use iroha_config_base::derive::LoadFromEnv; - /// use iroha_config_base::proxy::LoadFromEnv as _; - /// - /// #[derive(serde::Deserialize, serde::Serialize, LoadFromEnv)] - /// #[config(env_prefix = "PREFIXED_")] - /// struct PrefixedProxy { a: Option } - /// - /// std::env::set_var("PREFIXED_A", "B"); - /// let prefixed = PrefixedProxy::from_std_env().unwrap(); - /// assert_eq!(prefixed.a.unwrap(), "B"); - /// ``` - /// - /// # Field attributes - /// ## `#[config(inner)]` - /// Tells macro that the structure stores another config inside, - /// allowing to load it recursively. Moreover, the types that - /// have this attributes on them should also implement or - /// derive the [`iroha_config::base::proxy::Override`](`crate::proxy::Override`) - /// trait. - /// - /// ### Example - /// - /// ```rust - /// use iroha_config_base::derive::{Override, LoadFromEnv}; - /// use iroha_config_base::proxy::LoadFromEnv as _; - /// - /// #[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize, LoadFromEnv)] - /// struct OuterProxy { #[config(inner)] inner: Option } - /// - /// #[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize, Override, LoadFromEnv)] - /// struct InnerProxy { b: Option } - /// - /// let mut outer = OuterProxy { inner: Some(InnerProxy { b: Some("a".to_owned()) })}; - /// - /// std::env::set_var("B", "a"); - /// let env_outer = OuterProxy::from_std_env().unwrap(); - /// - /// assert_eq!(env_outer, outer); - /// ``` - /// - /// ## `#[config(serde_as_str)]` - /// Tells macro to deserialize from env variable as a bare string. - /// - /// ### Example - /// - /// ``` - /// use iroha_config_base::derive::LoadFromEnv; - /// use iroha_config_base::proxy::LoadFromEnv; - /// use std::net::Ipv4Addr; - /// - /// #[derive(serde::Deserialize, serde::Serialize, LoadFromEnv)] - /// struct IpAddrProxy { #[config(serde_as_str)] ip: Option } - /// - /// std::env::set_var("IP", "127.0.0.1"); - /// let ip = IpAddrProxy::from_std_env().unwrap(); - /// assert_eq!(ip.ip.unwrap(), Ipv4Addr::new(127, 0, 0, 1)); - /// ``` - pub use iroha_config_derive::LoadFromEnv; - /// Derive macro for implementing the trait - /// [`iroha_config::base::proxy::Override`](`crate::proxy::Override`) - /// for config structures. Given two proxies, consumes them by recursively overloading - /// fields of [`self`] with fields of [`other`]. Order matters here, - /// i.e. `self.combine(other)` could yield different results than `other.combine(self)`. - /// - /// Meant to be used on proxy types only, for - /// details see [`iroha_config::base::derive::Proxy`](`crate::derive::Proxy`). - /// - /// # Examples - /// - /// ```rust - /// use iroha_config_base::derive::{Override, LoadFromEnv}; - /// use iroha_config_base::proxy::Override as _; - /// - /// #[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize, Override, LoadFromEnv)] - /// struct OuterProxy { - /// #[config(inner)] - /// inner: Option, - /// a: Option - /// } +impl HumanBytes { + /// Get the number of bytes + pub fn get(self) -> T { + self.0 + } +} + +/// Error representing a missing field in the configuration +#[derive(thiserror::Error, Debug)] +#[error("missing field: `{path}`")] +pub struct MissingFieldError { + path: String, +} + +impl MissingFieldError { + /// Create an instance + pub fn new(s: &str) -> Self { + Self { path: s.to_owned() } + } +} + +/// Provides environment variables +pub trait ReadEnv { + /// Read a value of an environment variable. 
/// - /// #[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize, Override, LoadFromEnv)] - /// struct InnerProxy { b: Option } + /// This is a fallible operation, which might return an empty value if the given key is not + /// present. /// - /// let left_outer = OuterProxy { - /// inner: Some(InnerProxy { b: Some("a".to_owned()) }), - /// a: None - /// }; + /// [`Cow`] is used for flexibility. The read value might be given both as an owned and as a + /// borrowed string depending on the structure that implements [`ReadEnv`]. On the receiving + /// part, it might be convenient to parse the string while just borrowing it + /// (e.g. with [`FromStr`]), but might be also convenient to own the value. [`Cow`] covers all + /// of this. /// - /// let right_outer = OuterProxy { - /// inner: None, - /// a: Some("b".to_owned()) - /// }; + /// # Errors + /// For any reason an implementor might have. + fn read_env(&self, key: impl AsRef) -> Result>, E>; +} + +/// Constructs from environment variables +pub trait FromEnv { + /// Constructs from environment variables using [`ReadEnv`] /// - /// let res_outer = OuterProxy { - /// inner: Some(InnerProxy { b: Some("a".to_owned()) }), - /// a: Some("b".to_owned()) - /// }; + /// # Errors + /// For any reason an implementor might have. + // `E: Error` so that it could be wrapped into a Report + fn from_env>(env: &R) -> FromEnvResult + where + Self: Sized; +} + +/// Result of [`FromEnv::from_env`]. Intended to contain multiple possible errors at once. +pub type FromEnvResult = eyre::Result>; + +/// Marker trait to implement [`FromEnv`] if a type implements [`Default`] +pub trait FromEnvDefaultFallback {} + +impl FromEnv for T +where + T: FromEnvDefaultFallback + Default, +{ + fn from_env>(_env: &R) -> FromEnvResult + where + Self: Sized, + { + Ok(Self::default()) + } +} + +/// Simple collector of errors. +/// +/// Will panic on [`Drop`] if contains errors that are not handled with [`Emitter::finish`]. +pub struct Emitter { + errors: Vec, + bomb: drop_bomb::DropBomb, +} + +impl Emitter { + /// Create a new empty emitter + pub fn new() -> Self { + Self { + errors: Vec::new(), + bomb: drop_bomb::DropBomb::new( + "Errors emitter is dropped without consuming collected errors", + ), + } + } + + /// Emit a single error + pub fn emit(&mut self, error: T) { + self.errors.push(error); + } + + /// Emit a collection of errors + pub fn emit_collection(&mut self, mut errors: ErrorsCollection) { + self.errors.append(&mut errors.0); + } + + /// Transform the emitter into a [`Result`], containing an [`ErrorCollection`] if + /// any errors were emitted. /// - /// assert_eq!(left_outer.override_with(right_outer), res_outer); - /// ``` - pub use iroha_config_derive::Override; - /// Derive macro for implementing the corresponding proxy type - /// for config structures. Most of the other traits in the - /// [`iroha_config_base::proxy`](`crate::proxy`) module are - /// best derived indirectly via this macro. Proxy types serve - /// as a stand-in for flexible configuration loading either - /// from environment variables or configuration files. Proxy types also - /// provide methods to build the initial parent type from them - /// (via [`iroha_config_base::proxy::Builder`](`crate::proxy::Builder`) - /// trait) and ways to combine two proxies together (via - /// [`iroha_config_base::proxy::Override`](`crate::proxy::Override`)). 
- pub use iroha_config_derive::Proxy; - use serde::Deserialize; - use thiserror::Error; - - /// Represents a path to a nested field in a config structure - #[derive(Debug, Deserialize)] - #[serde(transparent)] - pub struct Field(pub Vec); - - impl std::fmt::Display for Field { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - // separate fields with dots - std::fmt::Display::fmt(&self.0.join("."), f) + /// # Errors + /// If any errors were emitted. + pub fn finish(mut self) -> Result<(), ErrorsCollection> { + self.bomb.defuse(); + + if self.errors.is_empty() { + Ok(()) + } else { + Err(ErrorsCollection(self.errors)) } } +} - // TODO: deal with `#[serde(skip)]` - /// Derive `Configurable` and `Proxy` error - #[derive(Debug, Error, Deserialize, displaydoc::Display)] - #[ignore_extra_doc_attributes] - #[allow(clippy::enum_variant_names)] - pub enum Error { - /// Failed to deserialize the field `{field}` - /// - /// Used in [`super::proxy::LoadFromEnv`] trait for deserialization - /// errors - #[serde(skip)] - FieldDeserialization { - /// Field name (known at compile time) - field: &'static str, - /// Unified error - #[source] - error: eyre::Report, - }, - - /// Please add `{field}` to the configuration - #[serde(skip)] - MissingField { - /// Field name - field: &'static str, - /// Additional message to be added as `color_eyre::suggestion` - message: &'static str, - }, - - /// Key pair creation failed, most likely because the keys don't form a pair - Crypto(#[from] iroha_crypto::error::Error), - - // IMO this variant should not exist. If the value is inferred, we should only warn people if the inferred value is different from the provided one. - /// You should remove the field `{field}` as its value is determined by other configuration parameters - #[serde(skip)] - ProvidedInferredField { - /// Field name - field: &'static str, - /// Additional message to be added as `color_eyre::suggestion` - message: &'static str, - }, - - /// The value {value} of `{field}` is wrong. Please change the value - #[serde(skip)] - InsaneValue { - /// The value of the field that's incorrect - value: String, - /// Field name that contains invalid value - field: &'static str, - /// Additional message to be added as `color_eyre::suggestion` - message: String, - // docstring: &'static str, // TODO: Inline the docstring for easy access - }, - - /// Reading file from disk failed - /// - /// Used in the [`LoadFromDisk`](`crate::proxy::LoadFromDisk`) trait for file read errors - #[serde(skip)] - Disk(#[from] std::io::Error), - - /// Deserializing JSON failed - /// - /// Used in [`LoadFromDisk`](`crate::proxy::LoadFromDisk`) trait for deserialization errors - #[serde(skip)] - Json5(#[from] json5::Error), - } - - impl Error { - /// This method is needed because a call of [`eyre::eyre!`] cannot be compiled when - /// generated in a proc macro. So, this shorthand is needed for proc macros. - pub fn field_deserialization_from_json( - field: &'static str, - error: &serde_json::Error, - ) -> Self { - Self::FieldDeserialization { - field, - error: eyre::eyre!("JSON: {}", error), +impl Default for Emitter { + fn default() -> Self { + Self::new() + } +} + +impl Emitter { + /// Shorthand to emit a [`MissingFieldError`]. + pub fn emit_missing_field(&mut self, field_name: impl AsRef) { + self.emit(MissingFieldError::new(field_name.as_ref())) + } + + /// Tries to [`UnwrapPartial`], collecting errors on failure. 
+ /// + /// This method is relevant for [`Emitter`], because [`UnwrapPartial`] + /// returns a collection of [`MissingFieldError`]s. + pub fn try_unwrap_partial(&mut self, partial: P) -> Option { + partial.unwrap_partial().map_or_else( + |err| { + self.emit_collection(err); + None + }, + Some, + ) + } +} + +/// An [`Error`] containing multiple errors inside +pub struct ErrorsCollection(Vec); + +impl Error for ErrorsCollection {} + +/// Displays each error on a new line +impl Display for ErrorsCollection +where + T: Display, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + for (i, item) in self.0.iter().enumerate() { + if i > 0 { + writeln!(f)?; } + write!(f, "{item}")?; } + Ok(()) + } +} - /// See [`Self::field_deserialization_from_json`] - pub fn field_deserialization_from_json5(field: &'static str, error: &json5::Error) -> Self { - Self::FieldDeserialization { - field, - error: eyre::eyre!("JSON5: {}", error), +impl Debug for ErrorsCollection +where + T: Debug, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + for (i, item) in self.0.iter().enumerate() { + if i > 0 { + writeln!(f)?; } + write!(f, "{item:?}")?; } + Ok(()) + } +} + +impl From for ErrorsCollection { + fn from(value: T) -> Self { + Self(vec![value]) } } -pub mod view { - //! Module for view related traits and structs +impl IntoIterator for ErrorsCollection { + type Item = T; + type IntoIter = std::vec::IntoIter; - /// Marker trait to set default value [`IsInstanceHasView::IS_INSTANCE_HAS_VIEW`] to `false` - pub trait NoView { - /// [`Self`] doesn't implement [`HasView`] - const IS_HAS_VIEW: bool = false; + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() } +} - impl NoView for T {} +/// An implementation of [`ReadEnv`] for testing convenience. +#[derive(Default)] +pub struct TestEnv { + map: HashMap, + visited: RefCell>, +} - /// Marker traits for types for which views are implemented - pub trait HasView {} +impl TestEnv { + /// Create new empty environment + pub fn new() -> Self { + Self::default() + } - /// Wrapper structure used to check if type implements `[HasView]` - /// If `T` doesn't implement [`HasView`] then - /// [`NoView::IS_INSTANCE_HAS_VIEW`] (`false`) will be used. - /// Otherwise [`IsInstanceHasView::IS_INSTANCE_HAS_VIEW`] (`true`) - /// from `impl` block will shadow `NoView::IS_INSTANCE_HAS_VIEW` - pub struct IsInstanceHasView(core::marker::PhantomData); + /// Create an environment with a given map + pub fn with_map(map: HashMap) -> Self { + Self { map, ..Self::new() } + } - impl IsInstanceHasView { - /// `T` implements trait [`HasView`] - pub const IS_INSTANCE_HAS_VIEW: bool = true; + /// Set a key-value pair + #[must_use] + pub fn set(mut self, key: impl AsRef, value: impl AsRef) -> Self { + self.map + .insert(key.as_ref().to_string(), value.as_ref().to_string()); + self + } + + /// Get a set of keys not visited yet by [`ReadEnv::read_env`] + pub fn unvisited(&self) -> HashSet { + let all_keys: HashSet<_> = self.map.keys().map(ToOwned::to_owned).collect(); + let visited: HashSet<_> = self.visited.borrow().clone(); + all_keys.sub(&visited) } } -pub mod proxy { - //! Module with traits for configuration proxies +impl ReadEnv for TestEnv { + fn read_env(&self, key: impl AsRef) -> Result>, Infallible> { + self.visited.borrow_mut().insert(key.as_ref().to_string()); + Ok(self + .map + .get(key.as_ref()) + .map(String::as_str) + .map(Cow::from)) + } +} - use super::*; +/// Implemented of [`ReadEnv`] on top of [`std::env::var`]. 
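To see the pieces above in action, here is a small usage sketch (modelled on the crate's own tests further below; `Emitter`'s type parameter is spelled out here since angle-bracketed generics are elided in this excerpt, and the key names are illustrative):

```rust
use iroha_config_base::{Emitter, MissingFieldError, ReadEnv, TestEnv};

fn demo() {
    // Accumulate several configuration errors instead of bailing out on the first one.
    // `Emitter` must be consumed via `finish()`, otherwise its `drop_bomb` panics.
    let mut emitter: Emitter<MissingFieldError> = Emitter::new();
    emitter.emit_missing_field("torii_url");
    emitter.emit_missing_field("chain_id");
    let errors = emitter.finish().unwrap_err();
    println!("{errors}"); // one `missing field: ...` line per entry

    // `TestEnv` doubles as a mock environment that records which keys were read,
    // letting a test assert that a loader consumed everything it was given.
    let env = TestEnv::new().set("TORII_URL", "http://127.0.0.1:8080/");
    let _value = env.read_env("TORII_URL").expect("TestEnv is infallible");
    assert!(env.unvisited().is_empty());
}
```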
+#[derive(Debug, Copy, Clone)] +pub struct StdEnv; - /// Trait for combining two configuration instances - pub trait Override: Serialize + DeserializeOwned + Sized { - /// If any of the fields in `other` are filled, they - /// override the values of the fields in [`self`]. - #[must_use] - fn override_with(self, other: Self) -> Self; +impl ReadEnv for StdEnv { + fn read_env(&self, key: impl AsRef) -> Result>, StdEnvError> { + match std::env::var(key.as_ref()) { + Ok(value) => Ok(Some(value.into())), + Err(VarError::NotPresent) => Ok(None), + Err(VarError::NotUnicode(input)) => Err(StdEnvError::NotUnicode(input)), + } } +} + +/// An error that might occur while reading from std env. +/// +/// - **Q: Why just [`VarError`] is not used?** +/// - A: Because [`VarError::NotPresent`] is `Ok(None)` in terms of [`ReadEnv`] +#[derive(Debug, thiserror::Error)] +pub enum StdEnvError { + /// Reflects [`VarError::NotUnicode`] + #[error("the specified environment variable was found, but it did not contain valid unicode data: {0:?}")] + NotUnicode(OsString), +} + +/// A tool that simplifies work with graceful parsing of multiple values in combination +/// with [`Emitter`] +pub enum ParseEnvResult { + /// Value was found and parsed + Value(T), + /// An error occurred while reading or parsing the environment + Error, + /// Value was not found, no error occurred + None, +} - impl Override for Box { - fn override_with(self, other: Self) -> Self { - Box::new(T::override_with(*self, *other)) +impl ParseEnvResult +where + T: FromStr, + ::Err: Error + Send + Sync + 'static, +{ + /// _Simple_ parsing using [`FromStr`] + pub fn parse_simple( + emitter: &mut Emitter, + env: &impl ReadEnv, + env_key: impl AsRef, + field_name: impl AsRef, + ) -> Self { + // FIXME: errors handling is such a mess now + let read = match env + .read_env(env_key.as_ref()) + .map_err(|err| eyre!("{err}")) + .wrap_err_with(|| eyre!("ooops")) + { + Ok(Some(value)) => value, + Ok(None) => return Self::None, + Err(report) => { + emitter.emit(report); + return Self::Error; + } + }; + + match FromStr::from_str(read.as_ref()).wrap_err_with(|| { + eyre!( + "failed to parse `{}` field from `{}` env variable", + field_name.as_ref(), + env_key.as_ref() + ) + }) { + Ok(value) => Self::Value(value), + Err(report) => { + emitter.emit(report); + Self::Error + } } } +} - /// Trait for configuration loading and deserialization from - /// the environment - pub trait LoadFromEnv: Sized { - /// The return type. Could be target `Configuration`, - /// some `Result`, `Option`, or any other type that - /// wraps a `..Proxy` or `Configuration` type. - type ReturnValue; - - /// Load configuration from the environment - /// - /// # Errors - /// - Fails if the deserialization of any field fails. - fn from_env(fetcher: &F) -> Self::ReturnValue; - - /// Implementation of [`Self::from_env`] using [`std::env::var`]. - fn from_std_env() -> Self::ReturnValue { - struct FetchStdEnv; - - impl FetchEnv for FetchStdEnv { - fn fetch>( - &self, - key: K, - ) -> Result { - std::env::var(key) - } - } +/// During this conversion, [`ParseEnvResult::Error`] is interpreted as [`None`]. +impl From> for Option { + fn from(value: ParseEnvResult) -> Self { + match value { + ParseEnvResult::None | ParseEnvResult::Error => None, + ParseEnvResult::Value(x) => Some(x), + } + } +} + +/// Value container to be used in the partial layers. +/// +/// In partial layers, values might be present or not. 
+/// Partial layers consisting of [`UserField`]s might be _incomplete_,
+/// merged into each other (with [`merge::Merge`]),
+/// and finally unwrapped (with [`UnwrapPartial`]) into a _complete_ layer of data.
+///
+/// Partial layers might consist of fields other than [`UserField`], but their types should follow
+/// the same conventions. This might be used e.g. to implement a custom merge strategy.
+#[derive(
+    Serialize,
+    Deserialize,
+    Ord,
+    PartialOrd,
+    Eq,
+    PartialEq,
+    derive_more::From,
+    Clone,
+    derive_more::Deref,
+    derive_more::DerefMut,
+)]
+pub struct UserField<T>(Option<T>);
+
+/// Delegating debug repr to [`Option`]
+impl<T: Debug> Debug for UserField<T> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+/// Empty user field
+impl<T> Default for UserField<T> {
+    fn default() -> Self {
+        Self(None)
+    }
+}
+
+/// The other's value takes precedence over the self's
+impl<T> Merge for UserField<T> {
+    fn merge(&mut self, other: Self) {
+        if let Some(value) = other.0 {
+            self.0 = Some(value)
+        }
+    }
+}
+
+impl<T> UserField<T> {
+    /// Get the field value
+    pub fn get(self) -> Option<T> {
+        self.0
+    }
+
+    /// Set the field value
+    pub fn set(&mut self, value: T) {
+        self.0 = Some(value);
+    }
+}
+
+impl<T> From<ParseEnvResult<T>> for UserField<T> {
+    fn from(value: ParseEnvResult<T>) -> Self {
+        let option: Option<T> = value.into();
+        option.into()
+    }
+}
+
+/// Conversion from a layer's partial state into its full state, with all required
+/// fields present.
+pub trait UnwrapPartial {
+    /// The output of unwrapping, i.e. the full layer
+    type Output;
+
+    /// Unwraps the partial into a structure with all required fields present.
+    ///
+    /// # Errors
+    /// If there are absent fields, returns a bulk of [`MissingFieldError`]s.
+    fn unwrap_partial(self) -> UnwrapPartialResult<Self::Output>;
+}
+
+/// Used for [`UnwrapPartial::unwrap_partial`]
+pub type UnwrapPartialResult<T> = Result<T, ErrorsCollection<MissingFieldError>>;
+
+/// A tool to implement the "extends" mechanism, i.e. mixins.
+///
+/// It allows users to provide paths to other files that should be used as
+/// a _base_ layer.
+///
+/// ```toml
+/// # contents of this file will be merged into the contents of `base.toml`
+/// extends = "./base.toml"
+/// ```
+///
+/// It is possible to specify multiple extensions at once:
+///
+/// ```toml
+/// # read `foo`, then merge `bar`, then merge `baz`, then merge this file's contents
+/// extends = ["foo", "bar", "baz"]
+/// ```
+///
+/// From the developer side, it should be used as a field on a partial layer:
+///
+/// ```
+/// use iroha_config_base::ExtendsPaths;
+///
+/// struct SomePartial {
+///     extends: Option<ExtendsPaths>,
+///     // ..other fields
+/// }
+/// ```
+///
+/// When this layer is constructed from a file, `ExtendsPaths` should be handled e.g.
+/// with [`ExtendsPaths::iter`].
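Putting these conventions together, a hypothetical partial layer and its unwrapping might look like the sketch below (the type, field, and value names are invented for illustration, and it assumes the `Merge` derive from the `merge` crate is available alongside the re-exported trait):

```rust
use iroha_config_base::{Emitter, Merge, UnwrapPartial, UnwrapPartialResult, UserField};

// A partial layer: individual sources (file, env) each produce one of these,
// they are merged field by field, and the result is unwrapped into the full layer.
#[derive(Default, Merge)]
struct ClientPartial {
    torii_url: UserField<String>,
    chain_id: UserField<String>,
}

struct ClientFull {
    torii_url: String,
    chain_id: String,
}

impl UnwrapPartial for ClientPartial {
    type Output = ClientFull;

    fn unwrap_partial(self) -> UnwrapPartialResult<Self::Output> {
        let mut emitter = Emitter::new();
        let torii_url = self.torii_url.get();
        let chain_id = self.chain_id.get();
        if torii_url.is_none() {
            emitter.emit_missing_field("torii_url");
        }
        if chain_id.is_none() {
            emitter.emit_missing_field("chain_id");
        }
        // Reports *all* missing fields at once instead of failing one by one.
        emitter.finish()?;
        Ok(ClientFull {
            torii_url: torii_url.expect("checked above"),
            chain_id: chain_id.expect("checked above"),
        })
    }
}

fn demo() -> ClientFull {
    let mut base = ClientPartial::default();
    base.torii_url.set("http://127.0.0.1:8080/".to_owned());

    let mut overlay = ClientPartial::default();
    overlay.chain_id.set("00000000-0000-0000-0000-000000000000".to_owned());

    // Fields set in the overlay take precedence over the base.
    base.merge(overlay);
    base.unwrap_partial().expect("both fields were provided")
}
```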
+#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)] +#[serde(untagged)] +pub enum ExtendsPaths { + /// A single path to extend from + Single(PathBuf), + /// A chain of paths to extend from + Chain(Vec), +} + +/// Iterator over [`ExtendsPaths`] for convenience +pub enum ExtendsPathsIter<'a> { + #[allow(missing_docs)] + Single(Option<&'a PathBuf>), + #[allow(missing_docs)] + Multiple(std::slice::Iter<'a, PathBuf>), +} + +impl ExtendsPaths { + /// Normalise into an iterator over chain of paths to extend from + #[allow(clippy::iter_without_into_iter)] // extra for this case + pub fn iter(&self) -> ExtendsPathsIter<'_> { + match &self { + Self::Single(x) => ExtendsPathsIter::Single(Some(x)), + Self::Chain(vec) => ExtendsPathsIter::Multiple(vec.iter()), + } + } +} + +impl<'a> Iterator for ExtendsPathsIter<'a> { + type Item = &'a PathBuf; + + fn next(&mut self) -> Option { + match self { + Self::Single(x) => x.take(), + Self::Multiple(iter) => iter.next(), } } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn single_missing_field() { + let mut emitter: Emitter = Emitter::new(); + + emitter.emit_missing_field("foo"); + + let err = emitter.finish().unwrap_err(); + + assert_eq!(format!("{err}"), "missing field: `foo`") + } + + #[test] + fn multiple_missing_fields() { + let mut emitter: Emitter = Emitter::new(); + + emitter.emit_missing_field("foo"); + emitter.emit_missing_field("bar"); + + let err = emitter.finish().unwrap_err(); + + assert_eq!( + format!("{err}"), + "missing field: `foo`\nmissing field: `bar`" + ) + } + + #[test] + fn merging_user_fields_overrides_old_value() { + let mut field = UserField(None); + field.merge(UserField(Some(4))); + assert_eq!(field, UserField(Some(4))); + + let mut field = UserField(Some(4)); + field.merge(UserField(Some(5))); + assert_eq!(field, UserField(Some(5))); - /// Abstraction over the actual implementation of how env variables are gotten - /// from the environment. Necessary for mocking in tests. - pub trait FetchEnv { - /// The signature of [`std::env::var`]. - /// - /// # Errors - /// - /// See errors of [`std::env::var`]. - fn fetch>(&self, key: K) -> Result; + let mut field = UserField(Some(4)); + field.merge(UserField(None)); + assert_eq!(field, UserField(Some(4))); } - /// Trait for configuration loading and deserialization from disk - pub trait LoadFromDisk: Sized { - /// The return type. Could be target `Configuration`, - /// some `Result`, `Option`, or any other type that - /// wraps a `..Proxy` or `Configuration` type. - type ReturnValue; + #[derive(Deserialize, Default)] + #[serde(default)] + struct TestExtends { + extends: Option, + } + + #[test] + fn parse_empty_extends() { + let value: TestExtends = toml::from_str("").expect("should be fine with empty input"); - /// Construct [`Self`] from a path-like object. - /// - /// # Errors - /// - File not found. - /// - File found, but peer configuration parsing failed. - fn from_path + Debug + Clone>(path: P) -> Self::ReturnValue; + assert_eq!(value.extends, None); } - /// Trait for building the final config from a proxy one - pub trait Builder { - /// The return type. Could be target `Configuration`, - /// some `Result`, `Option` as users see fit. - type ReturnValue; + #[test] + fn parse_single_extends_path() { + let value: TestExtends = toml::toml! { + extends = "./path" + } + .try_into() + .unwrap(); - /// Construct [`Self::ReturnValue`] from a proxy object. 
- fn build(self) -> Self::ReturnValue; + assert_eq!(value.extends, Some(ExtendsPaths::Single("./path".into()))); } - impl Builder for Box { - type ReturnValue = T::ReturnValue; + #[test] + fn parse_multiple_extends_paths() { + let value: TestExtends = toml::toml! { + extends = ["foo", "bar", "baz"] + } + .try_into() + .unwrap(); + + assert_eq!( + value.extends, + Some(ExtendsPaths::Chain(vec![ + "foo".into(), + "bar".into(), + "baz".into() + ])) + ); + } - fn build(self) -> Self::ReturnValue { - T::build(*self) + #[test] + fn iterating_over_extends() { + impl ExtendsPaths { + fn as_str_vec(&self) -> Vec<&str> { + self.iter().map(|p| p.to_str().unwrap()).collect() + } } + + let single = ExtendsPaths::Single("single".into()); + assert_eq!(single.as_str_vec(), vec!["single"]); + + let multi = ExtendsPaths::Chain(vec!["foo".into(), "bar".into(), "baz".into()]); + assert_eq!(multi.as_str_vec(), vec!["foo", "bar", "baz"]); } - /// Deserialization helper for proxy fields that wrap an `Option` - /// - /// # Errors - /// When deserialization of the field fails, e.g. it doesn't have - /// the `Option>` - #[allow(clippy::option_option)] - pub fn some_option<'de, T, D>(deserializer: D) -> Result>, D::Error> - where - T: Deserialize<'de>, - D: Deserializer<'de>, - { - Option::::deserialize(deserializer).map(Some) + #[test] + fn deserialize_human_duration() { + #[derive(Deserialize)] + struct Test { + value: HumanDuration, + } + + let Test { value } = toml::toml! { + value = 10_500 + } + .try_into() + .expect("input is fine, should parse"); + + assert_eq!(value.get(), Duration::from_millis(10_500)); } } diff --git a/config/iroha_test_config.json b/config/iroha_test_config.json deleted file mode 100644 index 53339579831..00000000000 --- a/config/iroha_test_config.json +++ /dev/null @@ -1,123 +0,0 @@ -{ - "CHAIN_ID": "00000000-0000-0000-0000-000000000000", - "PUBLIC_KEY": "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B", - "PRIVATE_KEY": { - "digest_function": "ed25519", - "payload": "282ED9F3CF92811C3818DBC4AE594ED59DC1A2F78E4241E31924E101D6B1FB831C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B" - }, - "KURA": { - "INIT_MODE": "strict", - "BLOCK_STORE_PATH": "./storage", - "BLOCKS_PER_STORAGE_FILE": 1000, - "ACTOR_CHANNEL_CAPACITY": 100, - "DEBUG_OUTPUT_NEW_BLOCKS": false - }, - "SUMERAGI": { - "BLOCK_TIME_MS": 1000, - "TRUSTED_PEERS": [ - { - "address": "127.0.0.1:1337", - "public_key": "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B" - }, - { - "address": "127.0.0.1:1338", - "public_key": "ed0120CC25624D62896D3A0BFD8940F928DC2ABF27CC57CEFEB442AA96D9081AAE58A1" - }, - { - "address": "127.0.0.1:1339", - "public_key": "ed0120FACA9E8AA83225CB4D16D67F27DD4F93FC30FFA11ADC1F5C88FD5495ECC91020" - }, - { - "address": "127.0.0.1:1340", - "public_key": "ed01208E351A70B6A603ED285D666B8D689B680865913BA03CE29FB7D13A166C4E7F1F" - } - ], - "COMMIT_TIME_LIMIT_MS": 2000, - "MAX_TRANSACTIONS_IN_BLOCK": 8192, - "ACTOR_CHANNEL_CAPACITY": 100, - "GOSSIP_BATCH_SIZE": 500, - "GOSSIP_PERIOD_MS": 1000, - "DEBUG_FORCE_SOFT_FORK": false - }, - "TORII": { - "P2P_ADDR": "127.0.0.1:1337", - "API_URL": "127.0.0.1:8080", - "MAX_TRANSACTION_SIZE": 32768, - "MAX_CONTENT_LEN": 16384000 - }, - "BLOCK_SYNC": { - "GOSSIP_PERIOD_MS": 10000, - "BLOCK_BATCH_SIZE": 4, - "ACTOR_CHANNEL_CAPACITY": 100 - }, - "QUEUE": { - "MAX_TRANSACTIONS_IN_QUEUE": 65536, - "MAX_TRANSACTIONS_IN_QUEUE_PER_USER": 65536, - "TRANSACTION_TIME_TO_LIVE_MS": 86400000, - "FUTURE_THRESHOLD_MS": 1000 - }, - 
"LOGGER": { - "LEVEL": "INFO", - "FORMAT": "full", - "TOKIO_CONSOLE_ADDR": "127.0.0.1:5555" - }, - "GENESIS": { - "PUBLIC_KEY": "ed01204CFFD0EE429B1BDD36B3910EC570852B8BB63F18750341772FB46BC856C5CAAF", - "PRIVATE_KEY": { - "digest_function": "ed25519", - "payload": "D748E18CE60CB30DEA3E73C9019B7AF45A8D465E3D71BCC9A5EF99A008205E534CFFD0EE429B1BDD36B3910EC570852B8BB63F18750341772FB46BC856C5CAAF" - }, - "WAIT_FOR_PEERS_RETRY_COUNT_LIMIT": 100, - "WAIT_FOR_PEERS_RETRY_PERIOD_MS": 500, - "GENESIS_SUBMISSION_DELAY_MS": 1000, - "FILE": "./genesis.json" - }, - "WSV": { - "ASSET_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "ASSET_DEFINITION_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "ACCOUNT_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "DOMAIN_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "IDENT_LENGTH_LIMITS": { - "min": 1, - "max": 128 - }, - "TRANSACTION_LIMITS": { - "max_instruction_number": 4096, - "max_wasm_size_bytes": 4194304 - }, - "WASM_RUNTIME_CONFIG": { - "FUEL_LIMIT": 1000000, - "MAX_MEMORY": 524288000 - } - }, - "NETWORK": { - "ACTOR_CHANNEL_CAPACITY": 100 - }, - "TELEMETRY": { - "NAME": null, - "URL": null, - "MIN_RETRY_PERIOD": 1, - "MAX_RETRY_DELAY_EXPONENT": 4, - "FILE": null - }, - "SNAPSHOT": { - "CREATE_EVERY_MS": 60000, - "DIR_PATH": "./storage", - "CREATION_ENABLED": true - }, - "LIVE_QUERY_STORE": { - "QUERY_IDLE_TIME_MS": 30000 - } -} diff --git a/config/iroha_test_config.toml b/config/iroha_test_config.toml new file mode 100644 index 00000000000..eaade1dbfe4 --- /dev/null +++ b/config/iroha_test_config.toml @@ -0,0 +1,34 @@ +chain_id = "00000000-0000-0000-0000-000000000000" +public_key = "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B" +private_key = { digest_function = "ed25519", payload = "282ED9F3CF92811C3818DBC4AE594ED59DC1A2F78E4241E31924E101D6B1FB831C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B" } + +[network] +address = "127.0.0.1:1337" + +[genesis] +public_key = "ed01204CFFD0EE429B1BDD36B3910EC570852B8BB63F18750341772FB46BC856C5CAAF" +file = "./genesis.json" +private_key = { digest_function = "ed25519", payload = "D748E18CE60CB30DEA3E73C9019B7AF45A8D465E3D71BCC9A5EF99A008205E534CFFD0EE429B1BDD36B3910EC570852B8BB63F18750341772FB46BC856C5CAAF" } + +[torii] +address = "127.0.0.1:8080" + +[[sumeragi.trusted_peers]] +address = "127.0.0.1:1337" +public_key = "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B" + +[[sumeragi.trusted_peers]] +address = "127.0.0.1:1338" +public_key = "ed0120CC25624D62896D3A0BFD8940F928DC2ABF27CC57CEFEB442AA96D9081AAE58A1" + +[[sumeragi.trusted_peers]] +address = "127.0.0.1:1339" +public_key = "ed0120FACA9E8AA83225CB4D16D67F27DD4F93FC30FFA11ADC1F5C88FD5495ECC91020" + +[[sumeragi.trusted_peers]] +address = "127.0.0.1:1340" +public_key = "ed01208E351A70B6A603ED285D666B8D689B680865913BA03CE29FB7D13A166C4E7F1F" + +[logger] +format = "pretty" + diff --git a/config/src/block_sync.rs b/config/src/block_sync.rs deleted file mode 100644 index dd927df3ece..00000000000 --- a/config/src/block_sync.rs +++ /dev/null @@ -1,50 +0,0 @@ -//! Module for `BlockSynchronizer`-related configuration and structs. -use iroha_config_base::derive::Proxy; -use serde::{Deserialize, Serialize}; - -const DEFAULT_BLOCK_BATCH_SIZE: u32 = 4; -const DEFAULT_GOSSIP_PERIOD_MS: u64 = 10000; -const DEFAULT_ACTOR_CHANNEL_CAPACITY: u32 = 100; - -/// Configuration for `BlockSynchronizer`. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Proxy)] -#[serde(rename_all = "UPPERCASE")] -#[config(env_prefix = "BLOCK_SYNC_")] -pub struct Configuration { - /// The period of time to wait between sending requests for the latest block. - pub gossip_period_ms: u64, - /// The number of blocks that can be sent in one message. - /// Underlying network (`iroha_network`) should support transferring messages this large. - pub block_batch_size: u32, - /// Buffer capacity of actor's MPSC channel - pub actor_channel_capacity: u32, -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - gossip_period_ms: Some(DEFAULT_GOSSIP_PERIOD_MS), - block_batch_size: Some(DEFAULT_BLOCK_BATCH_SIZE), - actor_channel_capacity: Some(DEFAULT_ACTOR_CHANNEL_CAPACITY), - } - } -} - -#[cfg(test)] -pub mod tests { - use proptest::prelude::*; - - use super::*; - - prop_compose! { - pub fn arb_proxy() - ( - gossip_period_ms in prop::option::of(Just(DEFAULT_GOSSIP_PERIOD_MS)), - block_batch_size in prop::option::of(Just(DEFAULT_BLOCK_BATCH_SIZE)), - actor_channel_capacity in prop::option::of(Just(DEFAULT_ACTOR_CHANNEL_CAPACITY)), - ) - -> ConfigurationProxy { - ConfigurationProxy { gossip_period_ms, block_batch_size, actor_channel_capacity } - } - } -} diff --git a/config/src/client.rs b/config/src/client.rs deleted file mode 100644 index bdf559ddcda..00000000000 --- a/config/src/client.rs +++ /dev/null @@ -1,236 +0,0 @@ -//! Module for client-related configuration and structs -use core::str::FromStr; -use std::num::NonZeroU64; - -use derive_more::Display; -use eyre::{Result, WrapErr}; -use iroha_config_base::derive::{Error as ConfigError, Proxy}; -use iroha_crypto::prelude::*; -use iroha_data_model::{prelude::*, ChainId}; -use iroha_primitives::small::SmallStr; -use serde::{Deserialize, Serialize}; -use url::Url; - -#[allow(unsafe_code)] -const DEFAULT_TRANSACTION_TIME_TO_LIVE_MS: NonZeroU64 = - unsafe { NonZeroU64::new_unchecked(100_000) }; -const DEFAULT_TRANSACTION_STATUS_TIMEOUT_MS: u64 = 15_000; -const DEFAULT_ADD_TRANSACTION_NONCE: bool = false; - -/// Wrapper over `SmallStr` to provide basic auth login checking -#[derive(Debug, Display, Clone, Serialize, PartialEq, Eq)] -pub struct WebLogin(SmallStr); - -impl WebLogin { - /// Construct new [`Self`] - /// - /// # Errors - /// Fails if `login` contains `:` character, which is the binary representation of the '\0'. - pub fn new(login: &str) -> Result { - Self::from_str(login) - } -} - -impl FromStr for WebLogin { - type Err = eyre::ErrReport; - fn from_str(login: &str) -> Result { - if login.contains(':') { - eyre::bail!("The `:` character, in `{login}` is not allowed"); - } - - Ok(Self(SmallStr::from_str(login))) - } -} - -/// Deserializing `WebLogin` with `FromStr` implementation -impl<'de> Deserialize<'de> for WebLogin { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let s = String::deserialize(deserializer)?; - FromStr::from_str(&s).map_err(serde::de::Error::custom) - } -} - -/// Basic Authentication credentials -#[derive(Clone, Deserialize, Serialize, Debug, PartialEq, Eq)] -pub struct BasicAuth { - /// Login for Basic Authentication - pub web_login: WebLogin, - /// Password for Basic Authentication - pub password: SmallStr, -} - -/// `Configuration` provides an ability to define client parameters such as `TORII_URL`. 
-#[derive(Debug, Clone, Deserialize, Serialize, Proxy, PartialEq, Eq)] -#[serde(rename_all = "UPPERCASE")] -#[config(env_prefix = "IROHA_")] -pub struct Configuration { - /// Unique id of the blockchain. Used for simple replay attack protection. - pub chain_id: ChainId, - /// Public key of the user account. - #[config(serde_as_str)] - pub public_key: PublicKey, - /// Private key of the user account. - pub private_key: PrivateKey, - /// User account id. - pub account_id: AccountId, - /// Basic Authentication credentials - pub basic_auth: Option, - /// Torii URL. - pub torii_api_url: Url, - /// Proposed transaction TTL in milliseconds. - pub transaction_time_to_live_ms: Option, - /// Transaction status wait timeout in milliseconds. - pub transaction_status_timeout_ms: u64, - /// If `true` add nonce, which make different hashes for transactions which occur repeatedly and simultaneously - pub add_transaction_nonce: bool, -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - chain_id: None, - public_key: None, - private_key: None, - account_id: None, - basic_auth: Some(None), - torii_api_url: None, - transaction_time_to_live_ms: Some(Some(DEFAULT_TRANSACTION_TIME_TO_LIVE_MS)), - transaction_status_timeout_ms: Some(DEFAULT_TRANSACTION_STATUS_TIMEOUT_MS), - add_transaction_nonce: Some(DEFAULT_ADD_TRANSACTION_NONCE), - } - } -} - -// TODO: explain why these values were chosen. -const TTL_TOO_SMALL_THRESHOLD: u64 = 500; - -impl ConfigurationProxy { - /// Finalise Iroha client config proxy by checking that certain fields identify reasonable limits or - /// are well formatted. - /// - /// # Errors - /// - If the [`self.transaction_time_to_live_ms`] field is too small - /// - If the [`self.transaction_status_timeout_ms`] field is smaller than [`self.transaction_time_to_live_ms`] - /// - If the [`self.torii_api_url`] is malformed or had the wrong protocol - pub fn finish(&mut self) -> Result<()> { - if let Some(Some(tx_ttl)) = self.transaction_time_to_live_ms { - // Really small TTL would be detrimental to performance - if u64::from(tx_ttl) < TTL_TOO_SMALL_THRESHOLD { - eyre::bail!(ConfigError::InsaneValue { - field: "TRANSACTION_TIME_TO_LIVE_MS", - value: tx_ttl.to_string(), - message: format!(", because if it's smaller than {TTL_TOO_SMALL_THRESHOLD}, Iroha wouldn't be able to produce blocks on time.") - }); - } - // Timeouts bigger than transaction TTL don't make sense as then transaction would be discarded before this timeout - if let Some(timeout) = self.transaction_status_timeout_ms { - if timeout > tx_ttl.into() { - eyre::bail!(ConfigError::InsaneValue { - field: "TRANSACTION_STATUS_TIMEOUT_MS", - value: timeout.to_string(), - message: format!(", because it should be smaller than `TRANSACTION_TIME_TO_LIVE_MS`, which is {tx_ttl}") - }) - } - } - } - if let Some(api_url) = &self.torii_api_url { - if api_url.scheme() != "http" { - eyre::bail!(ConfigError::InsaneValue { - field: "TORII_API_URL", - value: api_url.to_string(), - message: ", because we only support HTTP".to_owned(), - }); - } - } - Ok(()) - } - - /// The wrapper around the client `ConfigurationProxy` that performs - /// finalisation prior to building `Configuration`. Just like - /// Iroha peer config, its `::build()` - /// method should never be used directly, as only this wrapper ensures final - /// coherence and fails if there are any issues. - /// - /// # Errors - /// - Finalisation fails - /// - Building fails, e.g. any of the inner fields had a `None` value when that - /// is not allowed by the defaults. 
- pub fn build(mut self) -> Result { - self.finish()?; - ::build(self) - .wrap_err("Failed to build `Configuration` from `ConfigurationProxy`") - } -} - -#[cfg(test)] -mod tests { - use iroha_config_base::proxy::LoadFromDisk; - use iroha_crypto::KeyGenConfiguration; - use proptest::prelude::*; - - use super::*; - use crate::torii::uri::DEFAULT_API_ADDR; - - const CONFIGURATION_PATH: &str = "../configs/client/config.json"; - - prop_compose! { - // TODO: make tests to check generated key validity - fn arb_keys_from_seed() - (seed in prop::collection::vec(any::(), 33..64)) -> (PublicKey, PrivateKey) { - let (public_key, private_key) = KeyPair::generate_with_configuration(KeyGenConfiguration::from_seed(seed)).expect("Seed was invalid").into(); - (public_key, private_key) - } - } - - prop_compose! { - fn arb_keys_with_option() - (keys in arb_keys_from_seed()) - ((a, b) in (prop::option::of(Just(keys.0)), prop::option::of(Just(keys.1)))) - -> (Option, Option) { - (a, b) - } - } - - fn placeholder_account() -> AccountId { - AccountId::from_str("alice@wonderland").expect("Invalid account Id ") - } - - prop_compose! { - fn arb_proxy() - ( - chain_id in prop::option::of(Just(crate::iroha::tests::placeholder_chain_id())), - (public_key, private_key) in arb_keys_with_option(), - account_id in prop::option::of(Just(placeholder_account())), - basic_auth in prop::option::of(Just(None)), - torii_api_url in prop::option::of(Just(format!("http://{DEFAULT_API_ADDR}").parse().unwrap())), - transaction_time_to_live_ms in prop::option::of(Just(Some(DEFAULT_TRANSACTION_TIME_TO_LIVE_MS))), - transaction_status_timeout_ms in prop::option::of(Just(DEFAULT_TRANSACTION_STATUS_TIMEOUT_MS)), - add_transaction_nonce in prop::option::of(Just(DEFAULT_ADD_TRANSACTION_NONCE)), - ) - -> ConfigurationProxy { - ConfigurationProxy { chain_id, public_key, private_key, account_id, basic_auth, torii_api_url, transaction_time_to_live_ms, transaction_status_timeout_ms, add_transaction_nonce } - } - } - - proptest! { - #[test] - fn client_proxy_build_fails_on_none(proxy in arb_proxy()) { - let cfg = proxy.build(); - if cfg.is_ok() { - let example_cfg = ConfigurationProxy::from_path(CONFIGURATION_PATH).build().expect("Failed to build example Iroha config. \ - This probably means that some of the fields of the `CONFIGURATION PATH` \ - JSON were not updated properly with new changes."); - let arb_cfg = cfg.expect("Config generated by proptest was checked to be ok by the surrounding if clause"); - // Skipping keys and `basic_auth` check as they're different from the file - assert_eq!(arb_cfg.torii_api_url, example_cfg.torii_api_url); - assert_eq!(arb_cfg.account_id, example_cfg.account_id); - assert_eq!(arb_cfg.transaction_time_to_live_ms, example_cfg.transaction_time_to_live_ms); - assert_eq!(arb_cfg.transaction_status_timeout_ms, example_cfg.transaction_status_timeout_ms); - assert_eq!(arb_cfg.add_transaction_nonce, example_cfg.add_transaction_nonce); - } - } - } -} diff --git a/config/src/client_api.rs b/config/src/client_api.rs index 030edb8523a..f87bc5b7a41 100644 --- a/config/src/client_api.rs +++ b/config/src/client_api.rs @@ -2,8 +2,8 @@ //! //! Intended usage: //! -//! - Create [`ConfigurationDTO`] from [`crate::iroha::Configuration`] and serialize it for the client -//! - Deserialize [`ConfigurationDTO`] from the client and use [`ConfigurationDTO::apply_update()`] to update the configuration +//! - Create [`ConfigDTO`] from [`crate::iroha::Configuration`] and serialize it for the client +//! 
- Deserialize [`ConfigDTO`] from the client and use [`ConfigDTO::apply_update()`] to update the configuration // TODO: Currently logic here is not generalised and handles only `logger.level` parameter. In future, when // other parts of configuration are refactored and there is a solid foundation e.g. as a general // configuration-related crate, this part should be re-written in a clean way. @@ -12,19 +12,19 @@ use iroha_data_model::Level; use serde::{Deserialize, Serialize}; -use super::{iroha::Configuration as BaseConfiguration, logger::Configuration as BaseLogger}; +use crate::parameters::actual::{Logger as BaseLogger, Root as BaseConfig}; /// Subset of [`super::iroha`] configuration. #[derive(Debug, Serialize, Deserialize, Clone, Copy)] -pub struct ConfigurationDTO { +pub struct ConfigDTO { #[allow(missing_docs)] pub logger: Logger, } -impl From<&'_ BaseConfiguration> for ConfigurationDTO { - fn from(value: &'_ BaseConfiguration) -> Self { +impl From<&'_ BaseConfig> for ConfigDTO { + fn from(value: &'_ BaseConfig) -> Self { Self { - logger: value.logger.as_ref().into(), + logger: (&value.logger).into(), } } } @@ -48,7 +48,7 @@ mod test { #[test] fn snapshot_serialized_form() { - let value = ConfigurationDTO { + let value = ConfigDTO { logger: Logger { level: Level::TRACE, }, diff --git a/config/src/genesis.rs b/config/src/genesis.rs deleted file mode 100644 index b6881ac4d65..00000000000 --- a/config/src/genesis.rs +++ /dev/null @@ -1,141 +0,0 @@ -//! Module with genesis configuration logic. -use std::path::PathBuf; - -use eyre::Report; -use iroha_config_base::derive::{view, Proxy}; -use iroha_crypto::{KeyPair, PrivateKey, PublicKey}; -use iroha_genesis::RawGenesisBlock; -use serde::{Deserialize, Serialize}; - -// Generate `ConfigurationView` without the private key -view! { - /// Configuration of the genesis block and the process of its submission. - #[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy)] - #[serde(rename_all = "UPPERCASE")] - #[config(env_prefix = "IROHA_GENESIS_")] - pub struct Configuration { - /// The public key of the genesis account, should be supplied to all peers. - #[config(serde_as_str)] - pub public_key: PublicKey, - /// The private key of the genesis account, only needed for the peer that submits the genesis block. 
- #[view(ignore)] - pub private_key: Option, - /// Path to the genesis file - #[config(serde_as_str)] - pub file: Option - } -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - public_key: None, - private_key: Some(None), - file: None, - } - } -} - -/// Parsed variant of the user-provided [`Configuration`] -// TODO: incorporate this struct into the final, parsed configuration -// https://github.com/hyperledger/iroha/issues/3500 -pub enum ParsedConfiguration { - /// The peer can only observe the genesis block - Partial { - /// Genesis account public key - public_key: PublicKey, - }, - /// The peer is responsible for submitting the genesis block - Full { - /// Genesis account key pair - key_pair: KeyPair, - /// Raw genesis block - raw_block: RawGenesisBlock, - }, -} - -impl Configuration { - /// Parses user configuration into a stronger-typed structure [`ParsedConfiguration`] - /// - /// # Errors - /// See [`ParseError`] - pub fn parse(self, submit: bool) -> Result { - match (self.private_key, self.file, submit) { - (None, None, false) => Ok(ParsedConfiguration::Partial { - public_key: self.public_key, - }), - (Some(private_key), Some(path), true) => { - let raw_block = RawGenesisBlock::from_path(&path) - .map_err(|report| ParseError::File { path, report })?; - - Ok(ParsedConfiguration::Full { - key_pair: KeyPair::new(self.public_key, private_key)?, - raw_block, - }) - } - (_, _, true) => Err(ParseError::SubmitIsSetButRestAreNot), - (_, _, false) => Err(ParseError::SubmitIsNotSetButRestAre), - } - } -} - -/// Error which might occur during [`Configuration::parse()`] -#[derive(Debug, displaydoc::Display, thiserror::Error)] -pub enum ParseError { - /// `--submit-genesis` was provided, but `genesis.private_key` and/or `genesis.file` are missing - SubmitIsSetButRestAreNot, - /// `--submit-genesis` was not provided, but `genesis.private_key` and/or `genesis.file` are set - SubmitIsNotSetButRestAre, - /// Genesis key pair is invalid - InvalidKeyPair(#[from] iroha_crypto::error::Error), - /// Cannot read the genesis block from file `{path}` - File { - /// Original error report - #[source] - report: Report, - /// Path to the file - path: PathBuf, - }, -} - -#[cfg(test)] -pub mod tests { - use iroha_crypto::KeyPair; - use proptest::prelude::*; - - use super::*; - - /// Key-pair used by default for test purposes - fn placeholder_keypair() -> KeyPair { - let public_key = "ed01204CFFD0EE429B1BDD36B3910EC570852B8BB63F18750341772FB46BC856C5CAAF" - .parse() - .expect("Public key not in multihash format"); - let private_key = PrivateKey::from_hex( - iroha_crypto::Algorithm::Ed25519, - "D748E18CE60CB30DEA3E73C9019B7AF45A8D465E3D71BCC9A5EF99A008205E534CFFD0EE429B1BDD36B3910EC570852B8BB63F18750341772FB46BC856C5CAAF" - ).expect("Private key not hex encoded"); - - KeyPair::new(public_key, private_key).expect("Key pair mismatch") - } - - #[allow(clippy::option_option)] - fn arb_keys() -> BoxedStrategy<(Option, Option>)> { - let (pub_key, _) = placeholder_keypair().into(); - ( - prop::option::of(Just(pub_key)), - prop::option::of(Just(None)), - ) - .boxed() - } - - prop_compose! { - pub fn arb_proxy() - ( - (public_key, private_key) in arb_keys(), - file in prop::option::of(Just(None)) - ) - -> ConfigurationProxy { - ConfigurationProxy { public_key, private_key, file } - } - } -} diff --git a/config/src/iroha.rs b/config/src/iroha.rs deleted file mode 100644 index ac33c9a2f32..00000000000 --- a/config/src/iroha.rs +++ /dev/null @@ -1,282 +0,0 @@ -//! 
This module contains [`struct@Configuration`] structure and related implementation. -use std::fmt::Debug; - -use iroha_config_base::derive::{view, Error as ConfigError, Proxy}; -use iroha_crypto::prelude::*; -use iroha_data_model::ChainId; -use serde::{Deserialize, Serialize}; - -use super::*; - -// Generate `ConfigurationView` without the private key -view! { - /// Configuration parameters for a peer - #[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy)] - #[serde(rename_all = "UPPERCASE")] - #[config(env_prefix = "IROHA_")] - pub struct Configuration { - /// Unique id of the blockchain. Used for simple replay attack protection. - #[config(serde_as_str)] - pub chain_id: ChainId, - /// Public key of this peer - #[config(serde_as_str)] - pub public_key: PublicKey, - /// Private key of this peer - #[view(ignore)] - pub private_key: PrivateKey, - /// `Kura` configuration - #[config(inner)] - pub kura: Box, - /// `Sumeragi` configuration - #[config(inner)] - #[view(into = Box)] - pub sumeragi: Box, - /// `Torii` configuration - #[config(inner)] - pub torii: Box, - /// `BlockSynchronizer` configuration - #[config(inner)] - pub block_sync: block_sync::Configuration, - /// `Queue` configuration - #[config(inner)] - pub queue: queue::Configuration, - /// `Logger` configuration - #[config(inner)] - pub logger: Box, - /// `GenesisBlock` configuration - #[config(inner)] - #[view(into = Box)] - pub genesis: Box, - /// `WorldStateView` configuration - #[config(inner)] - pub wsv: Box, - /// Network configuration - #[config(inner)] - pub network: network::Configuration, - /// Telemetry configuration - #[config(inner)] - pub telemetry: Box, - /// SnapshotMaker configuration - #[config(inner)] - pub snapshot: Box, - /// LiveQueryStore configuration - #[config(inner)] - pub live_query_store: live_query_store::Configuration, - } -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - chain_id: None, - public_key: None, - private_key: None, - kura: Some(Box::default()), - sumeragi: Some(Box::default()), - torii: Some(Box::default()), - block_sync: Some(block_sync::ConfigurationProxy::default()), - queue: Some(queue::ConfigurationProxy::default()), - logger: Some(Box::default()), - genesis: Some(Box::default()), - wsv: Some(Box::default()), - network: Some(network::ConfigurationProxy::default()), - telemetry: Some(Box::default()), - snapshot: Some(Box::default()), - live_query_store: Some(live_query_store::ConfigurationProxy::default()), - } - } -} - -impl ConfigurationProxy { - /// Finalise Iroha config proxy by instantiating mutually equivalent fields - /// via the uppermost Iroha config fields. Configuration fields provided in the - /// Iroha config always overwrite those in sumeragi even in case of discrepancy, - /// so proper care is advised. - /// - /// # Errors - /// - If the relevant uppermost Iroha config fields were not provided. - pub fn finish(&mut self) -> Result<(), ConfigError> { - if let Some(sumeragi_proxy) = &mut self.sumeragi { - // First, iroha public/private key and sumeragi keypair are interchangeable, but - // the user is allowed to provide only the former, and keypair is generated automatically, - // bailing out if key_pair provided in sumeragi no matter its value - if sumeragi_proxy.key_pair.is_some() { - return Err(ConfigError::ProvidedInferredField { - field: "key_pair", - message: "Sumeragi should not be provided with `KEY_PAIR` directly. That value is computed from the other config parameters. 
Please set the `KEY_PAIR` to `null` or omit entirely." - }); - } - if let (Some(public_key), Some(private_key)) = (&self.public_key, &self.private_key) { - sumeragi_proxy.key_pair = - Some(KeyPair::new(public_key.clone(), private_key.clone())?); - } else { - return Err(ConfigError::MissingField { - field: "PUBLIC_KEY and PRIVATE_KEY", - message: "The sumeragi keypair is not provided in the example configuration. It's done this way to ensure you don't re-use the example keys in production, and know how to generate new keys. Please have a look at \n\nhttps://hyperledger.github.io/iroha-2-docs/guide/configure/keys.html\n\nto learn more.\n\n-----", - }); - } - // Second, torii gateway and sumeragi peer id are interchangeable too; the latter is derived from the - // former and overwritten silently in case of difference - if let Some(torii_proxy) = &mut self.torii { - if sumeragi_proxy.peer_id.is_none() { - sumeragi_proxy.peer_id = Some(iroha_data_model::prelude::PeerId::new( - torii_proxy - .p2p_addr - .clone() - .ok_or(ConfigError::MissingField { - field: "p2p_addr", - message: - "`p2p_addr` should not be set to `null` or `None` explicitly.", - })?, - self.public_key.clone().expect( - "Iroha `public_key` should have been initialized above at the latest", - ), - )); - } else { - // TODO: should we just warn the user that this value will be ignored? - // TODO: Consider eliminating this value from the public API. - return Err(ConfigError::ProvidedInferredField { - field: "PEER_ID", - message: "The `peer_id` is computed from the key and address. You should remove it from the config.", - }); - } - } else { - return Err(ConfigError::MissingField{ - field: "p2p_addr", - message: "Torii config should have at least `p2p_addr` provided for sumeragi finalisation", - }); - } - - sumeragi_proxy.insert_self_as_trusted_peers() - } - - Ok(()) - } - - /// The wrapper around the topmost Iroha `ConfigurationProxy` - /// that performs finalisation prior to building. For the uppermost - /// Iroha config, its `::build()` - /// method should never be used directly, as only this wrapper ensures final - /// coherence. - /// - /// # Errors - /// - Finalisation fails - /// - Building fails, e.g. any of the inner fields had a `None` value when that - /// is not allowed by the defaults. - pub fn build(mut self) -> Result { - self.finish()?; - ::build(self) - } -} - -#[cfg(test)] -pub mod tests { - use std::path::PathBuf; - - use proptest::prelude::*; - - use super::*; - use crate::{base::proxy::LoadFromDisk, sumeragi::TrustedPeers}; - - const CONFIGURATION_PATH: &str = "./iroha_test_config.json"; - - /// Key-pair used for proptests generation - pub fn placeholder_keypair() -> KeyPair { - let private_key = PrivateKey::from_hex( - Algorithm::Ed25519, - "282ED9F3CF92811C3818DBC4AE594ED59DC1A2F78E4241E31924E101D6B1FB831C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B" - ).expect("Private key not hex encoded"); - - KeyPair::new( - "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B" - .parse() - .expect("Public key not in mulithash format"), - private_key, - ) - .expect("Key pair mismatch") - } - - fn arb_keys() -> BoxedStrategy<(Option, Option)> { - let (pub_key, priv_key) = placeholder_keypair().into(); - ( - prop::option::of(Just(pub_key)), - prop::option::of(Just(priv_key)), - ) - .boxed() - } - - pub fn placeholder_chain_id() -> ChainId { - ChainId::new("0") - } - - prop_compose! 
{ - fn arb_proxy()( - chain_id in prop::option::of(Just(placeholder_chain_id())), - (public_key, private_key) in arb_keys(), - kura in prop::option::of(kura::tests::arb_proxy().prop_map(Box::new)), - sumeragi in (prop::option::of(sumeragi::tests::arb_proxy().prop_map(Box::new))), - torii in (prop::option::of(torii::tests::arb_proxy().prop_map(Box::new))), - block_sync in prop::option::of(block_sync::tests::arb_proxy()), - queue in prop::option::of(queue::tests::arb_proxy()), - logger in prop::option::of(logger::tests::arb_proxy().prop_map(Box::new)), - genesis in prop::option::of(genesis::tests::arb_proxy().prop_map(Box::new)), - wsv in prop::option::of(wsv::tests::arb_proxy().prop_map(Box::new)), - network in prop::option::of(network::tests::arb_proxy()), - telemetry in prop::option::of(telemetry::tests::arb_proxy().prop_map(Box::new)), - snapshot in prop::option::of(snapshot::tests::arb_proxy().prop_map(Box::new)), - live_query_store in prop::option::of(live_query_store::tests::arb_proxy()), - ) -> ConfigurationProxy { - ConfigurationProxy { chain_id, public_key, private_key, kura, sumeragi, torii, block_sync, queue, - logger, genesis, wsv, network, telemetry, snapshot, live_query_store } - } - } - - proptest! { - fn __iroha_proxy_build_fails_on_none(proxy in arb_proxy()) { - let cfg = proxy.build(); - let example_cfg = ConfigurationProxy::from_path(CONFIGURATION_PATH).build().expect("Failed to build example Iroha config"); - if cfg.is_ok() { - assert_eq!(cfg.unwrap(), example_cfg) - } - } - } - - #[test] - fn iroha_proxy_build_fails_on_none() { - // Using `stacker` because test generated by `proptest!` takes too much stack space. - // Allocating 3MB. - stacker::grow(3 * 1024 * 1024, __iroha_proxy_build_fails_on_none) - } - - #[test] - fn parse_example_json() { - let cfg_proxy = ConfigurationProxy::from_path(CONFIGURATION_PATH); - assert_eq!( - PathBuf::from("./storage"), - cfg_proxy.kura.unwrap().block_store_path.unwrap() - ); - assert_eq!( - 10000, - cfg_proxy - .block_sync - .expect("Block sync configuration was None") - .gossip_period_ms - .expect("Gossip period was None") - ); - } - - #[test] - fn example_json_proxy_builds() { - ConfigurationProxy::from_path(CONFIGURATION_PATH).build().unwrap_or_else(|err| panic!("`ConfigurationProxy` specified in {CONFIGURATION_PATH} \ - failed to build. This probably means that some of the fields there were not updated \ - properly with new changes. Error: {err}")); - } - - #[test] - #[should_panic(expected = "Failed to parse Trusted Peers")] - fn parse_trusted_peers_fail_duplicate_peer_id() { - let trusted_peers_string = r#"[{"address":"127.0.0.1:1337", "public_key": "ed0120954C83A4220FAFFB2C1D23FC5225B3E7952D53ACBB2A065FF30C631E5E1D6B10"}, {"address":"127.0.0.1:1337", "public_key": "ed0120954C83A4220FAFFB2C1D23FC5225B3E7952D53ACBB2A065FF30C631E5E1D6B10"}, {"address":"localhost:1338", "public_key": "ed0120954C83A4220FAFFB2C1D23FC5225B3E7952D53ACBB2A065FF30C631E5E1D6B10"}, {"address": "195.162.0.1:23", "public_key": "ed0120954C83A4220FAFFB2C1D23FC5225B3E7952D53ACBB2A065FF30C631E5E1D6B10"}]"#; - let _result: TrustedPeers = - serde_json::from_str(trusted_peers_string).expect("Failed to parse Trusted Peers"); - } -} diff --git a/config/src/kura.rs b/config/src/kura.rs index 8f97dbbf94b..507e44db3da 100644 --- a/config/src/kura.rs +++ b/config/src/kura.rs @@ -1,40 +1,23 @@ -//! Module for kura-related configuration and structs +//! Configuration tools related to Kura specifically. 
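+//!
+//! For example, [`Mode`] now round-trips through its snake_case string form via
+//! `strum`, which is also what serde uses thanks to `serde_with` (this mirrors the
+//! unit tests below):
+//!
+//! ```
+//! use iroha_config::kura::Mode;
+//!
+//! assert_eq!("fast".parse::<Mode>().unwrap(), Mode::Fast);
+//! assert_eq!(Mode::Strict.to_string(), "strict");
+//! ```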
-use std::path::PathBuf; +// use iroha_config_base::{impl_deserialize_from_str, impl_serialize_display}; -use eyre::Result; -use iroha_config_base::derive::Proxy; -use serde::{Deserialize, Serialize}; - -const DEFAULT_BLOCK_STORE_PATH: &str = "./storage"; - -/// `Kura` configuration. -#[derive(Clone, Deserialize, Serialize, Debug, Proxy, PartialEq, Eq)] -#[serde(rename_all = "UPPERCASE")] -#[config(env_prefix = "KURA_")] -pub struct Configuration { - /// Initialization mode: `strict` or `fast`. - pub init_mode: Mode, - /// Path to the existing block store folder or path to create new folder. - #[config(serde_as_str)] - pub block_store_path: PathBuf, - /// Whether or not new blocks be outputted to a file called blocks.json. - pub debug_output_new_blocks: bool, -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - init_mode: Some(Mode::default()), - block_store_path: Some(DEFAULT_BLOCK_STORE_PATH.into()), - debug_output_new_blocks: Some(false), - } - } -} +use serde_with::{DeserializeFromStr, SerializeDisplay}; /// Kura initialization mode. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Deserialize, Serialize)] -#[serde(rename_all = "snake_case")] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + Eq, + Default, + strum::EnumString, + strum::Display, + DeserializeFromStr, + SerializeDisplay, +)] +#[strum(serialize_all = "snake_case")] pub enum Mode { /// Strict validation of all blocks. #[default] @@ -44,20 +27,14 @@ pub enum Mode { } #[cfg(test)] -pub mod tests { - use proptest::prelude::*; - - use super::*; - - prop_compose! { - pub fn arb_proxy() - ( - init_mode in prop::option::of(Just(Mode::default())), - block_store_path in prop::option::of(Just(DEFAULT_BLOCK_STORE_PATH.into())), - debug_output_new_blocks in prop::option::of(Just(false)) - ) - -> ConfigurationProxy { - ConfigurationProxy { init_mode, block_store_path, debug_output_new_blocks } - } +mod tests { + use crate::kura::Mode; + + #[test] + fn init_mode_display_reprs() { + assert_eq!(format!("{}", Mode::Strict), "strict"); + assert_eq!(format!("{}", Mode::Fast), "fast"); + assert_eq!("strict".parse::().unwrap(), Mode::Strict); + assert_eq!("fast".parse::().unwrap(), Mode::Fast); } } diff --git a/config/src/lib.rs b/config/src/lib.rs index 423e5a8dd19..1697443be46 100644 --- a/config/src/lib.rs +++ b/config/src/lib.rs @@ -1,20 +1,8 @@ -//! Aggregate configuration for different Iroha modules. +//! Iroha configuration and related utilities. + pub use iroha_config_base as base; -pub mod block_sync; -pub mod client; pub mod client_api; -pub mod genesis; -pub mod iroha; pub mod kura; -pub mod live_query_store; pub mod logger; -pub mod network; -pub mod path; -pub mod queue; -pub mod snapshot; -pub mod sumeragi; -pub mod telemetry; -pub mod torii; -pub mod wasm; -pub mod wsv; +pub mod parameters; diff --git a/config/src/live_query_store.rs b/config/src/live_query_store.rs deleted file mode 100644 index de8b2a31ec2..00000000000 --- a/config/src/live_query_store.rs +++ /dev/null @@ -1,44 +0,0 @@ -//! Module for `LiveQueryStore`-related configuration and structs. - -use std::num::NonZeroU64; - -use iroha_config_base::derive::Proxy; -use serde::{Deserialize, Serialize}; - -/// Default max time a query can remain in the store unaccessed -pub static DEFAULT_QUERY_IDLE_TIME_MS: once_cell::sync::Lazy = - once_cell::sync::Lazy::new(|| NonZeroU64::new(30_000).unwrap()); - -/// Configuration for `QueryService`. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy)] -#[serde(rename_all = "UPPERCASE")] -#[config(env_prefix = "LIVE_QUERY_STORE_")] -pub struct Configuration { - /// Time query can remain in the store if unaccessed - pub query_idle_time_ms: NonZeroU64, -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - query_idle_time_ms: Some(*DEFAULT_QUERY_IDLE_TIME_MS), - } - } -} - -#[cfg(test)] -pub mod tests { - use proptest::prelude::*; - - use super::*; - - prop_compose! { - pub fn arb_proxy() - ( - query_idle_time_ms in prop::option::of(Just(*DEFAULT_QUERY_IDLE_TIME_MS)), - ) - -> ConfigurationProxy { - ConfigurationProxy { query_idle_time_ms } - } - } -} diff --git a/config/src/logger.rs b/config/src/logger.rs index 6d5e4e9d5e6..e5038337396 100644 --- a/config/src/logger.rs +++ b/config/src/logger.rs @@ -1,15 +1,7 @@ -//! Module containing logic related to spawning a logger from the -//! configuration, as well as run-time reloading of the log-level. -use core::fmt::Debug; +//! Configuration utils related to Logger specifically. -use iroha_config_base::derive::Proxy; pub use iroha_data_model::Level; -#[cfg(feature = "tokio-console")] -use iroha_primitives::addr::{socket_addr, SocketAddr}; -use serde::{Deserialize, Serialize}; - -#[cfg(feature = "tokio-console")] -const DEFAULT_TOKIO_CONSOLE_ADDR: SocketAddr = socket_addr!(127.0.0.1:5555); +use serde_with::{DeserializeFromStr, SerializeDisplay}; /// Convert [`Level`] into [`tracing::Level`] pub fn into_tracing_level(level: Level) -> tracing::Level { @@ -22,28 +14,23 @@ pub fn into_tracing_level(level: Level) -> tracing::Level { } } -/// 'Logger' configuration. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy)] -#[serde(rename_all = "UPPERCASE")] -#[config(env_prefix = "LOG_")] -// `tokio_console_addr` is not `Copy`, but warning appears without `tokio-console` feature -#[allow(missing_copy_implementations)] -pub struct Configuration { - /// Level of logging verbosity - #[config(serde_as_str)] - pub level: Level, - /// Output format - pub format: Format, - #[cfg(feature = "tokio-console")] - /// Address of tokio console (only available under "tokio-console" feature) - pub tokio_console_addr: SocketAddr, -} - /// Reflects formatters in [`tracing_subscriber::fmt::format`] -#[derive(Debug, Copy, Clone, Eq, PartialEq, Deserialize, Serialize)] -#[serde(rename_all = "lowercase")] +#[derive( + Debug, + Copy, + Clone, + Eq, + PartialEq, + strum::Display, + strum::EnumString, + Default, + SerializeDisplay, + DeserializeFromStr, +)] +#[strum(serialize_all = "snake_case")] pub enum Format { /// See [`tracing_subscriber::fmt::format::Full`] + #[default] Full, /// See [`tracing_subscriber::fmt::format::Compact`] Compact, @@ -53,44 +40,9 @@ pub enum Format { Json, } -impl Default for Format { - fn default() -> Self { - Self::Full - } -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - level: Some(Level::default()), - format: Some(Format::default()), - #[cfg(feature = "tokio-console")] - tokio_console_addr: Some(DEFAULT_TOKIO_CONSOLE_ADDR), - } - } -} - #[cfg(test)] pub mod tests { - use proptest::prelude::*; - - use super::*; - - #[must_use = "strategies do nothing unless used"] - pub fn arb_proxy() -> impl proptest::strategy::Strategy { - let strat = ( - (prop::option::of(Just(Level::default()))), - (prop::option::of(Just(Format::default()))), - #[cfg(feature = "tokio-console")] - (prop::option::of(Just(DEFAULT_TOKIO_CONSOLE_ADDR))), - ); - 
proptest::strategy::Strategy::prop_map(strat, move |strat| ConfigurationProxy { - level: strat.0, - format: strat.1, - #[cfg(feature = "tokio-console")] - tokio_console_addr: strat.2, - }) - } + use crate::logger::Format; #[test] fn serialize_pretty_format_in_lowercase() { diff --git a/config/src/network.rs b/config/src/network.rs deleted file mode 100644 index 845743fac42..00000000000 --- a/config/src/network.rs +++ /dev/null @@ -1,39 +0,0 @@ -//! Module for network-related configuration and structs -use iroha_config_base::derive::Proxy; -use serde::{Deserialize, Serialize}; - -const DEFAULT_ACTOR_CHANNEL_CAPACITY: u32 = 100; - -/// Network Configuration parameters -#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Proxy)] -#[serde(rename_all = "UPPERCASE")] -#[config(env_prefix = "IROHA_NETWORK_")] -pub struct Configuration { - /// Buffer capacity of actor's MPSC channel - pub actor_channel_capacity: u32, -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - actor_channel_capacity: Some(DEFAULT_ACTOR_CHANNEL_CAPACITY), - } - } -} - -#[cfg(test)] -pub mod tests { - use proptest::prelude::*; - - use super::*; - - prop_compose! { - pub fn arb_proxy() - ( - actor_channel_capacity in prop::option::of(Just(DEFAULT_ACTOR_CHANNEL_CAPACITY)), - ) - -> ConfigurationProxy { - ConfigurationProxy { actor_channel_capacity } - } - } -} diff --git a/config/src/parameters/actual.rs b/config/src/parameters/actual.rs new file mode 100644 index 00000000000..9a54da8e990 --- /dev/null +++ b/config/src/parameters/actual.rs @@ -0,0 +1,251 @@ +//! "Actual" layer of Iroha configuration parameters. It contains strongly-typed validated +//! structures in a way that is efficient for Iroha internally. + +use std::{ + num::NonZeroU32, + path::{Path, PathBuf}, + time::Duration, +}; + +use iroha_config_base::{FromEnv, StdEnv, UnwrapPartial}; +use iroha_crypto::{KeyPair, PublicKey}; +use iroha_data_model::{ + metadata::Limits as MetadataLimits, peer::PeerId, transaction::TransactionLimits, ChainId, + LengthLimits, +}; +use iroha_primitives::{addr::SocketAddr, unique_vec::UniqueVec}; +use serde::{Deserialize, Serialize}; +use url::Url; +pub use user::{Logger, Queue, Snapshot}; + +use crate::{ + kura::Mode, + parameters::{ + defaults, user, + user::{CliContext, RootPartial}, + }, +}; + +/// Parsed configuration root +#[derive(Debug, Clone)] +#[allow(missing_docs)] +pub struct Root { + pub common: Common, + pub genesis: Genesis, + pub torii: Torii, + pub kura: Kura, + pub sumeragi: Sumeragi, + pub block_sync: BlockSync, + pub transaction_gossiper: TransactionGossiper, + pub live_query_store: LiveQueryStore, + pub logger: Logger, + pub queue: Queue, + pub snapshot: Snapshot, + pub telemetry: Option, + pub dev_telemetry: Option, + pub chain_wide: ChainWide, +} + +impl Root { + /// Loads configuration from a file and environment variables + /// + /// # Errors + /// - unable to load config from a TOML file + /// - unable to parse config from envs + /// - the config is invalid + pub fn load>(path: Option
, cli: CliContext) -> Result { + let from_file = path.map(RootPartial::from_toml).transpose()?; + let from_env = RootPartial::from_env(&StdEnv)?; + let merged = match from_file { + Some(x) => x.merge(from_env), + None => from_env, + }; + let config = merged.unwrap_partial()?.parse(cli)?; + Ok(config) + } +} + +/// Common options shared between multiple places +#[allow(missing_docs)] +#[derive(Debug, Clone)] +pub struct Common { + pub chain_id: ChainId, + pub key_pair: KeyPair, + pub p2p_address: SocketAddr, +} + +impl Common { + /// Construct an id of this peer + pub fn peer_id(&self) -> PeerId { + PeerId::new(self.p2p_address.clone(), self.key_pair.public_key().clone()) + } +} + +/// Parsed genesis configuration +#[derive(Debug, Clone)] +pub enum Genesis { + /// The peer can only observe the genesis block + Partial { + /// Genesis account public key + public_key: PublicKey, + }, + /// The peer is responsible for submitting the genesis block + Full { + /// Genesis account key pair + key_pair: KeyPair, + /// Path to the [`RawGenesisBlock`] + file: PathBuf, + }, +} + +impl Genesis { + /// Access the public key, which is always present in the genesis config + pub fn public_key(&self) -> &PublicKey { + match self { + Self::Partial { public_key } => public_key, + Self::Full { key_pair, .. } => key_pair.public_key(), + } + } + + /// Access the key pair, if present + pub fn key_pair(&self) -> Option<&KeyPair> { + match self { + Self::Partial { .. } => None, + Self::Full { key_pair, .. } => Some(key_pair), + } + } +} + +#[allow(missing_docs)] +#[derive(Debug, Clone)] +pub struct Kura { + pub init_mode: Mode, + pub store_dir: PathBuf, + pub debug_output_new_blocks: bool, +} + +impl Default for Queue { + fn default() -> Self { + Self { + transaction_time_to_live: defaults::queue::DEFAULT_TRANSACTION_TIME_TO_LIVE, + future_threshold: defaults::queue::DEFAULT_FUTURE_THRESHOLD, + capacity: defaults::queue::DEFAULT_MAX_TRANSACTIONS_IN_QUEUE, + capacity_per_user: defaults::queue::DEFAULT_MAX_TRANSACTIONS_IN_QUEUE_PER_USER, + } + } +} + +#[derive(Debug, Clone)] +#[allow(missing_docs)] +pub struct Sumeragi { + pub trusted_peers: UniqueVec, + pub debug_force_soft_fork: bool, +} + +#[derive(Debug, Clone, Copy)] +#[allow(missing_docs)] +pub struct LiveQueryStore { + pub idle_time: Duration, +} + +impl Default for LiveQueryStore { + fn default() -> Self { + Self { + idle_time: defaults::torii::DEFAULT_QUERY_IDLE_TIME, + } + } +} + +#[allow(missing_docs)] +#[derive(Debug, Clone, Copy)] +pub struct BlockSync { + pub gossip_period: Duration, + pub gossip_max_size: NonZeroU32, +} + +#[derive(Debug, Clone, Copy)] +#[allow(missing_docs)] +pub struct TransactionGossiper { + pub gossip_period: Duration, + pub gossip_max_size: NonZeroU32, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[allow(missing_docs)] +pub struct ChainWide { + pub max_transactions_in_block: NonZeroU32, + pub block_time: Duration, + pub commit_time: Duration, + pub transaction_limits: TransactionLimits, + pub asset_metadata_limits: MetadataLimits, + pub asset_definition_metadata_limits: MetadataLimits, + pub account_metadata_limits: MetadataLimits, + pub domain_metadata_limits: MetadataLimits, + pub ident_length_limits: LengthLimits, + pub wasm_runtime: WasmRuntime, +} + +impl ChainWide { + /// Calculate pipeline time based on the block time and commit time + pub fn pipeline_time(&self) -> Duration { + self.block_time + self.commit_time + } +} + +impl Default for ChainWide { + fn default() -> Self { + Self { + 
max_transactions_in_block: defaults::chain_wide::DEFAULT_MAX_TXS, + block_time: defaults::chain_wide::DEFAULT_BLOCK_TIME, + commit_time: defaults::chain_wide::DEFAULT_COMMIT_TIME, + transaction_limits: defaults::chain_wide::DEFAULT_TRANSACTION_LIMITS, + domain_metadata_limits: defaults::chain_wide::DEFAULT_METADATA_LIMITS, + account_metadata_limits: defaults::chain_wide::DEFAULT_METADATA_LIMITS, + asset_definition_metadata_limits: defaults::chain_wide::DEFAULT_METADATA_LIMITS, + asset_metadata_limits: defaults::chain_wide::DEFAULT_METADATA_LIMITS, + ident_length_limits: defaults::chain_wide::DEFAULT_IDENT_LENGTH_LIMITS, + wasm_runtime: WasmRuntime::default(), + } + } +} + +#[allow(missing_docs)] +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +pub struct WasmRuntime { + pub fuel_limit: u64, + // TODO: wrap into a `Bytes` newtype + pub max_memory_bytes: u32, +} + +impl Default for WasmRuntime { + fn default() -> Self { + Self { + fuel_limit: defaults::chain_wide::DEFAULT_WASM_FUEL_LIMIT, + max_memory_bytes: defaults::chain_wide::DEFAULT_WASM_MAX_MEMORY_BYTES, + } + } +} + +#[derive(Debug, Clone)] +#[allow(missing_docs)] +pub struct Torii { + pub address: SocketAddr, + pub max_content_len_bytes: u64, +} + +/// Complete configuration needed to start regular telemetry. +#[derive(Debug, Clone)] +#[allow(missing_docs)] +pub struct Telemetry { + pub name: String, + pub url: Url, + pub min_retry_period: Duration, + pub max_retry_delay_exponent: u8, +} + +/// Complete configuration needed to start dev telemetry. +#[derive(Debug, Clone)] +#[allow(missing_docs)] +pub struct DevTelemetry { + pub out_file: PathBuf, +} diff --git a/config/src/parameters/defaults.rs b/config/src/parameters/defaults.rs new file mode 100644 index 00000000000..a6f779d087b --- /dev/null +++ b/config/src/parameters/defaults.rs @@ -0,0 +1,104 @@ +//! 
Parameters default values + +// TODO: document if needed +#![allow(missing_docs)] + +use std::{ + num::{NonZeroU32, NonZeroUsize}, + time::Duration, +}; + +use iroha_data_model::{prelude::MetadataLimits, transaction::TransactionLimits, LengthLimits}; +use nonzero_ext::nonzero; + +pub mod queue { + use super::*; + + pub const DEFAULT_MAX_TRANSACTIONS_IN_QUEUE: NonZeroUsize = nonzero!(2_usize.pow(16)); + pub const DEFAULT_MAX_TRANSACTIONS_IN_QUEUE_PER_USER: NonZeroUsize = nonzero!(2_usize.pow(16)); + // 24 hours + pub const DEFAULT_TRANSACTION_TIME_TO_LIVE: Duration = Duration::from_secs(24 * 60 * 60); + pub const DEFAULT_FUTURE_THRESHOLD: Duration = Duration::from_secs(1); +} +pub mod kura { + pub const DEFAULT_STORE_DIR: &str = "./storage"; +} + +#[cfg(feature = "tokio-console")] +pub mod logger { + use iroha_primitives::addr::{socket_addr, SocketAddr}; + + pub const DEFAULT_TOKIO_CONSOLE_ADDR: SocketAddr = socket_addr!(127.0.0.1:5555); +} + +pub mod network { + use super::*; + + pub const DEFAULT_TRANSACTION_GOSSIP_PERIOD: Duration = Duration::from_secs(1); + + pub const DEFAULT_BLOCK_GOSSIP_PERIOD: Duration = Duration::from_secs(10); + + pub const DEFAULT_MAX_TRANSACTIONS_PER_GOSSIP: NonZeroU32 = nonzero!(500u32); + pub const DEFAULT_MAX_BLOCKS_PER_GOSSIP: NonZeroU32 = nonzero!(4u32); +} + +pub mod snapshot { + use super::*; + + pub const DEFAULT_STORE_DIR: &str = "./storage/snapshot"; + // Default frequency of making snapshots is 1 minute, need to be adjusted for larger world state view size + pub const DEFAULT_CREATE_EVERY: Duration = Duration::from_secs(60); + pub const DEFAULT_ENABLED: bool = true; +} + +pub mod chain_wide { + + use super::*; + + pub const DEFAULT_MAX_TXS: NonZeroU32 = nonzero!(2_u32.pow(9)); + pub const DEFAULT_BLOCK_TIME: Duration = Duration::from_secs(2); + pub const DEFAULT_COMMIT_TIME: Duration = Duration::from_secs(4); + pub const DEFAULT_WASM_FUEL_LIMIT: u64 = 55_000_000; + // TODO: wrap into a `Bytes` newtype + pub const DEFAULT_WASM_MAX_MEMORY_BYTES: u32 = 500 * 2_u32.pow(20); + + /// Default estimation of consensus duration. 
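+    /// (block time plus half of the commit time; with the defaults above that is
+    /// 2 s + 4 s / 2 = 4 s).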
+    pub const DEFAULT_CONSENSUS_ESTIMATION: Duration =
+        match DEFAULT_BLOCK_TIME.checked_add(match DEFAULT_COMMIT_TIME.checked_div(2) {
+            Some(x) => x,
+            None => unreachable!(),
+        }) {
+            Some(x) => x,
+            None => unreachable!(),
+        };
+
+    /// Default limits for metadata
+    pub const DEFAULT_METADATA_LIMITS: MetadataLimits =
+        MetadataLimits::new(2_u32.pow(20), 2_u32.pow(12));
+    /// Default limits for ident length
+    pub const DEFAULT_IDENT_LENGTH_LIMITS: LengthLimits = LengthLimits::new(1, 2_u32.pow(7));
+    /// Default maximum number of instructions and expressions per transaction
+    pub const DEFAULT_MAX_INSTRUCTION_NUMBER: u64 = 2_u64.pow(12);
+    /// Default maximum size of a transaction's WASM payload, in bytes
+    pub const DEFAULT_MAX_WASM_SIZE_BYTES: u64 = 4 * 2_u64.pow(20);
+
+    /// Default transaction limits
+    pub const DEFAULT_TRANSACTION_LIMITS: TransactionLimits =
+        TransactionLimits::new(DEFAULT_MAX_INSTRUCTION_NUMBER, DEFAULT_MAX_WASM_SIZE_BYTES);
+}
+
+pub mod torii {
+    use std::time::Duration;
+
+    pub const DEFAULT_MAX_CONTENT_LENGTH: u64 = 2_u64.pow(20) * 16;
+    pub const DEFAULT_QUERY_IDLE_TIME: Duration = Duration::from_secs(30);
+}
+
+pub mod telemetry {
+    use std::time::Duration;
+
+    /// Default minimum retry period
+    pub const DEFAULT_MIN_RETRY_PERIOD: Duration = Duration::from_secs(1);
+    /// Default maximum exponent for the retry delay
+    pub const DEFAULT_MAX_RETRY_DELAY_EXPONENT: u8 = 4;
+}
diff --git a/config/src/parameters/mod.rs b/config/src/parameters/mod.rs
new file mode 100644
index 00000000000..7a4e330ccc6
--- /dev/null
+++ b/config/src/parameters/mod.rs
@@ -0,0 +1,5 @@
+//! Iroha configuration parameters on different layers and their default values.
+
+pub mod actual;
+pub mod defaults;
+pub mod user;
diff --git a/config/src/parameters/user.rs b/config/src/parameters/user.rs
new file mode 100644
index 00000000000..57238af0aaf
--- /dev/null
+++ b/config/src/parameters/user.rs
@@ -0,0 +1,704 @@
+//! User configuration view. Contains structures in a format that is
+//! convenient from the user perspective. This layer is less strict than the
+//! actual configuration: user-provided content may parse successfully and still
+//! turn out to be invalid.
+//!
+//! It begins with [`Root`], containing sub-modules. Every structure has its `-Partial`
+//! representation (e.g. [`RootPartial`]).
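+//!
+//! A sketch of the intended flow, mirroring what [`crate::parameters::actual::Root::load`]
+//! does (the file name and the CLI flag value are illustrative):
+//!
+//! ```ignore
+//! let layer = RootPartial::from_toml("iroha.toml")?   // file layer, may `extends` others
+//!     .merge(RootPartial::from_env(&StdEnv)?);        // environment takes precedence
+//! let config = layer
+//!     .unwrap_partial()?                              // ensure required fields are present
+//!     .parse(CliContext { submit_genesis: false })?;  // validate into `actual::Root`
+//! ```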
+
+// This module's usage is documented in detail in the Configuration Reference
+// (TODO link to docs)
+#![allow(missing_docs)]
+
+use std::{
+    error::Error,
+    fmt::Debug,
+    fs::File,
+    io::Read,
+    num::{NonZeroU32, NonZeroUsize},
+    path::{Path, PathBuf},
+    time::Duration,
+};
+
+pub use boilerplate::*;
+use eyre::{eyre, Report, WrapErr};
+use iroha_config_base::{Emitter, ErrorsCollection, HumanBytes, Merge, ParseEnvResult, ReadEnv};
+use iroha_crypto::{KeyPair, PrivateKey, PublicKey};
+use iroha_data_model::{
+    metadata::Limits as MetadataLimits, peer::PeerId, transaction::TransactionLimits, ChainId,
+    LengthLimits, Level,
+};
+use iroha_primitives::{addr::SocketAddr, unique_vec::UniqueVec};
+use url::Url;
+
+use crate::{
+    kura::Mode,
+    logger::Format,
+    parameters::{actual, defaults::telemetry::*},
+};
+
+mod boilerplate;
+
+#[derive(Debug)]
+pub struct Root {
+    chain_id: ChainId,
+    public_key: PublicKey,
+    private_key: PrivateKey,
+    genesis: Genesis,
+    kura: Kura,
+    sumeragi: Sumeragi,
+    network: Network,
+    logger: Logger,
+    queue: Queue,
+    snapshot: Snapshot,
+    telemetry: Telemetry,
+    torii: Torii,
+    chain_wide: ChainWide,
+}
+
+impl RootPartial {
+    /// Read the partial from a TOML file
+    ///
+    /// # Errors
+    /// - If the file is not found or is not valid TOML
+    /// - If the data fails to parse into a layer
+    /// - If other configurations specified in `extends` fail to be read
+    pub fn from_toml(path: impl AsRef<Path>) -> eyre::Result<Self> {
+        let contents = {
+            let mut file = File::open(path.as_ref()).wrap_err_with(|| {
+                eyre!("cannot open file at location `{}`", path.as_ref().display())
+            })?;
+            let mut contents = String::new();
+            file.read_to_string(&mut contents)?;
+            contents
+        };
+        let mut layer: Self = toml::from_str(&contents).wrap_err("failed to parse toml")?;
+
+        let base_path = path
+            .as_ref()
+            .parent()
+            .expect("the config file path cannot be empty or root");
+
+        layer.normalise_paths(base_path);
+
+        if let Some(paths) = layer.extends.take() {
+            let base = paths
+                .iter()
+                .try_fold(None, |acc: Option<Self>, extends_path| {
+                    // the `extends` path is not yet normalised relative to the config file
+                    let full_path = base_path.join(extends_path);
+
+                    let base = Self::from_toml(&full_path)
+                        .wrap_err_with(|| eyre!("cannot extend from `{}`", full_path.display()))?;
+
+                    match acc {
+                        None => Ok::<Option<Self>, Report>(Some(base)),
+                        Some(other_base) => Ok(Some(other_base.merge(base))),
+                    }
+                })?;
+            if let Some(base) = base {
+                layer = base.merge(layer)
+            };
+        }
+
+        Ok(layer)
+    }
+
+    /// **Note:** this function doesn't affect `extends`
+    fn normalise_paths(&mut self, relative_to: impl AsRef<Path>) {
+        let path = relative_to.as_ref();
+
+        macro_rules! patch {
+            ($value:expr) => {
+                $value.as_mut().map(|x| {
+                    *x = path.join(&x);
+                })
+            };
+        }
+
+        patch!(self.genesis.file);
+        patch!(self.snapshot.store_dir);
+        patch!(self.kura.store_dir);
+        patch!(self.telemetry.dev.out_file);
+    }
+
+    // FIXME: work around the inconvenient way `Merge::merge` works
+    #[must_use]
+    pub fn merge(mut self, other: Self) -> Self {
+        Merge::merge(&mut self, other);
+        self
+    }
+}
+
+impl Root {
+    /// Parses the user configuration view into the internal representation.
+    ///
+    /// # Errors
+    /// If any invalidity is found.
+    pub fn parse(self, cli: CliContext) -> Result<actual::Root, ErrorsCollection<Report>> {
+        let mut emitter = Emitter::new();
+
+        let key_pair =
+            KeyPair::new(self.public_key, self.private_key)
+                .wrap_err("failed to construct a key pair from `iroha.public_key` and `iroha.private_key` configuration parameters")
+                .map_or_else(|err| {
+                    emitter.emit(err);
+                    None
+                }, Some);
+
+        let genesis = self.genesis.parse(cli).map_or_else(
+            |err| {
+                // FIXME
+                emitter.emit(eyre!("{err}"));
+                None
+            },
+            Some,
+        );
+
+        let kura = self.kura.parse();
+
+        let sumeragi = self.sumeragi.parse().map_or_else(
+            |err| {
+                emitter.emit(err);
+                None
+            },
+            Some,
+        );
+
+        if let Some(ref config) = sumeragi {
+            if !cli.submit_genesis && config.trusted_peers.is_empty() {
+                emitter.emit(eyre!("\
+                    The network consists of this one peer only (no `sumeragi.trusted_peers` provided). \
+                    Since `--submit-genesis` is not set, there is no way to receive the genesis block. \
+                    Either provide the genesis by setting the `--submit-genesis` argument and the `genesis.private_key` \
+                    and `genesis.file` configuration parameters, or increase the number of trusted peers in \
+                    the network using the `sumeragi.trusted_peers` configuration parameter.\
+                "));
+            }
+        }
+
+        let (p2p_address, block_sync, transaction_gossiper) = self.network.parse();
+
+        let logger = self.logger;
+        let queue = self.queue;
+        let snapshot = self.snapshot;
+
+        let (torii, live_query_store) = self.torii.parse();
+
+        let telemetries = self.telemetry.parse().map_or_else(
+            |err| {
+                emitter.emit(err);
+                None
+            },
+            Some,
+        );
+
+        let chain_wide = self.chain_wide.parse();
+
+        if p2p_address == torii.address {
+            emitter.emit(eyre!(
+                "`iroha.p2p_address` and `torii.address` should not be the same"
+            ))
+        }
+
+        emitter.finish()?;
+
+        let peer = actual::Common {
+            chain_id: self.chain_id,
+            key_pair: key_pair.unwrap(),
+            p2p_address,
+        };
+        let (telemetry, dev_telemetry) = telemetries.unwrap();
+        let genesis = genesis.unwrap();
+        let sumeragi = {
+            let mut x = sumeragi.unwrap();
+            x.trusted_peers.push(peer.peer_id());
+            x
+        };
+
+        Ok(actual::Root {
+            common: peer,
+            genesis,
+            torii,
+            kura,
+            sumeragi,
+            block_sync,
+            transaction_gossiper,
+            live_query_store,
+            logger,
+            queue,
+            snapshot,
+            telemetry,
+            dev_telemetry,
+            chain_wide,
+        })
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct CliContext {
+    pub submit_genesis: bool,
+}
+
+pub(crate) fn private_key_from_env<E: Error>(
+    emitter: &mut Emitter<Report>,
+    env: &impl ReadEnv<E>,
+    env_key_base: impl AsRef<str>,
+    name_base: impl AsRef<str>,
+) -> ParseEnvResult<PrivateKey> {
+    let digest_env = format!("{}_DIGEST", env_key_base.as_ref());
+    let digest_name = format!("{}.digest_function", name_base.as_ref());
+    let payload_env = format!("{}_PAYLOAD", env_key_base.as_ref());
+    let payload_name = format!("{}.payload", name_base.as_ref());
+
+    let digest_function = ParseEnvResult::parse_simple(emitter, env, &digest_env, &digest_name);
+
+    // FIXME: error handling here is a mess
+    let payload = match env
+        .read_env(&payload_env)
+        .map_err(|err| eyre!("failed to read {payload_name}: {err}"))
+        .wrap_err("oops")
+    {
+        Ok(Some(value)) => ParseEnvResult::Value(value),
+        Ok(None) => ParseEnvResult::None,
+        Err(err) => {
+            emitter.emit(err);
+            ParseEnvResult::Error
+        }
+    };
+
+    match (digest_function, payload) {
+        (ParseEnvResult::Value(digest_function), ParseEnvResult::Value(payload)) => {
+            match PrivateKey::from_hex(digest_function, &payload).wrap_err_with(|| {
+                eyre!(
+                    "failed to construct `{}` from `{}` and `{}` environment variables",
+                    name_base.as_ref(),
+                    &digest_env,
+                    &payload_env
+                )
+            }) {
+                Ok(value) => return ParseEnvResult::Value(value),
+                Err(report) => {
+                    emitter.emit(report);
+                }
+            }
+        }
+        (ParseEnvResult::None, ParseEnvResult::None) => return ParseEnvResult::None,
+        (ParseEnvResult::Value(_), ParseEnvResult::None) => emitter.emit(eyre!(
+            "`{}` env was provided, but `{}` was not",
+            &digest_env,
+            &payload_env
+        )),
+        (ParseEnvResult::None, ParseEnvResult::Value(_)) => {
+            emitter.emit(eyre!(
+                "`{}` env was provided, but `{}` was not",
+                &payload_env,
+                &digest_env
+            ));
+        }
+        (ParseEnvResult::Error, _) | (_, ParseEnvResult::Error) => {
+            // the emitter already has these errors;
+            // this branch exists for exhaustiveness
+        }
+    }
+
+    ParseEnvResult::Error
+}
+
+#[derive(Debug)]
+pub struct Genesis {
+    pub public_key: PublicKey,
+    pub private_key: Option<PrivateKey>,
+    pub file: Option<PathBuf>,
+}
+
+impl Genesis {
+    fn parse(self, cli: CliContext) -> Result<actual::Genesis, GenesisConfigError> {
+        match (self.private_key, self.file, cli.submit_genesis) {
+            (None, None, false) => Ok(actual::Genesis::Partial {
+                public_key: self.public_key,
+            }),
+            (Some(private_key), Some(file), true) => Ok(actual::Genesis::Full {
+                key_pair: KeyPair::new(self.public_key, private_key)
+                    .map_err(GenesisConfigError::from)?,
+                file,
+            }),
+            (Some(_), Some(_), false) => Err(GenesisConfigError::GenesisWithoutSubmit),
+            (None, None, true) => Err(GenesisConfigError::SubmitWithoutGenesis),
+            _ => Err(GenesisConfigError::Inconsistent),
+        }
+    }
+}
+
+#[derive(Debug, displaydoc::Display, thiserror::Error)]
+pub enum GenesisConfigError {
+    /// `genesis.file` and `genesis.private_key` are present, but `--submit-genesis` was not set
+    GenesisWithoutSubmit,
+    /// `--submit-genesis` was set, but `genesis.file` and `genesis.private_key` are not present
+    SubmitWithoutGenesis,
+    /// `genesis.file` and `genesis.private_key` should be set together
+    Inconsistent,
+    /// failed to construct the genesis's keypair using the `genesis.public_key` and `genesis.private_key` configuration parameters
+    KeyPair(#[from] iroha_crypto::error::Error),
+}
+
+#[derive(Debug)]
+pub struct Kura {
+    pub init_mode: Mode,
+    pub store_dir: PathBuf,
+    pub debug: KuraDebug,
+}
+
+impl Kura {
+    fn parse(self) -> actual::Kura {
+        let Self {
+            init_mode,
+            store_dir: block_store_path,
+            debug:
+                KuraDebug {
+                    output_new_blocks: debug_output_new_blocks,
+                },
+        } = self;
+
+        actual::Kura {
+            init_mode,
+            store_dir: block_store_path,
+            debug_output_new_blocks,
+        }
+    }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub struct KuraDebug {
+    output_new_blocks: bool,
+}
+
+#[derive(Debug)]
+pub struct Sumeragi {
+    pub trusted_peers: Option<Vec<PeerId>>,
+    pub debug: SumeragiDebug,
+}
+
+impl Sumeragi {
+    fn parse(self) -> Result<actual::Sumeragi, Report> {
+        let Self {
+            trusted_peers,
+            debug: SumeragiDebug { force_soft_fork },
+        } = self;
+
+        let trusted_peers = construct_unique_vec(trusted_peers.unwrap_or_default())?;
+
+        Ok(actual::Sumeragi {
+            trusted_peers,
+            debug_force_soft_fork: force_soft_fork,
+        })
+    }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub struct SumeragiDebug {
+    pub force_soft_fork: bool,
+}
+
+// FIXME: handle duplicates properly, not here, and with details
+fn construct_unique_vec<T: Debug + PartialEq>(
+    unchecked: Vec<T>,
+) -> Result<UniqueVec<T>, eyre::Report> {
+    let mut unique = UniqueVec::new();
+    for x in unchecked {
+        let pushed = unique.push(x);
+        if !pushed {
+            Err(eyre!("found duplicate"))?
+ } + } + Ok(unique) +} + +#[derive(Debug, Clone)] +pub struct Network { + /// Peer-to-peer address + pub address: SocketAddr, + pub block_gossip_max_size: NonZeroU32, + pub block_gossip_period: Duration, + pub transaction_gossip_max_size: NonZeroU32, + pub transaction_gossip_period: Duration, +} + +impl Network { + fn parse(self) -> (SocketAddr, actual::BlockSync, actual::TransactionGossiper) { + let Self { + address, + block_gossip_max_size, + block_gossip_period, + transaction_gossip_max_size, + transaction_gossip_period, + } = self; + + ( + address, + actual::BlockSync { + gossip_period: block_gossip_period, + gossip_max_size: block_gossip_max_size, + }, + actual::TransactionGossiper { + gossip_period: transaction_gossip_period, + gossip_max_size: transaction_gossip_max_size, + }, + ) + } +} + +#[derive(Debug, Clone, Copy)] +pub struct Queue { + /// The upper limit of the number of transactions waiting in the queue. + pub capacity: NonZeroUsize, + /// The upper limit of the number of transactions waiting in the queue for single user. + /// Use this option to apply throttling. + pub capacity_per_user: NonZeroUsize, + /// The transaction will be dropped after this time if it is still in the queue. + pub transaction_time_to_live: Duration, + /// The threshold to determine if a transaction has been tampered to have a future timestamp. + pub future_threshold: Duration, +} + +#[allow(missing_copy_implementations)] // triggered without tokio-console +#[derive(Debug, Clone)] +pub struct Logger { + /// Level of logging verbosity + // TODO: parse user provided value in a case insensitive way, + // because `format` is set in lowercase, and `LOG_LEVEL=INFO` + `LOG_FORMAT=pretty` + // looks inconsistent + pub level: Level, + /// Output format + pub format: Format, + #[cfg(feature = "tokio-console")] + /// Address of tokio console (only available under "tokio-console" feature) + pub tokio_console_address: SocketAddr, +} + +#[allow(clippy::derivable_impls)] // triggers in absence of `tokio-console` feature +impl Default for Logger { + fn default() -> Self { + Self { + level: Level::default(), + format: Format::default(), + #[cfg(feature = "tokio-console")] + tokio_console_address: super::defaults::logger::DEFAULT_TOKIO_CONSOLE_ADDR, + } + } +} + +#[derive(Debug)] +pub struct Telemetry { + // Fields here are Options so that it is possible to warn the user if e.g. 
they provided `min_retry_period`, but haven't
+    // provided `name` and `url`
+    pub name: Option<String>,
+    pub url: Option<Url>,
+    pub min_retry_period: Option<Duration>,
+    pub max_retry_delay_exponent: Option<u8>,
+    pub dev: TelemetryDev,
+}
+
+#[derive(Debug)]
+pub struct TelemetryDev {
+    pub out_file: Option<PathBuf>,
+}
+
+impl Telemetry {
+    fn parse(self) -> Result<(Option<actual::Telemetry>, Option<actual::DevTelemetry>), Report> {
+        let Self {
+            name,
+            url,
+            max_retry_delay_exponent,
+            min_retry_period,
+            dev: TelemetryDev { out_file: file },
+        } = self;
+
+        let regular = match (name, url) {
+            (Some(name), Some(url)) => Some(actual::Telemetry {
+                name,
+                url,
+                max_retry_delay_exponent: max_retry_delay_exponent
+                    .unwrap_or(DEFAULT_MAX_RETRY_DELAY_EXPONENT),
+                min_retry_period: min_retry_period.unwrap_or(DEFAULT_MIN_RETRY_PERIOD),
+            }),
+            // TODO: warn the user if they provided the retry parameters without the essential ones
+            (None, None) => None,
+            _ => {
+                // TODO: improve error detail
+                return Err(eyre!(
+                    "`telemetry.name` and `telemetry.url` should be set together"
+                ));
+            }
+        };
+
+        let dev = file.map(|file| actual::DevTelemetry {
+            out_file: file.clone(),
+        });
+
+        Ok((regular, dev))
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct Snapshot {
+    pub create_every: Duration,
+    pub store_dir: PathBuf,
+    pub creation_enabled: bool,
+}
+
+#[derive(Debug, Copy, Clone)]
+pub struct ChainWide {
+    pub max_transactions_in_block: NonZeroU32,
+    pub block_time: Duration,
+    pub commit_time: Duration,
+    pub transaction_limits: TransactionLimits,
+    pub asset_metadata_limits: MetadataLimits,
+    pub asset_definition_metadata_limits: MetadataLimits,
+    pub account_metadata_limits: MetadataLimits,
+    pub domain_metadata_limits: MetadataLimits,
+    pub ident_length_limits: LengthLimits,
+    pub wasm_fuel_limit: u64,
+    pub wasm_max_memory: HumanBytes<u32>,
+}
+
+impl ChainWide {
+    fn parse(self) -> actual::ChainWide {
+        let Self {
+            max_transactions_in_block,
+            block_time,
+            commit_time,
+            transaction_limits,
+            asset_metadata_limits,
+            asset_definition_metadata_limits,
+            account_metadata_limits,
+            domain_metadata_limits,
+            ident_length_limits: identifier_length_limits,
+            wasm_fuel_limit,
+            wasm_max_memory,
+        } = self;
+
+        actual::ChainWide {
+            max_transactions_in_block,
+            block_time,
+            commit_time,
+            transaction_limits,
+            asset_metadata_limits,
+            asset_definition_metadata_limits,
+            account_metadata_limits,
+            domain_metadata_limits,
+            ident_length_limits: identifier_length_limits,
+            wasm_runtime: actual::WasmRuntime {
+                fuel_limit: wasm_fuel_limit,
+                max_memory_bytes: wasm_max_memory.get(),
+            },
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct Torii {
+    pub address: SocketAddr,
+    pub max_content_len: HumanBytes<u64>,
+    pub query_idle_time: Duration,
+}
+
+impl Torii {
+    fn parse(self) -> (actual::Torii, actual::LiveQueryStore) {
+        let torii = actual::Torii {
+            address: self.address,
+            max_content_len_bytes: self.max_content_len.get(),
+        };
+
+        let query = actual::LiveQueryStore {
+            idle_time: self.query_idle_time,
+        };
+
+        (torii, query)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use iroha_config_base::{FromEnv, TestEnv};
+
+    use super::super::user::boilerplate::RootPartial;
+
+    #[test]
+    fn parses_private_key_from_env() {
+        let env = TestEnv::new()
+            .set("PRIVATE_KEY_DIGEST", "ed25519")
+            .set("PRIVATE_KEY_PAYLOAD", "8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb");
+
+        let private_key = RootPartial::from_env(&env)
+            .expect("input is valid, should not fail")
+            .private_key
+            .get()
+            .expect("private key is provided,
should not fail"); + + let (algorithm, payload) = private_key.to_raw(); + assert_eq!(algorithm, "ed25519".parse().unwrap()); + assert_eq!(hex::encode(payload), "8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb"); + } + + #[test] + fn fails_to_parse_private_key_in_env_without_digest() { + let env = TestEnv::new().set("PRIVATE_KEY_DIGEST", "ed25519"); + let error = + RootPartial::from_env(&env).expect_err("private key is incomplete, should fail"); + let expected = expect_test::expect![ + "`PRIVATE_KEY_DIGEST` env was provided, but `PRIVATE_KEY_PAYLOAD` was not" + ]; + expected.assert_eq(&format!("{error:#}")); + } + + #[test] + fn fails_to_parse_private_key_in_env_without_payload() { + let env = TestEnv::new().set("PRIVATE_KEY_PAYLOAD", "8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb"); + let error = + RootPartial::from_env(&env).expect_err("private key is incomplete, should fail"); + let expected = expect_test::expect![ + "`PRIVATE_KEY_PAYLOAD` env was provided, but `PRIVATE_KEY_DIGEST` was not" + ]; + expected.assert_eq(&format!("{error:#}")); + } + + #[test] + fn fails_to_parse_private_key_from_env_with_invalid_payload() { + let env = TestEnv::new() + .set("PRIVATE_KEY_DIGEST", "ed25519") + .set("PRIVATE_KEY_PAYLOAD", "foo"); + + let error = RootPartial::from_env(&env).expect_err("input is invalid, should fail"); + + let expected = expect_test::expect!["failed to construct `iroha.private_key` from `PRIVATE_KEY_DIGEST` and `PRIVATE_KEY_PAYLOAD` environment variables"]; + expected.assert_eq(&format!("{error:#}")); + } + + #[test] + fn when_payload_provided_but_digest_is_invalid() { + let env = TestEnv::new() + .set("PRIVATE_KEY_DIGEST", "foo") + .set("PRIVATE_KEY_PAYLOAD", "8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb"); + + let error = RootPartial::from_env(&env).expect_err("input is invalid, should fail"); + + // TODO: print the bad value and supported ones + let expected = expect_test::expect!["failed to parse `iroha.private_key.digest_function` field from `PRIVATE_KEY_DIGEST` env variable"]; + expected.assert_eq(&format!("{error:#}")); + } + + #[test] + fn deserialize_empty_input_works() { + let _layer: RootPartial = toml::from_str("").unwrap(); + } + + #[test] + fn deserialize_network_namespace_with_not_all_fields_works() { + let _layer: RootPartial = toml::toml! { + [network] + address = "127.0.0.1:8080" + } + .try_into() + .expect("should not fail when not all fields in `network` are presented at a time"); + } +} diff --git a/config/src/parameters/user/boilerplate.rs b/config/src/parameters/user/boilerplate.rs new file mode 100644 index 00000000000..64b81d9d6ff --- /dev/null +++ b/config/src/parameters/user/boilerplate.rs @@ -0,0 +1,766 @@ +//! Code that should be generated by a procmacro in future. 
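For context, the tests above exercise the `FromEnv`/`TestEnv` machinery from `iroha_config_base`, whose definitions this diff does not include. Below is a rough, self-contained sketch of the paired-variable check they verify, using hypothetical simplified types (`TestEnv` here is an assumed in-memory map, and the real `ReadEnv` trait also threads an error type through):

```rust
use std::collections::HashMap;

/// Simplified stand-in: anything that can look up environment variables by key.
trait ReadEnv {
    fn read_env(&self, key: &str) -> Option<String>;
}

/// In-memory environment used by tests instead of real process env vars.
#[derive(Default)]
struct TestEnv(HashMap<String, String>);

impl TestEnv {
    /// Builder-style setter, mirroring the `.set(...)` chains in the tests.
    fn set(mut self, key: &str, value: &str) -> Self {
        self.0.insert(key.to_owned(), value.to_owned());
        self
    }
}

impl ReadEnv for TestEnv {
    fn read_env(&self, key: &str) -> Option<String> {
        self.0.get(key).cloned()
    }
}

/// Paired variables must be set together, mirroring the digest/payload checks.
fn read_key_pair(env: &impl ReadEnv) -> Result<Option<(String, String)>, String> {
    match (
        env.read_env("PRIVATE_KEY_DIGEST"),
        env.read_env("PRIVATE_KEY_PAYLOAD"),
    ) {
        (Some(digest), Some(payload)) => Ok(Some((digest, payload))),
        (None, None) => Ok(None),
        (Some(_), None) => {
            Err("`PRIVATE_KEY_DIGEST` env was provided, but `PRIVATE_KEY_PAYLOAD` was not".into())
        }
        (None, Some(_)) => {
            Err("`PRIVATE_KEY_PAYLOAD` env was provided, but `PRIVATE_KEY_DIGEST` was not".into())
        }
    }
}

fn main() {
    let env = TestEnv::default().set("PRIVATE_KEY_DIGEST", "ed25519");
    assert!(read_key_pair(&env).is_err()); // an incomplete pair is rejected
}
```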
+ +#![allow(missing_docs)] + +use std::{ + error::Error, + num::{NonZeroU32, NonZeroUsize}, + path::PathBuf, +}; + +use eyre::{eyre, Report, WrapErr}; +use iroha_config_base::{ + Emitter, ErrorsCollection, ExtendsPaths, FromEnv, FromEnvDefaultFallback, FromEnvResult, + HumanBytes, HumanDuration, Merge, MissingFieldError, ParseEnvResult, ReadEnv, UnwrapPartial, + UnwrapPartialResult, UserField, +}; +use iroha_crypto::{PrivateKey, PublicKey}; +use iroha_data_model::{ + metadata::Limits as MetadataLimits, + prelude::{ChainId, PeerId}, + transaction::TransactionLimits, + LengthLimits, Level, +}; +use iroha_primitives::addr::SocketAddr; +use serde::{Deserialize, Serialize}; +use url::Url; + +use crate::{ + kura::Mode, + logger::Format, + parameters::{ + defaults::{self, chain_wide::*, network::*, queue::*, torii::*}, + user, + user::{ + ChainWide, Genesis, Kura, KuraDebug, Logger, Network, Queue, Root, Snapshot, Sumeragi, + SumeragiDebug, Telemetry, TelemetryDev, Torii, + }, + }, +}; + +#[derive(Deserialize, Serialize, Debug, Default, Merge)] +#[serde(deny_unknown_fields, default)] +pub struct RootPartial { + pub extends: Option, + pub chain_id: UserField, + pub public_key: UserField, + pub private_key: UserField, + pub genesis: GenesisPartial, + pub kura: KuraPartial, + pub sumeragi: SumeragiPartial, + pub network: NetworkPartial, + pub logger: LoggerPartial, + pub queue: QueuePartial, + pub snapshot: SnapshotPartial, + pub telemetry: TelemetryPartial, + pub torii: ToriiPartial, + pub chain_wide: ChainWidePartial, +} + +impl RootPartial { + /// Creates new empty user configuration + pub fn new() -> Self { + // TODO: generate this function with macro. For now, use default + Self::default() + } +} + +impl UnwrapPartial for RootPartial { + type Output = Root; + + fn unwrap_partial(self) -> UnwrapPartialResult { + let mut emitter = Emitter::new(); + + macro_rules! 
nested { + ($item:expr) => { + match UnwrapPartial::unwrap_partial($item) { + Ok(value) => Some(value), + Err(error) => { + emitter.emit_collection(error); + None + } + } + }; + } + + if self.chain_id.is_none() { + emitter.emit_missing_field("chain_id"); + } + if self.public_key.is_none() { + emitter.emit_missing_field("public_key"); + } + if self.private_key.is_none() { + emitter.emit_missing_field("private_key"); + } + + let genesis = nested!(self.genesis); + let kura = nested!(self.kura); + let sumeragi = nested!(self.sumeragi); + let network = nested!(self.network); + let logger = nested!(self.logger); + let queue = nested!(self.queue); + let snapshot = nested!(self.snapshot); + let telemetry = nested!(self.telemetry); + let torii = nested!(self.torii); + let chain_wide = nested!(self.chain_wide); + + emitter.finish()?; + + Ok(Root { + chain_id: self.chain_id.get().unwrap(), + public_key: self.public_key.get().unwrap(), + private_key: self.private_key.get().unwrap(), + genesis: genesis.unwrap(), + kura: kura.unwrap(), + sumeragi: sumeragi.unwrap(), + telemetry: telemetry.unwrap(), + logger: logger.unwrap(), + queue: queue.unwrap(), + snapshot: snapshot.unwrap(), + torii: torii.unwrap(), + network: network.unwrap(), + chain_wide: chain_wide.unwrap(), + }) + } +} + +impl FromEnv for RootPartial { + fn from_env>(env: &R) -> FromEnvResult { + fn from_env_nested(env: &R, emitter: &mut Emitter) -> Option + where + T: FromEnv, + R: ReadEnv, + RE: Error, + { + match FromEnv::from_env(env) { + Ok(parsed) => Some(parsed), + Err(errors) => { + emitter.emit_collection(errors); + None + } + } + } + + let mut emitter = Emitter::new(); + + let chain_id = env + .read_env("CHAIN_ID") + .map_err(|e| eyre!("{e}")) + .wrap_err("failed to read CHAIN_ID field (iroha.chain_id param)") + .map_or_else( + |err| { + emitter.emit(err); + None + }, + |maybe_value| maybe_value.map(ChainId::from), + ) + .into(); + let public_key = + ParseEnvResult::parse_simple(&mut emitter, env, "PUBLIC_KEY", "iroha.public_key") + .into(); + let private_key = + user::private_key_from_env(&mut emitter, env, "PRIVATE_KEY", "iroha.private_key") + .into(); + + let genesis = from_env_nested(env, &mut emitter); + let kura = from_env_nested(env, &mut emitter); + let sumeragi = from_env_nested(env, &mut emitter); + let network = from_env_nested(env, &mut emitter); + let logger = from_env_nested(env, &mut emitter); + let queue = from_env_nested(env, &mut emitter); + let snapshot = from_env_nested(env, &mut emitter); + let telemetry = from_env_nested(env, &mut emitter); + let torii = from_env_nested(env, &mut emitter); + let chain_wide = from_env_nested(env, &mut emitter); + + emitter.finish()?; + + Ok(Self { + extends: None, + chain_id, + public_key, + private_key, + genesis: genesis.unwrap(), + kura: kura.unwrap(), + sumeragi: sumeragi.unwrap(), + network: network.unwrap(), + logger: logger.unwrap(), + queue: queue.unwrap(), + snapshot: snapshot.unwrap(), + telemetry: telemetry.unwrap(), + torii: torii.unwrap(), + chain_wide: chain_wide.unwrap(), + }) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Default, Merge)] +#[serde(deny_unknown_fields, default)] +pub struct GenesisPartial { + pub public_key: UserField, + pub private_key: UserField, + pub file: UserField, +} + +impl UnwrapPartial for GenesisPartial { + type Output = Genesis; + + fn unwrap_partial(self) -> UnwrapPartialResult { + let public_key = self + .public_key + .get() + .ok_or_else(|| MissingFieldError::new("genesis.public_key"))?; + + let private_key = 
self.private_key.get(); + let file = self.file.get(); + + Ok(Genesis { + public_key, + private_key, + file, + }) + } +} + +impl FromEnv for GenesisPartial { + fn from_env>(env: &R) -> FromEnvResult + where + Self: Sized, + { + let mut emitter = Emitter::new(); + + let public_key = ParseEnvResult::parse_simple( + &mut emitter, + env, + "GENESIS_PUBLIC_KEY", + "genesis.public_key", + ) + .into(); + let private_key = user::private_key_from_env( + &mut emitter, + env, + "GENESIS_PRIVATE_KEY", + "genesis.private_key", + ) + .into(); + let file = + ParseEnvResult::parse_simple(&mut emitter, env, "GENESIS_FILE", "genesis.file").into(); + + emitter.finish()?; + + Ok(Self { + public_key, + private_key, + file, + }) + } +} + +/// `Kura` configuration. +#[derive(Clone, Deserialize, Serialize, Debug, Default, Merge)] +#[serde(deny_unknown_fields, default)] +pub struct KuraPartial { + pub init_mode: UserField, + pub store_dir: UserField, + pub debug: KuraDebugPartial, +} + +impl UnwrapPartial for KuraPartial { + type Output = Kura; + + fn unwrap_partial(self) -> Result> { + let mut emitter = Emitter::new(); + + let init_mode = self.init_mode.unwrap_or_default(); + + let store_dir = self + .store_dir + .get() + .unwrap_or_else(|| PathBuf::from(defaults::kura::DEFAULT_STORE_DIR)); + + let debug = UnwrapPartial::unwrap_partial(self.debug).map_or_else( + |err| { + emitter.emit_collection(err); + None + }, + Some, + ); + + emitter.finish()?; + + Ok(Kura { + init_mode, + store_dir, + debug: debug.unwrap(), + }) + } +} + +#[derive(Clone, Deserialize, Serialize, Debug, Default, Merge)] +#[serde(deny_unknown_fields, default)] +pub struct KuraDebugPartial { + output_new_blocks: UserField, +} + +impl UnwrapPartial for KuraDebugPartial { + type Output = KuraDebug; + + fn unwrap_partial(self) -> Result> { + Ok(KuraDebug { + output_new_blocks: self.output_new_blocks.unwrap_or(false), + }) + } +} + +impl FromEnv for KuraPartial { + fn from_env>(env: &R) -> FromEnvResult + where + Self: Sized, + { + let mut emitter = Emitter::new(); + + let init_mode = + ParseEnvResult::parse_simple(&mut emitter, env, "KURA_INIT_MODE", "kura.init_mode") + .into(); + let store_dir = + ParseEnvResult::parse_simple(&mut emitter, env, "KURA_STORE_DIR", "kura.store_dir") + .into(); + let debug_output_new_blocks = ParseEnvResult::parse_simple( + &mut emitter, + env, + "KURA_DEBUG_OUTPUT_NEW_BLOCKS", + "kura.debug.output_new_blocks", + ) + .into(); + + emitter.finish()?; + + Ok(Self { + init_mode, + store_dir, + debug: KuraDebugPartial { + output_new_blocks: debug_output_new_blocks, + }, + }) + } +} + +#[derive(Deserialize, Serialize, Debug, Default, Merge)] +#[serde(deny_unknown_fields, default)] +pub struct SumeragiPartial { + pub trusted_peers: UserField>, + pub debug: SumeragiDebugPartial, +} + +impl UnwrapPartial for SumeragiPartial { + type Output = Sumeragi; + + fn unwrap_partial(self) -> UnwrapPartialResult { + let mut emitter = Emitter::new(); + + let debug = self.debug.unwrap_partial().map_or_else( + |err| { + emitter.emit_collection(err); + None + }, + Some, + ); + + emitter.finish()?; + + Ok(Sumeragi { + trusted_peers: self.trusted_peers.get(), + debug: debug.unwrap(), + }) + } +} + +#[derive(Deserialize, Serialize, Debug, Default, Merge)] +#[serde(deny_unknown_fields, default)] +pub struct SumeragiDebugPartial { + pub force_soft_fork: UserField, +} + +impl UnwrapPartial for SumeragiDebugPartial { + type Output = SumeragiDebug; + + fn unwrap_partial(self) -> UnwrapPartialResult { + Ok(SumeragiDebug { + force_soft_fork: 
self.force_soft_fork.unwrap_or(false),
+        })
+    }
+}
+
+impl FromEnvDefaultFallback for SumeragiPartial {}
+
+#[derive(Deserialize, Serialize, Debug, Default, Merge)]
+#[serde(deny_unknown_fields, default)]
+pub struct NetworkPartial {
+    pub address: UserField<SocketAddr>,
+    pub block_gossip_max_size: UserField<NonZeroU32>,
+    pub block_gossip_period: UserField<HumanDuration>,
+    pub transaction_gossip_max_size: UserField<NonZeroU32>,
+    pub transaction_gossip_period: UserField<HumanDuration>,
+}
+
+impl UnwrapPartial for NetworkPartial {
+    type Output = Network;
+
+    fn unwrap_partial(self) -> UnwrapPartialResult<Self::Output> {
+        if self.address.is_none() {
+            return Err(MissingFieldError::new("network.address").into());
+        }
+
+        Ok(Network {
+            address: self.address.get().unwrap(),
+            block_gossip_period: self
+                .block_gossip_period
+                .map(HumanDuration::get)
+                .unwrap_or(DEFAULT_BLOCK_GOSSIP_PERIOD),
+            transaction_gossip_period: self
+                .transaction_gossip_period
+                .map(HumanDuration::get)
+                .unwrap_or(DEFAULT_TRANSACTION_GOSSIP_PERIOD),
+            transaction_gossip_max_size: self
+                .transaction_gossip_max_size
+                .get()
+                .unwrap_or(DEFAULT_MAX_TRANSACTIONS_PER_GOSSIP),
+            block_gossip_max_size: self
+                .block_gossip_max_size
+                .get()
+                .unwrap_or(DEFAULT_MAX_BLOCKS_PER_GOSSIP),
+        })
+    }
+}
+
+impl FromEnv for NetworkPartial {
+    fn from_env<R: ReadEnv<RE>, RE: Error>(env: &R) -> FromEnvResult<Self>
+    where
+        Self: Sized,
+    {
+        let mut emitter = Emitter::new();
+
+        // TODO: also parse `NETWORK_ADDRESS`?
+        let address =
+            ParseEnvResult::parse_simple(&mut emitter, env, "P2P_ADDRESS", "network.address")
+                .into();
+
+        emitter.finish()?;
+
+        Ok(Self {
+            address,
+            ..Self::default()
+        })
+    }
+}
+
+#[derive(Deserialize, Serialize, Debug, Default, Merge)]
+#[serde(deny_unknown_fields, default)]
+pub struct QueuePartial {
+    /// The upper limit of the number of transactions waiting in the queue.
+    pub capacity: UserField<NonZeroUsize>,
+    /// The upper limit of the number of transactions waiting in the queue for a single user.
+    /// Use this option to apply throttling.
+    pub capacity_per_user: UserField<NonZeroUsize>,
+    /// The transaction will be dropped after this time if it is still in the queue.
+    pub transaction_time_to_live: UserField<HumanDuration>,
+    /// The threshold to determine if a transaction has been tampered with to have a future timestamp.
+    pub future_threshold: UserField<HumanDuration>,
+}
+
+impl UnwrapPartial for QueuePartial {
+    type Output = Queue;
+
+    fn unwrap_partial(self) -> UnwrapPartialResult<Self::Output> {
+        Ok(Queue {
+            capacity: self.capacity.unwrap_or(DEFAULT_MAX_TRANSACTIONS_IN_QUEUE),
+            capacity_per_user: self
+                .capacity_per_user
+                .unwrap_or(DEFAULT_MAX_TRANSACTIONS_IN_QUEUE_PER_USER),
+            transaction_time_to_live: self
+                .transaction_time_to_live
+                .map_or(DEFAULT_TRANSACTION_TIME_TO_LIVE, HumanDuration::get),
+            future_threshold: self
+                .future_threshold
+                .map_or(DEFAULT_FUTURE_THRESHOLD, HumanDuration::get),
+        })
+    }
+}
+
+impl FromEnvDefaultFallback for QueuePartial {}
+
+/// `Logger` configuration.
+#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Default, Merge)] +// `tokio_console_addr` is not `Copy`, but warning appears without `tokio-console` feature +#[allow(missing_copy_implementations)] +#[serde(deny_unknown_fields, default)] +pub struct LoggerPartial { + /// Level of logging verbosity + pub level: UserField, + /// Output format + pub format: UserField, + #[cfg(feature = "tokio-console")] + /// Address of tokio console (only available under "tokio-console" feature) + pub tokio_console_address: UserField, +} + +impl UnwrapPartial for LoggerPartial { + type Output = Logger; + + fn unwrap_partial(self) -> UnwrapPartialResult { + Ok(Logger { + level: self.level.unwrap_or_default(), + format: self.format.unwrap_or_default(), + #[cfg(feature = "tokio-console")] + tokio_console_address: self.tokio_console_address.get().unwrap_or_else(|| { + super::super::defaults::logger::DEFAULT_TOKIO_CONSOLE_ADDR.clone() + }), + }) + } +} + +impl FromEnv for LoggerPartial { + fn from_env>(env: &R) -> FromEnvResult + where + Self: Sized, + { + let mut emitter = Emitter::new(); + + let level = + ParseEnvResult::parse_simple(&mut emitter, env, "LOG_LEVEL", "logger.level").into(); + let format = + ParseEnvResult::parse_simple(&mut emitter, env, "LOG_FORMAT", "logger.format").into(); + + emitter.finish()?; + + #[allow(clippy::needless_update)] // triggers if tokio console addr is feature-gated + Ok(Self { + level, + format, + ..Self::default() + }) + } +} + +#[derive(Clone, Deserialize, Serialize, Debug, Default, Merge)] +#[serde(deny_unknown_fields, default)] +pub struct TelemetryPartial { + pub name: UserField, + pub url: UserField, + pub min_retry_period: UserField, + pub max_retry_delay_exponent: UserField, + pub dev: TelemetryDevPartial, +} + +#[derive(Clone, Deserialize, Serialize, Debug, Default, Merge)] +#[serde(deny_unknown_fields, default)] +pub struct TelemetryDevPartial { + pub out_file: UserField, +} + +impl UnwrapPartial for TelemetryDevPartial { + type Output = TelemetryDev; + + fn unwrap_partial(self) -> UnwrapPartialResult { + Ok(TelemetryDev { + out_file: self.out_file.get(), + }) + } +} + +impl UnwrapPartial for TelemetryPartial { + type Output = Telemetry; + + fn unwrap_partial(self) -> UnwrapPartialResult { + let Self { + name, + url, + max_retry_delay_exponent, + min_retry_period, + dev, + } = self; + + Ok(Telemetry { + name: name.get(), + url: url.get(), + max_retry_delay_exponent: max_retry_delay_exponent.get(), + min_retry_period: min_retry_period.get().map(HumanDuration::get), + dev: dev.unwrap_partial()?, + }) + } +} + +impl FromEnvDefaultFallback for TelemetryPartial {} + +#[derive(Debug, Clone, Deserialize, Serialize, Default, Merge)] +#[serde(deny_unknown_fields, default)] +pub struct SnapshotPartial { + pub create_every: UserField, + pub store_dir: UserField, + pub creation_enabled: UserField, +} + +impl UnwrapPartial for SnapshotPartial { + type Output = Snapshot; + + fn unwrap_partial(self) -> UnwrapPartialResult { + Ok(Snapshot { + creation_enabled: self + .creation_enabled + .unwrap_or(defaults::snapshot::DEFAULT_ENABLED), + create_every: self + .create_every + .get() + .map_or(defaults::snapshot::DEFAULT_CREATE_EVERY, HumanDuration::get), + store_dir: self + .store_dir + .get() + .unwrap_or_else(|| PathBuf::from(defaults::snapshot::DEFAULT_STORE_DIR)), + }) + } +} + +impl FromEnv for SnapshotPartial { + fn from_env>(env: &R) -> FromEnvResult + where + Self: Sized, + { + let mut emitter = Emitter::new(); + + let store_dir = ParseEnvResult::parse_simple( + 
&mut emitter, + env, + "SNAPSHOT_STORE_DIR", + "snapshot.store_dir", + ) + .into(); + let creation_enabled = ParseEnvResult::parse_simple( + &mut emitter, + env, + "SNAPSHOT_CREATION_ENABLED", + "snapshot.creation_enabled", + ) + .into(); + + emitter.finish()?; + + Ok(Self { + store_dir, + creation_enabled, + ..Self::default() + }) + } +} + +#[derive(Deserialize, Serialize, Debug, Default, Merge)] +#[serde(deny_unknown_fields, default)] +pub struct ChainWidePartial { + pub max_transactions_in_block: UserField, + pub block_time: UserField, + pub commit_time: UserField, + pub transaction_limits: UserField, + pub asset_metadata_limits: UserField, + pub asset_definition_metadata_limits: UserField, + pub account_metadata_limits: UserField, + pub domain_metadata_limits: UserField, + pub ident_length_limits: UserField, + pub wasm_fuel_limit: UserField, + pub wasm_max_memory: UserField>, +} + +impl UnwrapPartial for ChainWidePartial { + type Output = ChainWide; + + fn unwrap_partial(self) -> UnwrapPartialResult { + Ok(ChainWide { + max_transactions_in_block: self.max_transactions_in_block.unwrap_or(DEFAULT_MAX_TXS), + block_time: self + .block_time + .map_or(DEFAULT_BLOCK_TIME, HumanDuration::get), + commit_time: self + .commit_time + .map_or(DEFAULT_COMMIT_TIME, HumanDuration::get), + transaction_limits: self + .transaction_limits + .unwrap_or(DEFAULT_TRANSACTION_LIMITS), + asset_metadata_limits: self + .asset_metadata_limits + .unwrap_or(DEFAULT_METADATA_LIMITS), + asset_definition_metadata_limits: self + .asset_definition_metadata_limits + .unwrap_or(DEFAULT_METADATA_LIMITS), + account_metadata_limits: self + .account_metadata_limits + .unwrap_or(DEFAULT_METADATA_LIMITS), + domain_metadata_limits: self + .domain_metadata_limits + .unwrap_or(DEFAULT_METADATA_LIMITS), + ident_length_limits: self + .ident_length_limits + .unwrap_or(DEFAULT_IDENT_LENGTH_LIMITS), + wasm_fuel_limit: self.wasm_fuel_limit.unwrap_or(DEFAULT_WASM_FUEL_LIMIT), + wasm_max_memory: self + .wasm_max_memory + .unwrap_or(HumanBytes(DEFAULT_WASM_MAX_MEMORY_BYTES)), + }) + } +} + +impl FromEnvDefaultFallback for ChainWidePartial {} + +#[derive(Debug, Clone, Deserialize, Serialize, Default, Merge)] +#[serde(deny_unknown_fields, default)] +pub struct ToriiPartial { + pub address: UserField, + pub max_content_len: UserField>, + pub query_idle_time: UserField, +} + +impl UnwrapPartial for ToriiPartial { + type Output = Torii; + + fn unwrap_partial(self) -> UnwrapPartialResult { + let mut emitter = Emitter::new(); + + if self.address.is_none() { + emitter.emit_missing_field("torii.address"); + } + + let max_content_len = self + .max_content_len + .get() + .unwrap_or(HumanBytes(DEFAULT_MAX_CONTENT_LENGTH)); + + let query_idle_time = self + .query_idle_time + .map(HumanDuration::get) + .unwrap_or(DEFAULT_QUERY_IDLE_TIME); + + emitter.finish()?; + + Ok(Torii { + address: self.address.get().unwrap(), + max_content_len, + query_idle_time, + }) + } +} + +impl FromEnv for ToriiPartial { + fn from_env>(env: &R) -> FromEnvResult + where + Self: Sized, + { + let mut emitter = Emitter::new(); + + let address = + ParseEnvResult::parse_simple(&mut emitter, env, "API_ADDRESS", "torii.address").into(); + + emitter.finish()?; + + Ok(Self { + address, + ..Self::default() + }) + } +} diff --git a/config/src/path.rs b/config/src/path.rs deleted file mode 100644 index 23f1bd80b57..00000000000 --- a/config/src/path.rs +++ /dev/null @@ -1,152 +0,0 @@ -//! Module with configuration path related structures. 
- -extern crate alloc; - -use alloc::borrow::Cow; -use std::path::PathBuf; - -use InnerPath::*; - -/// Allowed configuration file extension that user can provide. -pub const ALLOWED_CONFIG_EXTENSIONS: [&str; 2] = ["json", "json5"]; - -/// Error type for [`Path`]. -#[derive(Debug, Clone, thiserror::Error, displaydoc::Display)] -pub enum Error { - /// File doesn't have an extension. Allowed file extensions are: {ALLOWED_CONFIG_EXTENSIONS:?} - MissingExtension, - /// Provided config file has an unsupported file extension `{0}`. Allowed extensions are: {ALLOWED_CONFIG_EXTENSIONS:?}. - InvalidExtension(String), - /// User-provided file `{0}` is not found. - FileNotFound(String), -} - -/// Result type for [`Path`] constructors. -pub type Result = std::result::Result; - -/// Inner helper struct. -/// -/// With this struct, we force to use [`Path`]'s constructors instead of constructing it directly. -#[derive(Debug, Clone, PartialEq)] -enum InnerPath { - /// Contains path without an extension, so that it will try to resolve - /// using [`ALLOWED_CONFIG_EXTENSIONS`]. [`Path::try_resolve()`] will not fail if file isn't - /// found. - Default(PathBuf), - /// Contains full path, with extension. [`Path::try_resolve()`] will fail if not found. - UserProvided(PathBuf), -} - -/// Wrapper around path to config file (e.g. `config.json`). -/// -/// Provides abstraction above user-provided config and default ones. -#[derive(Debug, Clone, PartialEq)] -pub struct Path(InnerPath); - -impl core::fmt::Display for Path { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match &self.0 { - Default(path) => { - write!( - f, - "{}.{{{}}}", - path.display(), - ALLOWED_CONFIG_EXTENSIONS.join(",") - ) - } - UserProvided(path) => write!(f, "{}", path.display()), - } - } -} - -impl Path { - /// Construct new [`Path`] which will try to resolve multiple allowed extensions and will not - /// fail resolution ([`Self::try_resolve()`]) if file is not found. - /// - /// The path should not have an extension. - /// - /// # Panics - /// If the path has an extension. - pub fn default(path: impl AsRef) -> Self { - let path = path.as_ref().to_path_buf(); - assert!( - path.extension().is_none(), - "Default config path is not supposed to have an extension. It is a bug." - ); - Self(Default(path)) - } - - /// Construct new [`Path`] from user-provided `path` which will fail to [`Self::try_resolve()`] - /// if file is not found. - /// - /// # Errors - /// If `path`'s extension is absent or unsupported. - pub fn user_provided(path: impl AsRef) -> Result { - let path = path.as_ref(); - - let extension = path - .extension() - .ok_or(Error::MissingExtension)? - .to_string_lossy(); - if !ALLOWED_CONFIG_EXTENSIONS.contains(&extension.as_ref()) { - return Err(Error::InvalidExtension(extension.into_owned())); - } - - Ok(Self(UserProvided(path.to_path_buf()))) - } - - /// Same as [`Self::user_provided()`], but accepts `&str` (useful for clap) - /// - /// # Errors - /// See [`Self::user_provided()`] - pub fn user_provided_str(raw: &str) -> Result { - Self::user_provided(raw) - } - - /// Try to get first existing path by applying possible extensions if there are any. 
- /// - /// # Errors - /// If user-provided path is not found - pub fn try_resolve(&self) -> Result>> { - match &self.0 { - Default(path) => { - let maybe = ALLOWED_CONFIG_EXTENSIONS.iter().find_map(|extension| { - let path_ext = path.with_extension(extension); - path_ext.exists().then_some(Cow::Owned(path_ext)) - }); - Ok(maybe) - } - UserProvided(path) => { - if path.exists() { - Ok(Some(Cow::Borrowed(path))) - } else { - Err(Error::FileNotFound(path.to_string_lossy().into_owned())) - } - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn display_multi_extensions() { - let path = Path::default("config"); - - let display = format!("{path}"); - - assert_eq!(display, "config.{json,json5}") - } - - #[test] - fn display_strict_extension() { - let path = - Path::user_provided("config.json").expect("Should be valid since extension is valid"); - - let display = format!("{path}"); - - assert_eq!(display, "config.json") - } -} diff --git a/config/src/queue.rs b/config/src/queue.rs deleted file mode 100644 index 5803e90ed7c..00000000000 --- a/config/src/queue.rs +++ /dev/null @@ -1,55 +0,0 @@ -//! Module for `Queue`-related configuration and structs. -use iroha_config_base::derive::Proxy; -use serde::{Deserialize, Serialize}; - -const DEFAULT_MAX_TRANSACTIONS_IN_QUEUE: u32 = 2_u32.pow(16); -const DEFAULT_MAX_TRANSACTIONS_IN_QUEUE_PER_USER: u32 = 2_u32.pow(16); -const DEFAULT_TRANSACTION_TIME_TO_LIVE_MS: u64 = 24 * 60 * 60 * 1000; // 24 hours -const DEFAULT_FUTURE_THRESHOLD_MS: u64 = 1000; - -/// `Queue` configuration. -#[derive(Copy, Clone, Deserialize, Serialize, Debug, Proxy, PartialEq, Eq)] -#[serde(rename_all = "UPPERCASE")] -#[config(env_prefix = "QUEUE_")] -pub struct Configuration { - /// The upper limit of the number of transactions waiting in the queue. - pub max_transactions_in_queue: u32, - /// The upper limit of the number of transactions waiting in the queue for single user. - /// Use this option to apply throttling. - pub max_transactions_in_queue_per_user: u32, - /// The transaction will be dropped after this time if it is still in the queue. - pub transaction_time_to_live_ms: u64, - /// The threshold to determine if a transaction has been tampered to have a future timestamp. - pub future_threshold_ms: u64, -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - max_transactions_in_queue: Some(DEFAULT_MAX_TRANSACTIONS_IN_QUEUE), - max_transactions_in_queue_per_user: Some(DEFAULT_MAX_TRANSACTIONS_IN_QUEUE_PER_USER), - transaction_time_to_live_ms: Some(DEFAULT_TRANSACTION_TIME_TO_LIVE_MS), - future_threshold_ms: Some(DEFAULT_FUTURE_THRESHOLD_MS), - } - } -} - -#[cfg(test)] -pub mod tests { - use proptest::prelude::*; - - use super::*; - - prop_compose! { - pub fn arb_proxy() - ( - max_transactions_in_queue in prop::option::of(Just(DEFAULT_MAX_TRANSACTIONS_IN_QUEUE)), - max_transactions_in_queue_per_user in prop::option::of(Just(DEFAULT_MAX_TRANSACTIONS_IN_QUEUE_PER_USER)), - transaction_time_to_live_ms in prop::option::of(Just(DEFAULT_TRANSACTION_TIME_TO_LIVE_MS)), - future_threshold_ms in prop::option::of(Just(DEFAULT_FUTURE_THRESHOLD_MS)), - ) - -> ConfigurationProxy { - ConfigurationProxy { max_transactions_in_queue, max_transactions_in_queue_per_user, transaction_time_to_live_ms, future_threshold_ms } - } - } -} diff --git a/config/src/snapshot.rs b/config/src/snapshot.rs deleted file mode 100644 index e828e5635d0..00000000000 --- a/config/src/snapshot.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! 
Module for `SnapshotMaker`-related configuration and structs. - -use std::path::PathBuf; - -use iroha_config_base::derive::Proxy; -use serde::{Deserialize, Serialize}; - -const DEFAULT_SNAPSHOT_PATH: &str = "./storage"; -// Default frequency of making snapshots is 1 minute, need to be adjusted for larger world state view size -const DEFAULT_SNAPSHOT_CREATE_EVERY_MS: u64 = 1000 * 60; -const DEFAULT_ENABLED: bool = true; - -/// Configuration for `SnapshotMaker`. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy)] -#[serde(rename_all = "UPPERCASE")] -#[config(env_prefix = "SNAPSHOT_")] -pub struct Configuration { - /// The period of time to wait between attempts to create new snapshot. - pub create_every_ms: u64, - /// Path to the directory where snapshots should be stored - #[config(serde_as_str)] - pub dir_path: PathBuf, - /// Flag to enable or disable snapshot creation - pub creation_enabled: bool, -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - create_every_ms: Some(DEFAULT_SNAPSHOT_CREATE_EVERY_MS), - dir_path: Some(DEFAULT_SNAPSHOT_PATH.into()), - creation_enabled: Some(DEFAULT_ENABLED), - } - } -} - -#[cfg(test)] -pub mod tests { - use proptest::prelude::*; - - use super::*; - - prop_compose! { - pub fn arb_proxy() - ( - create_every_ms in prop::option::of(Just(DEFAULT_SNAPSHOT_CREATE_EVERY_MS)), - dir_path in prop::option::of(Just(DEFAULT_SNAPSHOT_PATH.into())), - creation_enabled in prop::option::of(Just(DEFAULT_ENABLED)), - ) - -> ConfigurationProxy { - ConfigurationProxy { create_every_ms, dir_path, creation_enabled } - } - } -} diff --git a/config/src/sumeragi.rs b/config/src/sumeragi.rs deleted file mode 100644 index a4eb7760069..00000000000 --- a/config/src/sumeragi.rs +++ /dev/null @@ -1,186 +0,0 @@ -//! `Sumeragi` configuration. Contains both block commit and Gossip-related configuration. -use std::{fmt::Debug, fs::File, io::BufReader, path::Path}; - -use eyre::{Result, WrapErr}; -use iroha_config_base::derive::{view, Proxy}; -use iroha_crypto::prelude::*; -use iroha_data_model::prelude::*; -use iroha_primitives::{unique_vec, unique_vec::UniqueVec}; -use serde::{Deserialize, Serialize}; - -use self::default::*; - -/// Module with a set of default values. -pub mod default { - /// Default number of miliseconds the peer waits for transactions before creating a block. - pub const DEFAULT_BLOCK_TIME_MS: u64 = 2000; - /// Default amount of time allocated for voting on a block before a peer can ask for a view change. - pub const DEFAULT_COMMIT_TIME_LIMIT_MS: u64 = 4000; - /// Unused const. Should be removed in the future. - pub const DEFAULT_ACTOR_CHANNEL_CAPACITY: u32 = 100; - /// Default duration in ms between every transaction gossip. - pub const DEFAULT_GOSSIP_PERIOD_MS: u64 = 1000; - /// Default maximum number of transactions sent in single gossip message. - pub const DEFAULT_GOSSIP_BATCH_SIZE: u32 = 500; - /// Default maximum number of transactions in block. - pub const DEFAULT_MAX_TRANSACTIONS_IN_BLOCK: u32 = 2_u32.pow(9); - - /// Default estimation of consensus duration. - #[allow(clippy::integer_division)] - pub const DEFAULT_CONSENSUS_ESTIMATION_MS: u64 = - DEFAULT_BLOCK_TIME_MS + (DEFAULT_COMMIT_TIME_LIMIT_MS / 2); -} - -// Generate `ConfigurationView` without keys -view! { - /// `Sumeragi` configuration. - /// [`struct@Configuration`] provides an ability to define parameters such as `BLOCK_TIME_MS` - /// and a list of `TRUSTED_PEERS`. 
- #[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy)] - #[serde(rename_all = "UPPERCASE")] - #[config(env_prefix = "SUMERAGI_")] - pub struct Configuration { - /// The key pair consisting of a private and a public key. - //TODO: consider putting a `#[serde(skip)]` on the proxy struct here - #[view(ignore)] - pub key_pair: KeyPair, - /// Current Peer Identification. - pub peer_id: PeerId, - /// The period of time a peer waits for the `CreatedBlock` message after getting a `TransactionReceipt` - pub block_time_ms: u64, - /// Optional list of predefined trusted peers. - pub trusted_peers: TrustedPeers, - /// The period of time a peer waits for `CommitMessage` from the proxy tail. - pub commit_time_limit_ms: u64, - /// The upper limit of the number of transactions per block. - pub max_transactions_in_block: u32, - /// Buffer capacity of actor's MPSC channel - pub actor_channel_capacity: u32, - /// max number of transactions in tx gossip batch message. While configuring this, pay attention to `p2p` max message size. - pub gossip_batch_size: u32, - /// Period in milliseconds for pending transaction gossiping between peers. - pub gossip_period_ms: u64, - #[cfg(debug_assertions)] - /// Only used in testing. Causes the genesis peer to withhold blocks when it - /// is the proxy tail. - pub debug_force_soft_fork: bool, - } -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - key_pair: None, - peer_id: None, - trusted_peers: None, - block_time_ms: Some(DEFAULT_BLOCK_TIME_MS), - commit_time_limit_ms: Some(DEFAULT_COMMIT_TIME_LIMIT_MS), - actor_channel_capacity: Some(DEFAULT_ACTOR_CHANNEL_CAPACITY), - gossip_batch_size: Some(DEFAULT_GOSSIP_BATCH_SIZE), - gossip_period_ms: Some(DEFAULT_GOSSIP_PERIOD_MS), - max_transactions_in_block: Some(DEFAULT_MAX_TRANSACTIONS_IN_BLOCK), - #[cfg(debug_assertions)] - debug_force_soft_fork: Some(false), - } - } -} -impl ConfigurationProxy { - /// To be used for proxy finalisation. Should only be - /// used if no peers are present. - /// - /// # Panics - /// The [`peer_id`] field of [`Self`] - /// has not been initialized prior to calling this method. - pub fn insert_self_as_trusted_peers(&mut self) { - let peer_id = self - .peer_id - .as_ref() - .expect("Insertion of `self` as `trusted_peers` implies that `peer_id` field should be initialized"); - self.trusted_peers = if let Some(mut trusted_peers) = self.trusted_peers.take() { - trusted_peers.peers.push(peer_id.clone()); - Some(trusted_peers) - } else { - Some(TrustedPeers { - peers: unique_vec![peer_id.clone()], - }) - }; - } -} - -impl Configuration { - /// Time estimation from receiving a transaction to storing it in - /// a block on all peers for the "sunny day" scenario. - #[inline] - #[must_use] - pub const fn pipeline_time_ms(&self) -> u64 { - self.block_time_ms + self.commit_time_limit_ms - } -} - -/// Part of the [`Configuration`]. It is separated from the main structure in order to be able -/// to load it from a separate file (see [`TrustedPeers::from_path`]). -#[derive(Debug, Clone, Default, PartialEq, Eq, Deserialize, Serialize)] -#[serde(rename_all = "UPPERCASE")] -#[serde(transparent)] -#[repr(transparent)] -pub struct TrustedPeers { - /// Optional list of predefined trusted peers. Must contain unique - /// entries. Custom deserializer raises error if duplicates found. - #[serde(deserialize_with = "UniqueVec::display_deserialize_failing_on_duplicates")] - pub peers: UniqueVec, -} - -impl TrustedPeers { - /// Load trusted peers variables from JSON. 
- /// - /// # Errors - /// - File not found - /// - File is not Valid JSON. - /// - File is valid JSON, but configuration options don't match. - pub fn from_path + Debug>(path: P) -> Result { - let file = File::open(&path) - .wrap_err_with(|| format!("Failed to open trusted peers file {:?}", &path))?; - let reader = BufReader::new(file); - serde_json::from_reader(reader) - .wrap_err("Failed to deserialize json from reader") - .map_err(Into::into) - } -} - -#[cfg(test)] -pub mod tests { - use proptest::prelude::*; - - use super::*; - - prop_compose! { - #[allow(unused_variables)] - pub fn arb_proxy() - (key_pair in Just(None), - peer_id in Just(None), - block_time_ms in prop::option::of(Just(DEFAULT_BLOCK_TIME_MS)), - trusted_peers in Just(None), - commit_time_limit_ms in prop::option::of(Just(DEFAULT_COMMIT_TIME_LIMIT_MS)), - actor_channel_capacity in prop::option::of(Just(DEFAULT_ACTOR_CHANNEL_CAPACITY)), - gossip_batch_size in prop::option::of(Just(DEFAULT_GOSSIP_BATCH_SIZE)), - gossip_period_ms in prop::option::of(Just(DEFAULT_GOSSIP_PERIOD_MS)), - max_transactions_in_block in prop::option::of(Just(DEFAULT_MAX_TRANSACTIONS_IN_BLOCK)), - debug_force_soft_fork in prop::option::of(Just(false)), - ) - -> ConfigurationProxy { - ConfigurationProxy { - key_pair, - peer_id, - block_time_ms, - trusted_peers, - commit_time_limit_ms, - max_transactions_in_block, - actor_channel_capacity, - gossip_batch_size, - gossip_period_ms, - #[cfg(debug_assertions)] - debug_force_soft_fork - } - } - } -} diff --git a/config/src/telemetry.rs b/config/src/telemetry.rs deleted file mode 100644 index b7ce10f9ee4..00000000000 --- a/config/src/telemetry.rs +++ /dev/null @@ -1,117 +0,0 @@ -//! Module for telemetry-related configuration and structs. -use std::path::PathBuf; - -use iroha_config_base::derive::Proxy; -use serde::{Deserialize, Serialize}; -use url::Url; - -/// Configuration parameters container -#[derive(Clone, Deserialize, Serialize, Debug, Proxy, PartialEq, Eq)] -#[serde(rename_all = "UPPERCASE")] -#[config(env_prefix = "TELEMETRY_")] -pub struct Configuration { - /// The node's name to be seen on the telemetry - #[config(serde_as_str)] - pub name: Option, - /// The url of the telemetry, e.g., ws://127.0.0.1:8001/submit - #[config(serde_as_str)] - pub url: Option, - /// The minimum period of time in seconds to wait before reconnecting - pub min_retry_period: u64, - /// The maximum exponent of 2 that is used for increasing delay between reconnections - pub max_retry_delay_exponent: u8, - /// The filepath that to write dev-telemetry to - #[config(serde_as_str)] - pub file: Option, -} - -/// Complete configuration needed to start regular telemetry. -pub struct RegularTelemetryConfig { - #[allow(missing_docs)] - pub name: String, - #[allow(missing_docs)] - pub url: Url, - #[allow(missing_docs)] - pub min_retry_period: u64, - #[allow(missing_docs)] - pub max_retry_delay_exponent: u8, -} - -/// Complete configuration needed to start dev telemetry. 
-pub struct DevTelemetryConfig { - #[allow(missing_docs)] - pub file: PathBuf, -} - -impl Configuration { - /// Parses user-provided configuration into stronger typed structures - /// - /// Should be refactored with [#3500](https://github.com/hyperledger/iroha/issues/3500) - pub fn parse(&self) -> (Option, Option) { - let Self { - ref name, - ref url, - max_retry_delay_exponent, - min_retry_period, - ref file, - } = *self; - - let regular = if let (Some(name), Some(url)) = (name, url) { - Some(RegularTelemetryConfig { - name: name.clone(), - url: url.clone(), - max_retry_delay_exponent, - min_retry_period, - }) - } else { - None - }; - - let dev = file - .as_ref() - .map(|file| DevTelemetryConfig { file: file.clone() }); - - (regular, dev) - } -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - name: Some(None), - url: Some(None), - min_retry_period: Some(retry_period::DEFAULT_MIN_RETRY_PERIOD), - max_retry_delay_exponent: Some(retry_period::DEFAULT_MAX_RETRY_DELAY_EXPONENT), - file: Some(None), - } - } -} - -/// `RetryPeriod` configuration -pub mod retry_period { - /// Default minimal retry period - pub const DEFAULT_MIN_RETRY_PERIOD: u64 = 1; - /// Default maximum exponent for the retry delay - pub const DEFAULT_MAX_RETRY_DELAY_EXPONENT: u8 = 4; -} - -#[cfg(test)] -pub mod tests { - use proptest::prelude::*; - - use super::*; - - prop_compose! { - pub fn arb_proxy() - ( - name in prop::option::of(Just(None)), - url in prop::option::of(Just(None)), - min_retry_period in prop::option::of(Just(retry_period::DEFAULT_MIN_RETRY_PERIOD)), - max_retry_delay_exponent in prop::option::of(Just(retry_period::DEFAULT_MAX_RETRY_DELAY_EXPONENT)), - file in prop::option::of(Just(None)), - ) - -> ConfigurationProxy { - ConfigurationProxy { name, url, min_retry_period, max_retry_delay_exponent, file } - } - } -} diff --git a/config/src/torii.rs b/config/src/torii.rs deleted file mode 100644 index d77457f0ddb..00000000000 --- a/config/src/torii.rs +++ /dev/null @@ -1,99 +0,0 @@ -//! `Torii` configuration as well as the default values for the URLs used for the main endpoints: `p2p`, `telemetry`, but not `api`. - -use iroha_config_base::derive::Proxy; -use iroha_primitives::addr::{socket_addr, SocketAddr}; -use serde::{Deserialize, Serialize}; - -/// Default socket for p2p communication -pub const DEFAULT_TORII_P2P_ADDR: SocketAddr = socket_addr!(127.0.0.1:1337); -/// Default maximum size of single transaction -pub const DEFAULT_TORII_MAX_TRANSACTION_SIZE: u32 = 2_u32.pow(15); -/// Default upper bound on `content-length` specified in the HTTP request header -pub const DEFAULT_TORII_MAX_CONTENT_LENGTH: u32 = 2_u32.pow(12) * 4000; - -/// Structure that defines the configuration parameters of `Torii` which is the routing module. -/// For example the `p2p_addr`, which is used for consensus and block-synchronisation purposes, -/// as well as `max_transaction_size`. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy)] -#[serde(rename_all = "UPPERCASE")] -#[config(env_prefix = "TORII_")] -pub struct Configuration { - /// Torii address for p2p communication for consensus and block synchronization purposes. - #[config(serde_as_str)] - pub p2p_addr: SocketAddr, - /// Torii address for client API. - #[config(serde_as_str)] - pub api_url: SocketAddr, - /// Maximum number of bytes in raw transaction. Used to prevent from DOS attacks. - pub max_transaction_size: u32, - /// Maximum number of bytes in raw message. Used to prevent from DOS attacks. 
- pub max_content_len: u32, -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - p2p_addr: None, - api_url: None, - max_transaction_size: Some(DEFAULT_TORII_MAX_TRANSACTION_SIZE), - max_content_len: Some(DEFAULT_TORII_MAX_CONTENT_LENGTH), - } - } -} - -pub mod uri { - //! URI that `Torii` uses to route incoming requests. - - /// Default socket for listening on external requests - pub const DEFAULT_API_ADDR: iroha_primitives::addr::SocketAddr = - iroha_primitives::addr::socket_addr!(127.0.0.1:8080); - /// Query URI is used to handle incoming Query requests. - pub const QUERY: &str = "query"; - /// Transaction URI is used to handle incoming ISI requests. - pub const TRANSACTION: &str = "transaction"; - /// Block URI is used to handle incoming Block requests. - pub const CONSENSUS: &str = "consensus"; - /// Health URI is used to handle incoming Healthcheck requests. - pub const HEALTH: &str = "health"; - /// The URI used for block synchronization. - pub const BLOCK_SYNC: &str = "block/sync"; - /// The web socket uri used to subscribe to block and transactions statuses. - pub const SUBSCRIPTION: &str = "events"; - /// The web socket uri used to subscribe to blocks stream. - pub const BLOCKS_STREAM: &str = "block/stream"; - /// Get pending transactions. - pub const MATCHING_PENDING_TRANSACTIONS: &str = "matching_pending_transactions"; - /// The URI for local config changing inspecting - pub const CONFIGURATION: &str = "configuration"; - /// URI to report status for administration - pub const STATUS: &str = "status"; - /// Metrics URI is used to export metrics according to [Prometheus - /// Guidance](https://prometheus.io/docs/instrumenting/writing_exporters/). - pub const METRICS: &str = "metrics"; - /// URI for retrieving the schema with which Iroha was built. - pub const SCHEMA: &str = "schema"; - /// URI for getting the API version currently used - pub const API_VERSION: &str = "api_version"; - /// URI for getting cpu profile - pub const PROFILE: &str = "debug/pprof/profile"; -} - -#[cfg(test)] -pub mod tests { - use proptest::prelude::*; - - use super::*; - - prop_compose! { - pub fn arb_proxy() - ( - p2p_addr in prop::option::of(Just(DEFAULT_TORII_P2P_ADDR)), - api_url in prop::option::of(Just(uri::DEFAULT_API_ADDR)), - max_transaction_size in prop::option::of(Just(DEFAULT_TORII_MAX_TRANSACTION_SIZE)), - max_content_len in prop::option::of(Just(DEFAULT_TORII_MAX_CONTENT_LENGTH)), - ) - -> ConfigurationProxy { - ConfigurationProxy { p2p_addr, api_url, max_transaction_size, max_content_len } - } - } -} diff --git a/config/src/wasm.rs b/config/src/wasm.rs index cd55fd989af..8b137891791 100644 --- a/config/src/wasm.rs +++ b/config/src/wasm.rs @@ -1,35 +1 @@ -//! Module for wasm-related configuration and structs. -use iroha_config_base::derive::Proxy; -use serde::{Deserialize, Serialize}; -use self::default::*; - -/// Module with a set of default values. -pub mod default { - /// Default amount of fuel provided for execution - pub const DEFAULT_FUEL_LIMIT: u64 = 55_000_000; - /// Default amount of memory given for smart contract - pub const DEFAULT_MAX_MEMORY: u32 = 500 * 2_u32.pow(20); // 500 MiB -} - -/// `WebAssembly Runtime` configuration. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Proxy)] -#[config(env_prefix = "WASM_")] -#[serde(rename_all = "UPPERCASE")] -pub struct Configuration { - /// The fuel limit determines the maximum number of instructions that can be executed within a smart contract. 
- /// Every WASM instruction costs approximately 1 unit of fuel. See - /// [`wasmtime` reference](https://docs.rs/wasmtime/0.29.0/wasmtime/struct.Store.html#method.add_fuel) - pub fuel_limit: u64, - /// Maximum amount of linear memory a given smart contract can allocate. - pub max_memory: u32, -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - fuel_limit: Some(DEFAULT_FUEL_LIMIT), - max_memory: Some(DEFAULT_MAX_MEMORY), - } - } -} diff --git a/config/src/wsv.rs b/config/src/wsv.rs deleted file mode 100644 index dcb23b23d85..00000000000 --- a/config/src/wsv.rs +++ /dev/null @@ -1,86 +0,0 @@ -//! Module for `WorldStateView`-related configuration and structs. -use default::*; -use iroha_config_base::derive::Proxy; -use iroha_data_model::{prelude::*, transaction::TransactionLimits}; -use serde::{Deserialize, Serialize}; - -use crate::wasm; - -/// Module with a set of default values. -pub mod default { - use super::*; - - /// Default limits for metadata - pub const DEFAULT_METADATA_LIMITS: MetadataLimits = - MetadataLimits::new(2_u32.pow(20), 2_u32.pow(12)); - /// Default limits for ident length - pub const DEFAULT_IDENT_LENGTH_LIMITS: LengthLimits = LengthLimits::new(1, 2_u32.pow(7)); - /// Default maximum number of instructions and expressions per transaction - pub const DEFAULT_MAX_INSTRUCTION_NUMBER: u64 = 2_u64.pow(12); - /// Default maximum number of instructions and expressions per transaction - pub const DEFAULT_MAX_WASM_SIZE_BYTES: u64 = 2_u64.pow(22); // 4 MiB - - /// Default transaction limits - pub const DEFAULT_TRANSACTION_LIMITS: TransactionLimits = - TransactionLimits::new(DEFAULT_MAX_INSTRUCTION_NUMBER, DEFAULT_MAX_WASM_SIZE_BYTES); -} - -/// `WorldStateView` configuration. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Proxy)] -#[config(env_prefix = "WSV_")] -#[serde(rename_all = "UPPERCASE")] -pub struct Configuration { - /// [`MetadataLimits`] for every asset with store. - pub asset_metadata_limits: MetadataLimits, - /// [`MetadataLimits`] of any asset definition metadata. - pub asset_definition_metadata_limits: MetadataLimits, - /// [`MetadataLimits`] of any account metadata. - pub account_metadata_limits: MetadataLimits, - /// [`MetadataLimits`] of any domain metadata. - pub domain_metadata_limits: MetadataLimits, - /// [`LengthLimits`] for the number of chars in identifiers that can be stored in the WSV. - pub ident_length_limits: LengthLimits, - /// Limits that all transactions need to obey, in terms of size - /// of WASM blob and number of instructions. - pub transaction_limits: TransactionLimits, - /// WASM runtime configuration - #[config(inner)] - pub wasm_runtime_config: wasm::Configuration, -} - -impl Default for ConfigurationProxy { - fn default() -> Self { - Self { - asset_metadata_limits: Some(DEFAULT_METADATA_LIMITS), - asset_definition_metadata_limits: Some(DEFAULT_METADATA_LIMITS), - account_metadata_limits: Some(DEFAULT_METADATA_LIMITS), - domain_metadata_limits: Some(DEFAULT_METADATA_LIMITS), - ident_length_limits: Some(DEFAULT_IDENT_LENGTH_LIMITS), - transaction_limits: Some(DEFAULT_TRANSACTION_LIMITS), - wasm_runtime_config: Some(wasm::ConfigurationProxy::default()), - } - } -} - -#[cfg(test)] -pub mod tests { - use proptest::prelude::*; - - use super::*; - - prop_compose! 
{ - pub fn arb_proxy() - ( - asset_metadata_limits in prop::option::of(Just(DEFAULT_METADATA_LIMITS)), - asset_definition_metadata_limits in prop::option::of(Just(DEFAULT_METADATA_LIMITS)), - account_metadata_limits in prop::option::of(Just(DEFAULT_METADATA_LIMITS)), - domain_metadata_limits in prop::option::of(Just(DEFAULT_METADATA_LIMITS)), - ident_length_limits in prop::option::of(Just(DEFAULT_IDENT_LENGTH_LIMITS)), - transaction_limits in prop::option::of(Just(DEFAULT_TRANSACTION_LIMITS)), - wasm_runtime_config in prop::option::of(Just(wasm::ConfigurationProxy::default())), - ) - -> ConfigurationProxy { - ConfigurationProxy { asset_metadata_limits, asset_definition_metadata_limits, account_metadata_limits, domain_metadata_limits, ident_length_limits, transaction_limits, wasm_runtime_config } - } - } -} diff --git a/config/test/config.toml b/config/test/config.toml new file mode 100644 index 00000000000..d88f7ec119e --- /dev/null +++ b/config/test/config.toml @@ -0,0 +1,7 @@ +[iroha] +public_key = "ed0120FAFCB2B27444221717F6FCBF900D5BE95273B1B0904B08C736B32A19F16AC1F9" +private_key = { digest = "ed25519", payload = "82886B5A2BB3785F3CA8F8A78F60EA9DB62F939937B1CFA8407316EF07909A8D236808A6D4C12C91CA19E54686C2B8F5F3A786278E3824B4571EF234DEC8683B" } +p2p_address = "localhost:1337" + +[torii] +api_address = "localhost:8080" \ No newline at end of file diff --git a/config/tests/fixtures.rs b/config/tests/fixtures.rs new file mode 100644 index 00000000000..214290a1eb2 --- /dev/null +++ b/config/tests/fixtures.rs @@ -0,0 +1,519 @@ +#![allow(clippy::needless_raw_string_hashes)] // triggered by `expect_test` snapshots + +use std::{ + collections::{HashMap, HashSet}, + fs, + path::{Path, PathBuf}, +}; + +use eyre::Result; +use iroha_config::parameters::{ + actual::{Genesis, Root}, + user::{CliContext, RootPartial}, +}; +use iroha_config_base::{FromEnv, TestEnv, UnwrapPartial as _}; + +fn fixtures_dir() -> PathBuf { + // CWD is the crate's root + PathBuf::from("tests/fixtures") +} + +fn parse_env(raw: impl AsRef) -> HashMap { + raw.as_ref() + .lines() + .map(|line| { + let mut items = line.split('='); + let key = items + .next() + .expect("line should be in {key}={value} format"); + let value = items + .next() + .expect("line should be in {key}={value} format"); + (key.to_string(), value.to_string()) + }) + .collect() +} + +fn test_env_from_file(p: impl AsRef) -> TestEnv { + let contents = fs::read_to_string(p).expect("the path should be valid"); + let map = parse_env(contents); + TestEnv::with_map(map) +} + +/// This test not only asserts that the minimal set of fields is enough; +/// it also gives an insight into every single default value +#[test] +#[allow(clippy::too_many_lines)] +fn minimal_config_snapshot() -> Result<()> { + let config = RootPartial::from_toml(fixtures_dir().join("minimal_with_trusted_peers.toml"))? + .unwrap_partial()? 
+ .parse(CliContext { + submit_genesis: false, + })?; + + let expected = expect_test::expect![[r#" + Root { + common: Common { + chain_id: ChainId( + "0", + ), + key_pair: KeyPair { + public_key: PublicKey( + ed25519( + "ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB", + ), + ), + private_key: ed25519( + "8F4C15E5D664DA3F13778801D23D4E89B76E94C1B94B389544168B6CB894F84F8BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB", + ), + }, + p2p_address: 127.0.0.1:1337, + }, + genesis: Partial { + public_key: PublicKey( + ed25519( + "ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB", + ), + ), + }, + torii: Torii { + address: 127.0.0.1:8080, + max_content_len_bytes: 16777216, + }, + kura: Kura { + init_mode: Strict, + store_dir: "./storage", + debug_output_new_blocks: false, + }, + sumeragi: Sumeragi { + trusted_peers: UniqueVec( + [ + PeerId { + address: 127.0.0.1:1338, + public_key: PublicKey( + ed25519( + "ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB", + ), + ), + }, + ], + ), + debug_force_soft_fork: false, + }, + block_sync: BlockSync { + gossip_period: 10s, + gossip_max_size: 4, + }, + transaction_gossiper: TransactionGossiper { + gossip_period: 1s, + gossip_max_size: 500, + }, + live_query_store: LiveQueryStore { + idle_time: 30s, + }, + logger: Logger { + level: INFO, + format: Full, + tokio_console_address: 127.0.0.1:5555, + }, + queue: Queue { + capacity: 65536, + capacity_per_user: 65536, + transaction_time_to_live: 86400s, + future_threshold: 1s, + }, + snapshot: Snapshot { + create_every: 60s, + store_dir: "./storage/snapshot", + creation_enabled: true, + }, + telemetry: None, + dev_telemetry: None, + chain_wide: ChainWide { + max_transactions_in_block: 512, + block_time: 2s, + commit_time: 4s, + transaction_limits: TransactionLimits { + max_instruction_number: 4096, + max_wasm_size_bytes: 4194304, + }, + asset_metadata_limits: Limits { + max_len: 1048576, + max_entry_byte_size: 4096, + }, + asset_definition_metadata_limits: Limits { + max_len: 1048576, + max_entry_byte_size: 4096, + }, + account_metadata_limits: Limits { + max_len: 1048576, + max_entry_byte_size: 4096, + }, + domain_metadata_limits: Limits { + max_len: 1048576, + max_entry_byte_size: 4096, + }, + ident_length_limits: LengthLimits { + min: 1, + max: 128, + }, + wasm_runtime: WasmRuntime { + fuel_limit: 55000000, + max_memory_bytes: 524288000, + }, + }, + }"#]]; + expected.assert_eq(&format!("{config:#?}")); + + Ok(()) +} + +#[test] +fn config_with_genesis() -> Result<()> { + let _config = RootPartial::from_toml(fixtures_dir().join("minimal_alone_with_genesis.toml"))? + .unwrap_partial()? + .parse(CliContext { + submit_genesis: true, + })?; + Ok(()) +} + +#[test] +fn minimal_with_genesis_but_no_cli_arg_fails() -> Result<()> { + let error = RootPartial::from_toml(fixtures_dir().join("minimal_alone_with_genesis.toml"))? + .unwrap_partial()? + .parse(CliContext { + submit_genesis: false, + }) + .expect_err("should fail since `--submit-genesis=false`"); + + let expected = expect_test::expect![[r#" + `genesis.file` and `genesis.private_key` are presented, but `--submit-genesis` was not set + The network consists from this one peer only (no `sumeragi.trusted_peers` provided). Since `--submit-genesis` is not set, there is no way to receive the genesis block. 
Either provide the genesis by setting `--submit-genesis` argument, `genesis.private_key`, and `genesis.file` configuration parameters, or increase the number of trusted peers in the network using `sumeragi.trusted_peers` configuration parameter."#]]; + expected.assert_eq(&format!("{error:#}")); + + Ok(()) +} + +#[test] +fn minimal_without_genesis_but_with_submit_fails() -> Result<()> { + let error = RootPartial::from_toml(fixtures_dir().join("minimal_with_trusted_peers.toml"))? + .unwrap_partial()? + .parse(CliContext { + submit_genesis: true, + }) + .expect_err( + "should fail since there is no genesis in the config, but `--submit-genesis=true`", + ); + + let expected = expect_test::expect!["`--submit-genesis` was set, but `genesis.file` and `genesis.private_key` are not presented"]; + expected.assert_eq(&format!("{error:#}")); + + Ok(()) +} + +#[test] +fn self_is_presented_in_trusted_peers() -> Result<()> { + let config = RootPartial::from_toml(fixtures_dir().join("minimal_alone_with_genesis.toml"))? + .unwrap_partial()? + .parse(CliContext { + submit_genesis: true, + })?; + + assert!(config + .sumeragi + .trusted_peers + .contains(&config.common.peer_id())); + + Ok(()) +} + +#[test] +fn missing_fields() -> Result<()> { + let error = RootPartial::from_toml(fixtures_dir().join("bad.missing_fields.toml"))? + .unwrap_partial() + .expect_err("should fail with missing fields"); + + let expected = expect_test::expect![[r#" + missing field: `chain_id` + missing field: `public_key` + missing field: `private_key` + missing field: `genesis.public_key` + missing field: `network.address` + missing field: `torii.address`"#]]; + expected.assert_eq(&format!("{error:#}")); + + Ok(()) +} + +#[test] +fn extra_fields() { + let error = RootPartial::from_toml(fixtures_dir().join("extra_fields.toml")) + .expect_err("should fail with extra fields"); + + let expected = expect_test::expect!["cannot open file at location `tests/fixtures/extra_fields.toml`: No such file or directory (os error 2)"]; + expected.assert_eq(&format!("{error:#}")); +} + +#[test] +fn inconsistent_genesis_config() -> Result<()> { + let error = RootPartial::from_toml(fixtures_dir().join("inconsistent_genesis.toml"))? + .unwrap_partial() + .expect("all fields are present") + .parse(CliContext { + submit_genesis: false, + }) + .expect_err("should fail with bad genesis config"); + + let expected = expect_test::expect![[r#" + `genesis.file` and `genesis.private_key` should be set together + The network consists from this one peer only (no `sumeragi.trusted_peers` provided). Since `--submit-genesis` is not set, there is no way to receive the genesis block. Either provide the genesis by setting `--submit-genesis` argument, `genesis.private_key`, and `genesis.file` configuration parameters, or increase the number of trusted peers in the network using `sumeragi.trusted_peers` configuration parameter."#]]; + expected.assert_eq(&format!("{error:#}")); + + Ok(()) +} + +/// Checks that every single provided env variable is consumed and parsed +/// into a valid config.
+#[test] +#[allow(clippy::too_many_lines)] +fn full_envs_set_is_consumed() -> Result<()> { + let env = test_env_from_file(fixtures_dir().join("full.env")); + + let layer = RootPartial::from_env(&env)?; + + assert_eq!(env.unvisited(), HashSet::new()); + + let expected = expect_test::expect![[r#" + RootPartial { + extends: None, + chain_id: Some( + ChainId( + "0-0", + ), + ), + public_key: Some( + PublicKey( + ed25519( + "ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB", + ), + ), + ), + private_key: Some( + ed25519( + "8F4C15E5D664DA3F13778801D23D4E89B76E94C1B94B389544168B6CB894F84F8BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB", + ), + ), + genesis: GenesisPartial { + public_key: Some( + PublicKey( + ed25519( + "ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB", + ), + ), + ), + private_key: Some( + ed25519( + "8F4C15E5D664DA3F13778801D23D4E89B76E94C1B94B389544168B6CB894F84F8BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB", + ), + ), + file: None, + }, + kura: KuraPartial { + init_mode: Some( + Strict, + ), + store_dir: Some( + "/store/path/from/env", + ), + debug: KuraDebugPartial { + output_new_blocks: Some( + false, + ), + }, + }, + sumeragi: SumeragiPartial { + trusted_peers: None, + debug: SumeragiDebugPartial { + force_soft_fork: None, + }, + }, + network: NetworkPartial { + address: Some( + 127.0.0.1:5432, + ), + block_gossip_max_size: None, + block_gossip_period: None, + transaction_gossip_max_size: None, + transaction_gossip_period: None, + }, + logger: LoggerPartial { + level: Some( + DEBUG, + ), + format: Some( + Pretty, + ), + tokio_console_address: None, + }, + queue: QueuePartial { + capacity: None, + capacity_per_user: None, + transaction_time_to_live: None, + future_threshold: None, + }, + snapshot: SnapshotPartial { + create_every: None, + store_dir: Some( + "/snapshot/path/from/env", + ), + creation_enabled: Some( + false, + ), + }, + telemetry: TelemetryPartial { + name: None, + url: None, + min_retry_period: None, + max_retry_delay_exponent: None, + dev: TelemetryDevPartial { + out_file: None, + }, + }, + torii: ToriiPartial { + address: Some( + 127.0.0.1:8080, + ), + max_content_len: None, + query_idle_time: None, + }, + chain_wide: ChainWidePartial { + max_transactions_in_block: None, + block_time: None, + commit_time: None, + transaction_limits: None, + asset_metadata_limits: None, + asset_definition_metadata_limits: None, + account_metadata_limits: None, + domain_metadata_limits: None, + ident_length_limits: None, + wasm_fuel_limit: None, + wasm_max_memory: None, + }, + }"#]]; + expected.assert_eq(&format!("{layer:#?}")); + + Ok(()) +} + +#[test] +fn multiple_env_parsing_errors() { + let env = test_env_from_file(fixtures_dir().join("bad.multiple_bad_envs.env")); + + let error = RootPartial::from_env(&env).expect_err("the input from env is invalid"); + + let expected = expect_test::expect![[r#" + `PRIVATE_KEY_PAYLOAD` env was provided, but `PRIVATE_KEY_DIGEST` was not + failed to parse `genesis.private_key.digest_function` field from `GENESIS_PRIVATE_KEY_DIGEST` env variable + failed to parse `kura.debug.output_new_blocks` field from `KURA_DEBUG_OUTPUT_NEW_BLOCKS` env variable + failed to parse `logger.format` field from `LOG_FORMAT` env variable + failed to parse `torii.address` field from `API_ADDRESS` env variable"#]]; + expected.assert_eq(&format!("{error:#}")); +} + +#[test] +fn config_from_file_and_env() -> Result<()> { + let env = 
test_env_from_file(fixtures_dir().join("minimal_file_and_env.env")); + + let _config = RootPartial::from_toml(fixtures_dir().join("minimal_file_and_env.toml"))? + .merge(RootPartial::from_env(&env)?) + .unwrap_partial()? + .parse(CliContext { + submit_genesis: false, + })?; + + Ok(()) +} + +#[test] +fn fails_if_torii_address_and_p2p_address_are_equal() -> Result<()> { + let error = RootPartial::from_toml(fixtures_dir().join("bad.torii_addr_eq_p2p_addr.toml"))? + .unwrap_partial() + .expect("should not fail, all fields are present") + .parse(CliContext { + submit_genesis: false, + }) + .expect_err("should fail because of bad input"); + + let expected = + expect_test::expect!["`iroha.p2p_address` and `torii.address` should not be the same"]; + expected.assert_eq(&format!("{error:#}")); + + Ok(()) +} + +#[test] +fn fails_if_extends_leads_to_nowhere() { + let error = RootPartial::from_toml(fixtures_dir().join("bad.extends_nowhere.toml")) + .expect_err("should fail with bad input"); + + let expected = expect_test::expect!["cannot extend from `tests/fixtures/nowhere.toml`: cannot open file at location `tests/fixtures/nowhere.toml`: No such file or directory (os error 2)"]; + expected.assert_eq(&format!("{error:#}")); +} + +#[test] +fn multiple_extends_works() -> Result<()> { + // we are looking into `logger` in particular + let layer = RootPartial::from_toml(fixtures_dir().join("multiple_extends.toml"))?.logger; + + let expected = expect_test::expect![[r#" + LoggerPartial { + level: Some( + ERROR, + ), + format: Some( + Compact, + ), + tokio_console_address: None, + }"#]]; + expected.assert_eq(&format!("{layer:#?}")); + + Ok(()) +} + +#[test] +fn full_config_parses_fine() { + let _cfg = Root::load( + Some(fixtures_dir().join("full.toml")), + CliContext { + submit_genesis: true, + }, + ) + .expect("should be fine"); +} + +#[test] +fn absolute_paths_are_preserved() { + let cfg = Root::load( + Some(fixtures_dir().join("absolute_paths.toml")), + CliContext { + submit_genesis: true, + }, + ) + .expect("should be fine"); + + assert_eq!(cfg.kura.store_dir, PathBuf::from("/kura/store")); + assert_eq!(cfg.snapshot.store_dir, PathBuf::from("/snapshot/store")); + assert_eq!( + cfg.dev_telemetry.unwrap().out_file, + PathBuf::from("/telemetry/file.json") + ); + if let Genesis::Full { + file: genesis_file, .. 
+ } = cfg.genesis + { + assert_eq!(genesis_file, PathBuf::from("/oh/my/genesis.json")); + } else { + unreachable!() + }; +} diff --git a/config/tests/fixtures/absolute_paths.toml b/config/tests/fixtures/absolute_paths.toml new file mode 100644 index 00000000000..0d1f3d3f3d5 --- /dev/null +++ b/config/tests/fixtures/absolute_paths.toml @@ -0,0 +1,14 @@ +extends = ["base.toml"] + +[kura] +store_dir = "/kura/store" + +[snapshot] +store_dir = "/snapshot/store" + +[telemetry.dev] +out_file = "/telemetry/file.json" + +[genesis] +file = "/oh/my/genesis.json" +private_key = { digest_function = "ed25519", payload = "8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb" } \ No newline at end of file diff --git a/config/tests/fixtures/bad.extends_nowhere.toml b/config/tests/fixtures/bad.extends_nowhere.toml new file mode 100644 index 00000000000..30129b39359 --- /dev/null +++ b/config/tests/fixtures/bad.extends_nowhere.toml @@ -0,0 +1 @@ +extends = "nowhere.toml" \ No newline at end of file diff --git a/config/tests/fixtures/bad.extra_fields.toml b/config/tests/fixtures/bad.extra_fields.toml new file mode 100644 index 00000000000..bc2baaf8783 --- /dev/null +++ b/config/tests/fixtures/bad.extra_fields.toml @@ -0,0 +1,4 @@ +# Iroha should not silently ignore extra fields +i_am_unknown = true +foo = false +bar = 0.5 \ No newline at end of file diff --git a/config/tests/fixtures/bad.missing_fields.toml b/config/tests/fixtures/bad.missing_fields.toml new file mode 100644 index 00000000000..d5bd33cac2e --- /dev/null +++ b/config/tests/fixtures/bad.missing_fields.toml @@ -0,0 +1 @@ +# all fields are missing \ No newline at end of file diff --git a/config/tests/fixtures/bad.multiple_bad_envs.env b/config/tests/fixtures/bad.multiple_bad_envs.env new file mode 100644 index 00000000000..12ab82cf92e --- /dev/null +++ b/config/tests/fixtures/bad.multiple_bad_envs.env @@ -0,0 +1,6 @@ +PRIVATE_KEY_PAYLOAD=8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb +GENESIS_PRIVATE_KEY_DIGEST=BAD BAD BAD +GENESIS_PRIVATE_KEY_PAYLOAD=8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb +API_ADDRESS=BAD BAD BAD +KURA_DEBUG_OUTPUT_NEW_BLOCKS=TrueŠŖ +LOG_FORMAT=what format? 
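Note for readers tracing these fixtures: the `config_from_file_and_env` test above merges a TOML layer with an env layer (see the `minimal_file_and_env.*` fixtures further below, where the file omits `torii.address` and `API_ADDRESS` supplies it). A minimal sketch of that two-layer precedence, assuming simplified semantics where an earlier layer's set values are kept and a later layer only fills gaps; the `ToriiLayer` type and its `merge` are hypothetical illustrations, not Iroha's actual `RootPartial` API:

// Minimal sketch of file-plus-env layering, assuming simplified semantics:
// an earlier layer's set values are kept, a later layer only fills gaps.
// `ToriiLayer` and its `merge` are hypothetical, not Iroha's actual API.
#[derive(Debug, Default)]
struct ToriiLayer {
    /// e.g. `torii.address` from TOML, or the `API_ADDRESS` env variable
    address: Option<String>,
}

impl ToriiLayer {
    fn merge(self, later: Self) -> Self {
        Self {
            // keep the file's value when present; otherwise take the env's
            address: self.address.or(later.address),
        }
    }
}

fn main() {
    // Mirrors `minimal_file_and_env.*`: the TOML omits `torii.address`,
    // so the env layer's `API_ADDRESS` supplies it.
    let from_file = ToriiLayer { address: None };
    let from_env = ToriiLayer {
        address: Some("127.0.0.1:8080".to_owned()),
    };
    let merged = from_file.merge(from_env);
    assert_eq!(merged.address.as_deref(), Some("127.0.0.1:8080"));
}

Keeping every field optional in each layer also means the final `unwrap_partial` step is the single place where missing fields are reported, which is exactly what the `missing_fields` test above asserts.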
diff --git a/config/tests/fixtures/bad.torii_addr_eq_p2p_addr.toml b/config/tests/fixtures/bad.torii_addr_eq_p2p_addr.toml new file mode 100644 index 00000000000..79f9c324cee --- /dev/null +++ b/config/tests/fixtures/bad.torii_addr_eq_p2p_addr.toml @@ -0,0 +1,7 @@ +extends = ["base.toml", "base_trusted_peers.toml"] + +[network] +address = "127.0.0.1:8080" + +[torii] +address = "127.0.0.1:8080" diff --git a/config/tests/fixtures/base.toml b/config/tests/fixtures/base.toml new file mode 100644 index 00000000000..3ca6d219477 --- /dev/null +++ b/config/tests/fixtures/base.toml @@ -0,0 +1,13 @@ +chain_id = "0" +public_key = "ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB" +private_key.digest_function = "ed25519" +private_key.payload = "8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb" + +[network] +address = "127.0.0.1:1337" + +[genesis] +public_key = "ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB" + +[torii] +address = "127.0.0.1:8080" \ No newline at end of file diff --git a/config/tests/fixtures/base_trusted_peers.toml b/config/tests/fixtures/base_trusted_peers.toml new file mode 100644 index 00000000000..1314cd70026 --- /dev/null +++ b/config/tests/fixtures/base_trusted_peers.toml @@ -0,0 +1,3 @@ +[[sumeragi.trusted_peers]] +address = "127.0.0.1:1338" +public_key = "ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB" diff --git a/config/tests/fixtures/empty_ok_genesis.json b/config/tests/fixtures/empty_ok_genesis.json new file mode 100644 index 00000000000..21bcda658eb --- /dev/null +++ b/config/tests/fixtures/empty_ok_genesis.json @@ -0,0 +1,4 @@ +{ + "transactions": [], + "executor": "./executor.wasm" +} \ No newline at end of file diff --git a/config/tests/fixtures/full.env b/config/tests/fixtures/full.env new file mode 100644 index 00000000000..e79ed99d747 --- /dev/null +++ b/config/tests/fixtures/full.env @@ -0,0 +1,16 @@ +CHAIN_ID=0-0 +PUBLIC_KEY=ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB +PRIVATE_KEY_DIGEST=ed25519 +PRIVATE_KEY_PAYLOAD=8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb +P2P_ADDRESS=127.0.0.1:5432 +GENESIS_PUBLIC_KEY=ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB +GENESIS_PRIVATE_KEY_DIGEST=ed25519 +GENESIS_PRIVATE_KEY_PAYLOAD=8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb +API_ADDRESS=127.0.0.1:8080 +KURA_INIT_MODE=strict +KURA_STORE_DIR=/store/path/from/env +KURA_DEBUG_OUTPUT_NEW_BLOCKS=false +LOG_LEVEL=DEBUG +LOG_FORMAT=pretty +SNAPSHOT_STORE_DIR=/snapshot/path/from/env +SNAPSHOT_CREATION_ENABLED=false diff --git a/config/tests/fixtures/full.toml b/config/tests/fixtures/full.toml new file mode 100644 index 00000000000..878223301aa --- /dev/null +++ b/config/tests/fixtures/full.toml @@ -0,0 +1,74 @@ +# This config has ALL fields specified (except `extends`) + +chain_id = "0" +public_key = "ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB" +private_key = { digest_function = "ed25519", payload = "8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb" } + +[genesis] +file = "genesis.json" +public_key = "ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB" +private_key = { digest_function = "ed25519", 
payload = "8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb" } + +[network] +address = "localhost:3840" +block_gossip_period = 10_000 +block_gossip_max_size = 4 +transaction_gossip_period = 1_000 +transaction_gossip_max_size = 500 + +[torii] +address = "localhost:5000" +max_content_len = 16 +query_idle_time = 30_000 + +[kura] +init_mode = "strict" +store_dir = "./storage" + +[kura.debug] +output_new_blocks = true + +[[sumeragi.trusted_peers]] +address = "localhost:8081" +public_key = "ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB" + +[sumeragi.debug] +force_soft_fork = true + +[logger] +level = "TRACE" +format = "compact" +tokio_console_address = "127.0.0.1:5555" + +[queue] +capacity = 65536 +capacity_per_user = 65536 +transaction_time_to_live = 100 +future_threshold = 50 + +[snapshot] +creation_enabled = true +create_every = 60_000 +store_dir = "./storage/snapshot" + +[telemetry] +name = "test" +url = "http://test.com" +min_retry_period = 5_000 +max_retry_delay_exponent = 4 + +[telemetry.dev] +out_file = "./dev-telemetry.json5" + +[chain_wide] +max_transactions_in_block = 512 +block_time = 2_000 +commit_time = 4_000 +transaction_limits = {max_instruction_number = 4096, max_wasm_size_bytes = 4194304 } +asset_metadata_limits = { max_len = 1048576, max_entry_byte_size = 4096 } +asset_definition_metadata_limits = { max_len = 1048576, max_entry_byte_size = 4096 } +account_metadata_limits = { max_len = 1048576, max_entry_byte_size = 4096 } +domain_metadata_limits = { max_len = 1048576, max_entry_byte_size = 4096 } +ident_length_limits = { min = 1, max = 128 } +wasm_fuel_limit = 55000000 +wasm_max_memory = 524288000 diff --git a/config/tests/fixtures/inconsistent_genesis.toml b/config/tests/fixtures/inconsistent_genesis.toml new file mode 100644 index 00000000000..e6f38ffd2b6 --- /dev/null +++ b/config/tests/fixtures/inconsistent_genesis.toml @@ -0,0 +1,7 @@ +extends = "base.toml" + +[genesis] +private_key.digest_function = "ed25519" +private_key.payload = "8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb" +# should fail without it: +# file = ... 
diff --git a/config/tests/fixtures/minimal_alone_with_genesis.toml b/config/tests/fixtures/minimal_alone_with_genesis.toml new file mode 100644 index 00000000000..a6689041d21 --- /dev/null +++ b/config/tests/fixtures/minimal_alone_with_genesis.toml @@ -0,0 +1,6 @@ +extends = "base.toml" + +[genesis] +file = "./empty_ok_genesis.json" +private_key.digest_function = "ed25519" +private_key.payload = "8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb" diff --git a/config/tests/fixtures/minimal_file_and_env.env b/config/tests/fixtures/minimal_file_and_env.env new file mode 100644 index 00000000000..7ee9d329ee5 --- /dev/null +++ b/config/tests/fixtures/minimal_file_and_env.env @@ -0,0 +1 @@ +API_ADDRESS=127.0.0.1:8080 \ No newline at end of file diff --git a/config/tests/fixtures/minimal_file_and_env.toml b/config/tests/fixtures/minimal_file_and_env.toml new file mode 100644 index 00000000000..abdd50e85c2 --- /dev/null +++ b/config/tests/fixtures/minimal_file_and_env.toml @@ -0,0 +1,14 @@ +extends = "base_trusted_peers.toml" + +chain_id = "0" +public_key = "ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB" +private_key.digest_function = "ed25519" +private_key.payload = "8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb" + +[network] +address = "127.0.0.1:1337" + +[genesis] +public_key = "ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB" + +# `torii.address` should be in ENV diff --git a/config/tests/fixtures/minimal_with_trusted_peers.toml b/config/tests/fixtures/minimal_with_trusted_peers.toml new file mode 100644 index 00000000000..12ebd580cbc --- /dev/null +++ b/config/tests/fixtures/minimal_with_trusted_peers.toml @@ -0,0 +1 @@ +extends = ["base.toml", "base_trusted_peers.toml"] diff --git a/config/tests/fixtures/multiple_extends.1.toml b/config/tests/fixtures/multiple_extends.1.toml new file mode 100644 index 00000000000..46b1262777b --- /dev/null +++ b/config/tests/fixtures/multiple_extends.1.toml @@ -0,0 +1,2 @@ +[logger] +format = "pretty" \ No newline at end of file diff --git a/config/tests/fixtures/multiple_extends.2.toml b/config/tests/fixtures/multiple_extends.2.toml new file mode 100644 index 00000000000..47e9616ccfd --- /dev/null +++ b/config/tests/fixtures/multiple_extends.2.toml @@ -0,0 +1,5 @@ +# sets level +extends = "multiple_extends.2a.toml" + +[logger] +format = "compact" \ No newline at end of file diff --git a/config/tests/fixtures/multiple_extends.2a.toml b/config/tests/fixtures/multiple_extends.2a.toml new file mode 100644 index 00000000000..c7b048bc674 --- /dev/null +++ b/config/tests/fixtures/multiple_extends.2a.toml @@ -0,0 +1,2 @@ +[logger] +level = "DEBUG" \ No newline at end of file diff --git a/config/tests/fixtures/multiple_extends.toml b/config/tests/fixtures/multiple_extends.toml new file mode 100644 index 00000000000..83b87043034 --- /dev/null +++ b/config/tests/fixtures/multiple_extends.toml @@ -0,0 +1,6 @@ +# 1 - sets format, 2 - sets format and level +extends = ["multiple_extends.1.toml", "multiple_extends.2.toml"] + +[logger] +# final value +level = "ERROR" \ No newline at end of file diff --git a/configs/client.template.toml b/configs/client.template.toml new file mode 100644 index 00000000000..3bad84abcc5 --- /dev/null +++ b/configs/client.template.toml @@ -0,0 +1,19 @@ +# chain_id = + +## Might be set via `TORII_URL` env var +# torii_url = + +[basic_auth] 
+# login = +# password = + +[account] +# id = +# public_key = +# private_key = { algorithm = "", payload = "" } + +[transaction] +# time_to_live = "100s" +# status_timeout = "100s" +## Nonce is TODO describe what it is +# nonce = false diff --git a/configs/client/config.json b/configs/client/config.json deleted file mode 100644 index b8a507409ac..00000000000 --- a/configs/client/config.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "CHAIN_ID": "00000000-0000-0000-0000-000000000000", - "PUBLIC_KEY": "ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0", - "PRIVATE_KEY": { - "digest_function": "ed25519", - "payload": "9ac47abf59b356e0bd7dcbbbb4dec080e302156a48ca907e47cb6aea1d32719e7233bfc89dcbd68c19fde6ce6158225298ec1131b6a130d1aeb454c1ab5183c0" - }, - "ACCOUNT_ID": "alice@wonderland", - "BASIC_AUTH": { - "web_login": "mad_hatter", - "password": "ilovetea" - }, - "TORII_API_URL": "http://127.0.0.1:8080/", - "TRANSACTION_TIME_TO_LIVE_MS": 100000, - "TRANSACTION_STATUS_TIMEOUT_MS": 15000, - "ADD_TRANSACTION_NONCE": false -} diff --git a/configs/client/lts/config.json b/configs/client/lts/config.json deleted file mode 100644 index e1763c4d801..00000000000 --- a/configs/client/lts/config.json +++ /dev/null @@ -1,95 +0,0 @@ -{ - "PUBLIC_KEY": null, - "PRIVATE_KEY": null, - "DISABLE_PANIC_TERMINAL_COLORS": false, - "KURA": { - "INIT_MODE": "strict", - "BLOCK_STORE_PATH": "./storage", - "BLOCKS_PER_STORAGE_FILE": 1000, - "ACTOR_CHANNEL_CAPACITY": 100, - "DEBUG_OUTPUT_NEW_BLOCKS": false - }, - "SUMERAGI": { - "KEY_PAIR": null, - "PEER_ID": null, - "BLOCK_TIME_MS": 1000, - "TRUSTED_PEERS": null, - "COMMIT_TIME_LIMIT_MS": 2000, - "TX_RECEIPT_TIME_LIMIT_MS": 500, - "TRANSACTION_LIMITS": { - "max_instruction_number": 4096, - "max_wasm_size_bytes": 4194304 - }, - "ACTOR_CHANNEL_CAPACITY": 100, - "GOSSIP_BATCH_SIZE": 500, - "GOSSIP_PERIOD_MS": 1000 - }, - "TORII": { - "P2P_ADDR": null, - "API_URL": null, - "TELEMETRY_URL": null, - "MAX_TRANSACTION_SIZE": 32768, - "MAX_CONTENT_LEN": 16384000 - }, - "BLOCK_SYNC": { - "GOSSIP_PERIOD_MS": 10000, - "BLOCK_BATCH_SIZE": 4, - "ACTOR_CHANNEL_CAPACITY": 100 - }, - "QUEUE": { - "MAXIMUM_TRANSACTIONS_IN_BLOCK": 8192, - "MAXIMUM_TRANSACTIONS_IN_QUEUE": 65536, - "TRANSACTION_TIME_TO_LIVE_MS": 86400000, - "FUTURE_THRESHOLD_MS": 1000 - }, - "LOGGER": { - "MAX_LOG_LEVEL": "INFO", - "TELEMETRY_CAPACITY": 1000, - "COMPACT_MODE": false, - "LOG_FILE_PATH": null, - "TERMINAL_COLORS": true - }, - "GENESIS": { - "ACCOUNT_PUBLIC_KEY": null, - "ACCOUNT_PRIVATE_KEY": null, - "WAIT_FOR_PEERS_RETRY_COUNT_LIMIT": 100, - "WAIT_FOR_PEERS_RETRY_PERIOD_MS": 500, - "GENESIS_SUBMISSION_DELAY_MS": 1000 - }, - "WSV": { - "ASSET_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "ASSET_DEFINITION_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "ACCOUNT_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "DOMAIN_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "IDENT_LENGTH_LIMITS": { - "min": 1, - "max": 128 - }, - "WASM_RUNTIME_CONFIG": { - "FUEL_LIMIT": 1000000, - "MAX_MEMORY": 524288000 - } - }, - "NETWORK": { - "ACTOR_CHANNEL_CAPACITY": 100 - }, - "TELEMETRY": { - "NAME": null, - "URL": null, - "MIN_RETRY_PERIOD": 1, - "MAX_RETRY_DELAY_EXPONENT": 4, - "FILE": null - } -} diff --git a/configs/client/stable/config.json b/configs/client/stable/config.json deleted file mode 100644 index e1763c4d801..00000000000 --- a/configs/client/stable/config.json +++ /dev/null @@ 
-1,95 +0,0 @@ -{ - "PUBLIC_KEY": null, - "PRIVATE_KEY": null, - "DISABLE_PANIC_TERMINAL_COLORS": false, - "KURA": { - "INIT_MODE": "strict", - "BLOCK_STORE_PATH": "./storage", - "BLOCKS_PER_STORAGE_FILE": 1000, - "ACTOR_CHANNEL_CAPACITY": 100, - "DEBUG_OUTPUT_NEW_BLOCKS": false - }, - "SUMERAGI": { - "KEY_PAIR": null, - "PEER_ID": null, - "BLOCK_TIME_MS": 1000, - "TRUSTED_PEERS": null, - "COMMIT_TIME_LIMIT_MS": 2000, - "TX_RECEIPT_TIME_LIMIT_MS": 500, - "TRANSACTION_LIMITS": { - "max_instruction_number": 4096, - "max_wasm_size_bytes": 4194304 - }, - "ACTOR_CHANNEL_CAPACITY": 100, - "GOSSIP_BATCH_SIZE": 500, - "GOSSIP_PERIOD_MS": 1000 - }, - "TORII": { - "P2P_ADDR": null, - "API_URL": null, - "TELEMETRY_URL": null, - "MAX_TRANSACTION_SIZE": 32768, - "MAX_CONTENT_LEN": 16384000 - }, - "BLOCK_SYNC": { - "GOSSIP_PERIOD_MS": 10000, - "BLOCK_BATCH_SIZE": 4, - "ACTOR_CHANNEL_CAPACITY": 100 - }, - "QUEUE": { - "MAXIMUM_TRANSACTIONS_IN_BLOCK": 8192, - "MAXIMUM_TRANSACTIONS_IN_QUEUE": 65536, - "TRANSACTION_TIME_TO_LIVE_MS": 86400000, - "FUTURE_THRESHOLD_MS": 1000 - }, - "LOGGER": { - "MAX_LOG_LEVEL": "INFO", - "TELEMETRY_CAPACITY": 1000, - "COMPACT_MODE": false, - "LOG_FILE_PATH": null, - "TERMINAL_COLORS": true - }, - "GENESIS": { - "ACCOUNT_PUBLIC_KEY": null, - "ACCOUNT_PRIVATE_KEY": null, - "WAIT_FOR_PEERS_RETRY_COUNT_LIMIT": 100, - "WAIT_FOR_PEERS_RETRY_PERIOD_MS": 500, - "GENESIS_SUBMISSION_DELAY_MS": 1000 - }, - "WSV": { - "ASSET_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "ASSET_DEFINITION_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "ACCOUNT_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "DOMAIN_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "IDENT_LENGTH_LIMITS": { - "min": 1, - "max": 128 - }, - "WASM_RUNTIME_CONFIG": { - "FUEL_LIMIT": 1000000, - "MAX_MEMORY": 524288000 - } - }, - "NETWORK": { - "ACTOR_CHANNEL_CAPACITY": 100 - }, - "TELEMETRY": { - "NAME": null, - "URL": null, - "MIN_RETRY_PERIOD": 1, - "MAX_RETRY_DELAY_EXPONENT": 4, - "FILE": null - } -} diff --git a/configs/peer.template.toml b/configs/peer.template.toml new file mode 100644 index 00000000000..855e44c6c0b --- /dev/null +++ b/configs/peer.template.toml @@ -0,0 +1,67 @@ +## For the full reference, go to (TODO put link) + +## You can use another TOML file to extend from. 
+## For a single file extension: +# extends = "./base.toml" +## Or, for a chain of extensions: +# extends = ["base-1.toml", "base-2.toml"] + +# chain_id = +# public_key = +# private_key = { +# algorithm = , +# payload = +# } + +[genesis] +# file = +# public_key = +# private_key = { algorithm = "", payload = "" } + +[network] +# address = +# block_gossip_period = "10s" +# block_gossip_max_size = 4 +# transaction_gossip_period = "1s" +# transaction_gossip_max_size = 500 + +[torii] +# address = +# max_content_len = "16mb" +# query_idle_time = "30s" + +[kura] +# init_mode = "strict" +# store_dir = "./storage" + +## Add more of this section for each trusted peer +# [[sumeragi.trusted_peers]] +# address = +# public_key = + +[logger] +# level = "INFO" +# format = "full" +# tokio_console_address = "127.0.0.1:5555" + +## Transactions Queue +[queue] +# capacity = 65536 +# capacity_per_user = 65536 +# transaction_time_to_live = "1day" +# future_threshold = "1s" + +[snapshot] +# creation_enabled = true +# create_every = "1min" +# store_dir = "./storage/snapshot" + +[telemetry] +# name = +# url = +# min_retry_period = "1s" +# max_retry_delay_exponent = 4 + +[telemetry.dev] +## FIXME: is it JSON5? +# out_file = "./dev-telemetry.json5" diff --git a/configs/peer/config.json b/configs/peer/config.json deleted file mode 100644 index 649b25f31c4..00000000000 --- a/configs/peer/config.json +++ /dev/null @@ -1,95 +0,0 @@ -{ - "CHAIN_ID": null, - "PUBLIC_KEY": null, - "PRIVATE_KEY": null, - "KURA": { - "INIT_MODE": "strict", - "BLOCK_STORE_PATH": "./storage", - "DEBUG_OUTPUT_NEW_BLOCKS": false - }, - "SUMERAGI": { - "KEY_PAIR": null, - "PEER_ID": null, - "BLOCK_TIME_MS": 2000, - "TRUSTED_PEERS": null, - "COMMIT_TIME_LIMIT_MS": 4000, - "MAX_TRANSACTIONS_IN_BLOCK": 512, - "ACTOR_CHANNEL_CAPACITY": 100, - "GOSSIP_BATCH_SIZE": 500, - "GOSSIP_PERIOD_MS": 1000 - }, - "TORII": { - "P2P_ADDR": null, - "API_URL": null, - "MAX_TRANSACTION_SIZE": 32768, - "MAX_CONTENT_LEN": 16384000 - }, - "BLOCK_SYNC": { - "GOSSIP_PERIOD_MS": 10000, - "BLOCK_BATCH_SIZE": 4, - "ACTOR_CHANNEL_CAPACITY": 100 - }, - "QUEUE": { - "MAX_TRANSACTIONS_IN_QUEUE": 65536, - "MAX_TRANSACTIONS_IN_QUEUE_PER_USER": 65536, - "TRANSACTION_TIME_TO_LIVE_MS": 86400000, - "FUTURE_THRESHOLD_MS": 1000 - }, - "LOGGER": { - "LEVEL": "INFO", - "FORMAT": "full" - }, - "GENESIS": { - "PUBLIC_KEY": null, - "PRIVATE_KEY": null, - "FILE": null - }, - "WSV": { - "ASSET_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "ASSET_DEFINITION_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "ACCOUNT_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "DOMAIN_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "IDENT_LENGTH_LIMITS": { - "min": 1, - "max": 128 - }, - "TRANSACTION_LIMITS": { - "max_instruction_number": 4096, - "max_wasm_size_bytes": 4194304 - }, - "WASM_RUNTIME_CONFIG": { - "FUEL_LIMIT": 55000000, - "MAX_MEMORY": 524288000 - } - }, - "NETWORK": { - "ACTOR_CHANNEL_CAPACITY": 100 - }, - "TELEMETRY": { - "NAME": null, - "URL": null, - "MIN_RETRY_PERIOD": 1, - "MAX_RETRY_DELAY_EXPONENT": 4, - "FILE": null - }, - "SNAPSHOT": { - "CREATE_EVERY_MS": 60000, - "DIR_PATH": "./storage", - "CREATION_ENABLED": true - }, - "LIVE_QUERY_STORE": { - "QUERY_IDLE_TIME_MS": 30000 - } -} diff --git a/configs/peer/lts/config.json b/configs/peer/lts/config.json deleted file mode 100644 index ef36a9f525c..00000000000 --- a/configs/peer/lts/config.json +++ /dev/null @@ -1,98 
+0,0 @@ -{ - "PUBLIC_KEY": null, - "PRIVATE_KEY": null, - "DISABLE_PANIC_TERMINAL_COLORS": false, - "KURA": { - "INIT_MODE": "strict", - "BLOCK_STORE_PATH": "./storage", - "BLOCKS_PER_STORAGE_FILE": 1000, - "ACTOR_CHANNEL_CAPACITY": 100, - "DEBUG_OUTPUT_NEW_BLOCKS": false - }, - "SUMERAGI": { - "KEY_PAIR": null, - "PEER_ID": null, - "BLOCK_TIME_MS": 2000, - "TRUSTED_PEERS": null, - "COMMIT_TIME_LIMIT_MS": 4000, - "MAX_TRANSACTIONS_IN_BLOCK": 512, - "ACTOR_CHANNEL_CAPACITY": 100, - "GOSSIP_BATCH_SIZE": 500, - "GOSSIP_PERIOD_MS": 1000 - }, - "TORII": { - "P2P_ADDR": null, - "API_URL": null, - "MAX_TRANSACTION_SIZE": 32768, - "MAX_CONTENT_LEN": 16384000, - "FETCH_SIZE": 10, - "QUERY_IDLE_TIME_MS": 30000 - }, - "BLOCK_SYNC": { - "GOSSIP_PERIOD_MS": 10000, - "BLOCK_BATCH_SIZE": 4, - "ACTOR_CHANNEL_CAPACITY": 100 - }, - "QUEUE": { - "MAX_TRANSACTIONS_IN_QUEUE": 65536, - "MAX_TRANSACTIONS_IN_QUEUE_PER_USER": 65536, - "TRANSACTION_TIME_TO_LIVE_MS": 86400000, - "FUTURE_THRESHOLD_MS": 1000 - }, - "LOGGER": { - "MAX_LOG_LEVEL": "INFO", - "TELEMETRY_CAPACITY": 1000, - "COMPACT_MODE": false, - "LOG_FILE_PATH": null, - "TERMINAL_COLORS": true - }, - "GENESIS": { - "ACCOUNT_PUBLIC_KEY": null, - "ACCOUNT_PRIVATE_KEY": null - }, - "WSV": { - "ASSET_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "ASSET_DEFINITION_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "ACCOUNT_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "DOMAIN_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "IDENT_LENGTH_LIMITS": { - "min": 1, - "max": 128 - }, - "TRANSACTION_LIMITS": { - "max_instruction_number": 4096, - "max_wasm_size_bytes": 4194304 - }, - "WASM_RUNTIME_CONFIG": { - "FUEL_LIMIT": 23000000, - "MAX_MEMORY": 524288000 - } - }, - "NETWORK": { - "ACTOR_CHANNEL_CAPACITY": 100 - }, - "TELEMETRY": { - "NAME": null, - "URL": null, - "MIN_RETRY_PERIOD": 1, - "MAX_RETRY_DELAY_EXPONENT": 4, - "FILE": null - }, - "SNAPSHOT": { - "CREATE_EVERY_MS": 60000, - "DIR_PATH": "./storage", - "CREATION_ENABLED": true - } -} diff --git a/configs/peer/lts/executor.wasm b/configs/peer/lts/executor.wasm deleted file mode 100644 index 544c9e29dfa..00000000000 Binary files a/configs/peer/lts/executor.wasm and /dev/null differ diff --git a/configs/peer/lts/genesis.json b/configs/peer/lts/genesis.json deleted file mode 100644 index 2ca5d0365ed..00000000000 --- a/configs/peer/lts/genesis.json +++ /dev/null @@ -1,201 +0,0 @@ -{ - "transactions": [ - [ - { - "Register": { - "NewDomain": { - "id": "wonderland", - "logo": null, - "metadata": { - "key": { - "String": "value" - } - } - } - } - }, - { - "Register": { - "NewAccount": { - "id": "alice@wonderland", - "signatories": [ - "ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0" - ], - "metadata": { - "key": { - "String": "value" - } - } - } - } - }, - { - "Register": { - "NewAccount": { - "id": "bob@wonderland", - "signatories": [ - "ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0" - ], - "metadata": { - "key": { - "String": "value" - } - } - } - } - }, - { - "Register": { - "NewAssetDefinition": { - "id": "rose#wonderland", - "value_type": "Quantity", - "mintable": "Infinitely", - "logo": null, - "metadata": {} - } - } - }, - { - "Register": { - "NewDomain": { - "id": "garden_of_live_flowers", - "logo": null, - "metadata": {} - } - } - }, - { - "Register": { - "NewAccount": { - "id": "carpenter@garden_of_live_flowers", - "signatories": [ - 
"ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0" - ], - "metadata": {} - } - } - }, - { - "Register": { - "NewAssetDefinition": { - "id": "cabbage#garden_of_live_flowers", - "value_type": "Quantity", - "mintable": "Infinitely", - "logo": null, - "metadata": {} - } - } - }, - { - "Mint": { - "object": "13_u32", - "destination_id": { - "AssetId": "rose##alice@wonderland" - } - } - }, - { - "Mint": { - "object": "44_u32", - "destination_id": { - "AssetId": "cabbage#garden_of_live_flowers#alice@wonderland" - } - } - }, - { - "Grant": { - "object": { - "PermissionToken": { - "definition_id": "CanSetParameters", - "payload": null - } - }, - "destination_id": { - "AccountId": "alice@wonderland" - } - } - }, - { - "Sequence": [ - { - "NewParameter": { - "Parameter": "?MaxTransactionsInBlock=512" - } - }, - { - "NewParameter": { - "Parameter": "?BlockTime=2000" - } - }, - { - "NewParameter": { - "Parameter": "?CommitTimeLimit=4000" - } - }, - { - "NewParameter": { - "Parameter": "?TransactionLimits=4096,4194304_TL" - } - }, - { - "NewParameter": { - "Parameter": "?WSVAssetMetadataLimits=1048576,4096_ML" - } - }, - { - "NewParameter": { - "Parameter": "?WSVAssetDefinitionMetadataLimits=1048576,4096_ML" - } - }, - { - "NewParameter": { - "Parameter": "?WSVAccountMetadataLimits=1048576,4096_ML" - } - }, - { - "NewParameter": { - "Parameter": "?WSVDomainMetadataLimits=1048576,4096_ML" - } - }, - { - "NewParameter": { - "Parameter": "?WSVIdentLengthLimits=1,128_LL" - } - }, - { - "NewParameter": { - "Parameter": "?WASMFuelLimit=23000000" - } - }, - { - "NewParameter": { - "Parameter": "?WASMMaxMemory=524288000" - } - } - ] - }, - { - "Register": { - "NewRole": { - "id": "ALICE_METADATA_ACCESS", - "permissions": [ - { - "definition_id": "CanRemoveKeyValueInUserAccount", - "payload": { - "account_id": "alice@wonderland" - } - }, - { - "definition_id": "CanSetKeyValueInUserAccount", - "payload": { - "account_id": "alice@wonderland" - } - } - ] - } - } - } - ] - ], - "executor": "./executor.wasm" -} diff --git a/configs/peer/stable/config.json b/configs/peer/stable/config.json deleted file mode 100644 index ef36a9f525c..00000000000 --- a/configs/peer/stable/config.json +++ /dev/null @@ -1,98 +0,0 @@ -{ - "PUBLIC_KEY": null, - "PRIVATE_KEY": null, - "DISABLE_PANIC_TERMINAL_COLORS": false, - "KURA": { - "INIT_MODE": "strict", - "BLOCK_STORE_PATH": "./storage", - "BLOCKS_PER_STORAGE_FILE": 1000, - "ACTOR_CHANNEL_CAPACITY": 100, - "DEBUG_OUTPUT_NEW_BLOCKS": false - }, - "SUMERAGI": { - "KEY_PAIR": null, - "PEER_ID": null, - "BLOCK_TIME_MS": 2000, - "TRUSTED_PEERS": null, - "COMMIT_TIME_LIMIT_MS": 4000, - "MAX_TRANSACTIONS_IN_BLOCK": 512, - "ACTOR_CHANNEL_CAPACITY": 100, - "GOSSIP_BATCH_SIZE": 500, - "GOSSIP_PERIOD_MS": 1000 - }, - "TORII": { - "P2P_ADDR": null, - "API_URL": null, - "MAX_TRANSACTION_SIZE": 32768, - "MAX_CONTENT_LEN": 16384000, - "FETCH_SIZE": 10, - "QUERY_IDLE_TIME_MS": 30000 - }, - "BLOCK_SYNC": { - "GOSSIP_PERIOD_MS": 10000, - "BLOCK_BATCH_SIZE": 4, - "ACTOR_CHANNEL_CAPACITY": 100 - }, - "QUEUE": { - "MAX_TRANSACTIONS_IN_QUEUE": 65536, - "MAX_TRANSACTIONS_IN_QUEUE_PER_USER": 65536, - "TRANSACTION_TIME_TO_LIVE_MS": 86400000, - "FUTURE_THRESHOLD_MS": 1000 - }, - "LOGGER": { - "MAX_LOG_LEVEL": "INFO", - "TELEMETRY_CAPACITY": 1000, - "COMPACT_MODE": false, - "LOG_FILE_PATH": null, - "TERMINAL_COLORS": true - }, - "GENESIS": { - "ACCOUNT_PUBLIC_KEY": null, - "ACCOUNT_PRIVATE_KEY": null - }, - "WSV": { - "ASSET_METADATA_LIMITS": { - "max_len": 1048576, - 
"max_entry_byte_size": 4096 - }, - "ASSET_DEFINITION_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "ACCOUNT_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "DOMAIN_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "IDENT_LENGTH_LIMITS": { - "min": 1, - "max": 128 - }, - "TRANSACTION_LIMITS": { - "max_instruction_number": 4096, - "max_wasm_size_bytes": 4194304 - }, - "WASM_RUNTIME_CONFIG": { - "FUEL_LIMIT": 23000000, - "MAX_MEMORY": 524288000 - } - }, - "NETWORK": { - "ACTOR_CHANNEL_CAPACITY": 100 - }, - "TELEMETRY": { - "NAME": null, - "URL": null, - "MIN_RETRY_PERIOD": 1, - "MAX_RETRY_DELAY_EXPONENT": 4, - "FILE": null - }, - "SNAPSHOT": { - "CREATE_EVERY_MS": 60000, - "DIR_PATH": "./storage", - "CREATION_ENABLED": true - } -} diff --git a/configs/peer/stable/executor.wasm b/configs/peer/stable/executor.wasm deleted file mode 100644 index 544c9e29dfa..00000000000 Binary files a/configs/peer/stable/executor.wasm and /dev/null differ diff --git a/configs/peer/stable/genesis.json b/configs/peer/stable/genesis.json deleted file mode 100644 index 2ca5d0365ed..00000000000 --- a/configs/peer/stable/genesis.json +++ /dev/null @@ -1,201 +0,0 @@ -{ - "transactions": [ - [ - { - "Register": { - "NewDomain": { - "id": "wonderland", - "logo": null, - "metadata": { - "key": { - "String": "value" - } - } - } - } - }, - { - "Register": { - "NewAccount": { - "id": "alice@wonderland", - "signatories": [ - "ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0" - ], - "metadata": { - "key": { - "String": "value" - } - } - } - } - }, - { - "Register": { - "NewAccount": { - "id": "bob@wonderland", - "signatories": [ - "ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0" - ], - "metadata": { - "key": { - "String": "value" - } - } - } - } - }, - { - "Register": { - "NewAssetDefinition": { - "id": "rose#wonderland", - "value_type": "Quantity", - "mintable": "Infinitely", - "logo": null, - "metadata": {} - } - } - }, - { - "Register": { - "NewDomain": { - "id": "garden_of_live_flowers", - "logo": null, - "metadata": {} - } - } - }, - { - "Register": { - "NewAccount": { - "id": "carpenter@garden_of_live_flowers", - "signatories": [ - "ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0" - ], - "metadata": {} - } - } - }, - { - "Register": { - "NewAssetDefinition": { - "id": "cabbage#garden_of_live_flowers", - "value_type": "Quantity", - "mintable": "Infinitely", - "logo": null, - "metadata": {} - } - } - }, - { - "Mint": { - "object": "13_u32", - "destination_id": { - "AssetId": "rose##alice@wonderland" - } - } - }, - { - "Mint": { - "object": "44_u32", - "destination_id": { - "AssetId": "cabbage#garden_of_live_flowers#alice@wonderland" - } - } - }, - { - "Grant": { - "object": { - "PermissionToken": { - "definition_id": "CanSetParameters", - "payload": null - } - }, - "destination_id": { - "AccountId": "alice@wonderland" - } - } - }, - { - "Sequence": [ - { - "NewParameter": { - "Parameter": "?MaxTransactionsInBlock=512" - } - }, - { - "NewParameter": { - "Parameter": "?BlockTime=2000" - } - }, - { - "NewParameter": { - "Parameter": "?CommitTimeLimit=4000" - } - }, - { - "NewParameter": { - "Parameter": "?TransactionLimits=4096,4194304_TL" - } - }, - { - "NewParameter": { - "Parameter": "?WSVAssetMetadataLimits=1048576,4096_ML" - } - }, - { - "NewParameter": { - "Parameter": "?WSVAssetDefinitionMetadataLimits=1048576,4096_ML" - } - }, - { - "NewParameter": { - 
"Parameter": "?WSVAccountMetadataLimits=1048576,4096_ML" - } - }, - { - "NewParameter": { - "Parameter": "?WSVDomainMetadataLimits=1048576,4096_ML" - } - }, - { - "NewParameter": { - "Parameter": "?WSVIdentLengthLimits=1,128_LL" - } - }, - { - "NewParameter": { - "Parameter": "?WASMFuelLimit=23000000" - } - }, - { - "NewParameter": { - "Parameter": "?WASMMaxMemory=524288000" - } - } - ] - }, - { - "Register": { - "NewRole": { - "id": "ALICE_METADATA_ACCESS", - "permissions": [ - { - "definition_id": "CanRemoveKeyValueInUserAccount", - "payload": { - "account_id": "alice@wonderland" - } - }, - { - "definition_id": "CanSetKeyValueInUserAccount", - "payload": { - "account_id": "alice@wonderland" - } - } - ] - } - } - } - ] - ], - "executor": "./executor.wasm" -} diff --git a/configs/prometheus.yml b/configs/prometheus.template.yml similarity index 100% rename from configs/prometheus.yml rename to configs/prometheus.template.yml diff --git a/configs/swarm/client.toml b/configs/swarm/client.toml new file mode 100644 index 00000000000..bc2a82df05f --- /dev/null +++ b/configs/swarm/client.toml @@ -0,0 +1,11 @@ +chain_id = "00000000-0000-0000-0000-000000000000" +torii_url = "http://127.0.0.1:8080/" + +[basic_auth] +web_login = "mad_hatter" +password = "ilovetea" + +[account] +id = "alice@wonderland" +public_key = "ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0" +private_key = { digest_function = "ed25519", payload = "9ac47abf59b356e0bd7dcbbbb4dec080e302156a48ca907e47cb6aea1d32719e7233bfc89dcbd68c19fde6ce6158225298ec1131b6a130d1aeb454c1ab5183c0" } diff --git a/docker-compose.local.yml b/configs/swarm/docker-compose.local.yml similarity index 50% rename from docker-compose.local.yml rename to configs/swarm/docker-compose.local.yml index 6c2cd371db2..fe10bcaabb1 100644 --- a/docker-compose.local.yml +++ b/configs/swarm/docker-compose.local.yml @@ -4,24 +4,25 @@ version: '3.8' services: iroha0: - build: ./ + build: ../.. 
platform: linux/amd64 environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb"}' - TORII_P2P_ADDR: 0.0.0.0:1337 - TORII_API_URL: 0.0.0.0:8080 - IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 - IROHA_GENESIS_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"82b3bde54aebeca4146257da0de8d59d8e46d5fe34887dcd8072866792fcb3ad4164bf554923ece1fd412d241036d863a6ae430476c898248b8237d77534cfc4"}' - IROHA_GENESIS_FILE: /config/genesis.json + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: 8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb + P2P_ADDRESS: 0.0.0.0:1337 + API_ADDRESS: 0.0.0.0:8080 + GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 + GENESIS_PRIVATE_KEY_DIGEST: ed25519 + GENESIS_PRIVATE_KEY_PAYLOAD: 82b3bde54aebeca4146257da0de8d59d8e46d5fe34887dcd8072866792fcb3ad4164bf554923ece1fd412d241036d863a6ae430476c898248b8237d77534cfc4 + GENESIS_FILE: /config/genesis.json SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338","public_key":"ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F"},{"address":"iroha3:1340","public_key":"ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61"},{"address":"iroha2:1339","public_key":"ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736"}]' ports: - 1337:1337 - 8080:8080 volumes: - - ./configs/peer:/config + - ./:/config init: true command: iroha --submit-genesis healthcheck: @@ -31,22 +32,22 @@ services: retries: 30 start_period: 4s iroha1: - build: ./ + build: ../.. 
platform: linux/amd64 environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"c02ffad5e455e7ec620d74de5769681e4d8385906bce5a437eb67452a9efbbc2815bbdc9775d28c3633269b25f22d048e2aa2e36017cbe5ad85f15220beb6f6f"}' - TORII_P2P_ADDR: 0.0.0.0:1338 - TORII_API_URL: 0.0.0.0:8081 - IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: c02ffad5e455e7ec620d74de5769681e4d8385906bce5a437eb67452a9efbbc2815bbdc9775d28c3633269b25f22d048e2aa2e36017cbe5ad85f15220beb6f6f + P2P_ADDRESS: 0.0.0.0:1338 + API_ADDRESS: 0.0.0.0:8081 + GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha3:1340","public_key":"ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61"},{"address":"iroha2:1339","public_key":"ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736"}]' ports: - 1338:1338 - 8081:8081 volumes: - - ./configs/peer:/config + - ./:/config init: true healthcheck: test: test $(curl -s http://127.0.0.1:8081/status/blocks) -gt 0 @@ -55,22 +56,22 @@ services: retries: 30 start_period: 4s iroha2: - build: ./ + build: ../.. platform: linux/amd64 environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736 - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"29c5ed1409cb10fd791bc4ff8a6cb5e22a5fae7e36f448ef3ea2988b1319a88bf417e0371e6adb32fd66749477402b1ab67f84a8e9b082e997980cc91f327736"}' - TORII_P2P_ADDR: 0.0.0.0:1339 - TORII_API_URL: 0.0.0.0:8082 - IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736 + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: 29c5ed1409cb10fd791bc4ff8a6cb5e22a5fae7e36f448ef3ea2988b1319a88bf417e0371e6adb32fd66749477402b1ab67f84a8e9b082e997980cc91f327736 + P2P_ADDRESS: 0.0.0.0:1339 + API_ADDRESS: 0.0.0.0:8082 + GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338","public_key":"ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F"},{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha3:1340","public_key":"ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61"}]' ports: - 1339:1339 - 8082:8082 volumes: - - ./configs/peer:/config + - ./:/config init: true healthcheck: test: test $(curl -s http://127.0.0.1:8082/status/blocks) -gt 0 @@ -79,22 +80,22 @@ services: retries: 30 start_period: 4s iroha3: - build: ./ + build: ../.. 
platform: linux/amd64 environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61 - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"5eed4855fad183c451aac39dfc50831607e4cf408c98e2b977f3ce4a2df42ce2a66522370d60b9c09e79ade2e9bb1ef2e78733a944b999b3a6aee687ce476d61"}' - TORII_P2P_ADDR: 0.0.0.0:1340 - TORII_API_URL: 0.0.0.0:8083 - IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61 + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: 5eed4855fad183c451aac39dfc50831607e4cf408c98e2b977f3ce4a2df42ce2a66522370d60b9c09e79ade2e9bb1ef2e78733a944b999b3a6aee687ce476d61 + P2P_ADDRESS: 0.0.0.0:1340 + API_ADDRESS: 0.0.0.0:8083 + GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338","public_key":"ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F"},{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha2:1339","public_key":"ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736"}]' ports: - 1340:1340 - 8083:8083 volumes: - - ./configs/peer:/config + - ./:/config init: true healthcheck: test: test $(curl -s http://127.0.0.1:8083/status/blocks) -gt 0 diff --git a/configs/swarm/docker-compose.single.yml b/configs/swarm/docker-compose.single.yml new file mode 100644 index 00000000000..5af2868b817 --- /dev/null +++ b/configs/swarm/docker-compose.single.yml @@ -0,0 +1,32 @@ +# This file is generated by iroha_swarm. +# Do not edit it manually. + +version: '3.8' +services: + iroha0: + build: ../.. 
+ platform: linux/amd64 + environment: + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: 8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb + P2P_ADDRESS: 0.0.0.0:1337 + API_ADDRESS: 0.0.0.0:8080 + GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 + GENESIS_PRIVATE_KEY_DIGEST: ed25519 + GENESIS_PRIVATE_KEY_PAYLOAD: 82b3bde54aebeca4146257da0de8d59d8e46d5fe34887dcd8072866792fcb3ad4164bf554923ece1fd412d241036d863a6ae430476c898248b8237d77534cfc4 + GENESIS_FILE: /config/genesis.json + ports: + - 1337:1337 + - 8080:8080 + volumes: + - ./:/config + init: true + command: iroha --submit-genesis + healthcheck: + test: test $(curl -s http://127.0.0.1:8080/status/blocks) -gt 0 + interval: 2s + timeout: 1s + retries: 30 + start_period: 4s diff --git a/docker-compose.yml b/configs/swarm/docker-compose.yml similarity index 52% rename from docker-compose.yml rename to configs/swarm/docker-compose.yml index af679a88066..c21b8300a89 100644 --- a/docker-compose.yml +++ b/configs/swarm/docker-compose.yml @@ -7,21 +7,22 @@ services: image: hyperledger/iroha2:dev platform: linux/amd64 environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb"}' - TORII_P2P_ADDR: 0.0.0.0:1337 - TORII_API_URL: 0.0.0.0:8080 - IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 - IROHA_GENESIS_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"82b3bde54aebeca4146257da0de8d59d8e46d5fe34887dcd8072866792fcb3ad4164bf554923ece1fd412d241036d863a6ae430476c898248b8237d77534cfc4"}' - IROHA_GENESIS_FILE: /config/genesis.json + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: 8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb + P2P_ADDRESS: 0.0.0.0:1337 + API_ADDRESS: 0.0.0.0:8080 + GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 + GENESIS_PRIVATE_KEY_DIGEST: ed25519 + GENESIS_PRIVATE_KEY_PAYLOAD: 82b3bde54aebeca4146257da0de8d59d8e46d5fe34887dcd8072866792fcb3ad4164bf554923ece1fd412d241036d863a6ae430476c898248b8237d77534cfc4 + GENESIS_FILE: /config/genesis.json SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338","public_key":"ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F"},{"address":"iroha3:1340","public_key":"ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61"},{"address":"iroha2:1339","public_key":"ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736"}]' ports: - 1337:1337 - 8080:8080 volumes: - - ./configs/peer:/config + - ./:/config init: true command: iroha --submit-genesis healthcheck: @@ -34,19 +35,19 @@ services: image: hyperledger/iroha2:dev platform: linux/amd64 environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: 
ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"c02ffad5e455e7ec620d74de5769681e4d8385906bce5a437eb67452a9efbbc2815bbdc9775d28c3633269b25f22d048e2aa2e36017cbe5ad85f15220beb6f6f"}' - TORII_P2P_ADDR: 0.0.0.0:1338 - TORII_API_URL: 0.0.0.0:8081 - IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: c02ffad5e455e7ec620d74de5769681e4d8385906bce5a437eb67452a9efbbc2815bbdc9775d28c3633269b25f22d048e2aa2e36017cbe5ad85f15220beb6f6f + P2P_ADDRESS: 0.0.0.0:1338 + API_ADDRESS: 0.0.0.0:8081 + GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha3:1340","public_key":"ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61"},{"address":"iroha2:1339","public_key":"ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736"}]' ports: - 1338:1338 - 8081:8081 volumes: - - ./configs/peer:/config + - ./:/config init: true healthcheck: test: test $(curl -s http://127.0.0.1:8081/status/blocks) -gt 0 @@ -58,19 +59,19 @@ services: image: hyperledger/iroha2:dev platform: linux/amd64 environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736 - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"29c5ed1409cb10fd791bc4ff8a6cb5e22a5fae7e36f448ef3ea2988b1319a88bf417e0371e6adb32fd66749477402b1ab67f84a8e9b082e997980cc91f327736"}' - TORII_P2P_ADDR: 0.0.0.0:1339 - TORII_API_URL: 0.0.0.0:8082 - IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736 + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: 29c5ed1409cb10fd791bc4ff8a6cb5e22a5fae7e36f448ef3ea2988b1319a88bf417e0371e6adb32fd66749477402b1ab67f84a8e9b082e997980cc91f327736 + P2P_ADDRESS: 0.0.0.0:1339 + API_ADDRESS: 0.0.0.0:8082 + GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338","public_key":"ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F"},{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha3:1340","public_key":"ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61"}]' ports: - 1339:1339 - 8082:8082 volumes: - - ./configs/peer:/config + - ./:/config init: true healthcheck: test: test $(curl -s http://127.0.0.1:8082/status/blocks) -gt 0 @@ -82,19 +83,19 @@ services: image: hyperledger/iroha2:dev platform: linux/amd64 environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61 - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"5eed4855fad183c451aac39dfc50831607e4cf408c98e2b977f3ce4a2df42ce2a66522370d60b9c09e79ade2e9bb1ef2e78733a944b999b3a6aee687ce476d61"}' - TORII_P2P_ADDR: 
0.0.0.0:1340 - TORII_API_URL: 0.0.0.0:8083 - IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61 + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: 5eed4855fad183c451aac39dfc50831607e4cf408c98e2b977f3ce4a2df42ce2a66522370d60b9c09e79ade2e9bb1ef2e78733a944b999b3a6aee687ce476d61 + P2P_ADDRESS: 0.0.0.0:1340 + API_ADDRESS: 0.0.0.0:8083 + GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha1:1338","public_key":"ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F"},{"address":"iroha0:1337","public_key":"ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB"},{"address":"iroha2:1339","public_key":"ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736"}]' ports: - 1340:1340 - 8083:8083 volumes: - - ./configs/peer:/config + - ./:/config init: true healthcheck: test: test $(curl -s http://127.0.0.1:8083/status/blocks) -gt 0 diff --git a/configs/peer/executor.wasm b/configs/swarm/executor.wasm similarity index 100% rename from configs/peer/executor.wasm rename to configs/swarm/executor.wasm diff --git a/configs/peer/genesis.json b/configs/swarm/genesis.json similarity index 100% rename from configs/peer/genesis.json rename to configs/swarm/genesis.json diff --git a/core/benches/blocks/common.rs b/core/benches/blocks/common.rs index 1bd989de4a4..a81cd11a8e3 100644 --- a/core/benches/blocks/common.rs +++ b/core/benches/blocks/common.rs @@ -27,7 +27,7 @@ pub fn create_block( account_id: AccountId, key_pair: &KeyPair, ) -> CommittedBlock { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let transaction = TransactionBuilder::new(chain_id.clone(), account_id) .with_instructions(instructions) @@ -185,12 +185,12 @@ pub fn build_wsv( ); let mut wsv = WorldStateView::new(World::with([domain], UniqueVec::new()), kura, query_handle); wsv.config.transaction_limits = TransactionLimits::new(u64::MAX, u64::MAX); - wsv.config.wasm_runtime_config.fuel_limit = u64::MAX; - wsv.config.wasm_runtime_config.max_memory = u32::MAX; + wsv.config.wasm_runtime.fuel_limit = u64::MAX; + wsv.config.wasm_runtime.max_memory_bytes = u32::MAX; { let path_to_executor = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) - .join("../configs/peer/executor.wasm"); + .join("../configs/swarm/executor.wasm"); let wasm = std::fs::read(&path_to_executor) .unwrap_or_else(|_| panic!("Failed to read file: {}", path_to_executor.display())); let executor = Executor::new(WasmSmartContract::from_compiled(wasm)); diff --git a/core/benches/kura.rs b/core/benches/kura.rs index 9dd90d7b268..5ee45f62556 100644 --- a/core/benches/kura.rs +++ b/core/benches/kura.rs @@ -4,7 +4,7 @@ use std::str::FromStr as _; use byte_unit::Byte; use criterion::{criterion_group, criterion_main, Criterion}; -use iroha_config::kura::Configuration; +use iroha_config::parameters::actual::Kura as Config; use iroha_core::{ block::*, kura::{BlockStore, LockStatus}, @@ -19,7 +19,7 @@ use iroha_primitives::unique_vec::UniqueVec; use tokio::{fs, runtime::Runtime}; async fn measure_block_size_for_n_executors(n_executors: u32) { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let alice_id = AccountId::from_str("alice@test").expect("tested"); let bob_id = AccountId::from_str("bob@test").expect("tested"); @@ -40,10 +40,10 @@ async 
fn measure_block_size_for_n_executors(n_executors: u32) { let tx = AcceptedTransaction::accept(tx, &chain_id, &transaction_limits) .expect("Failed to accept Transaction."); let dir = tempfile::tempdir().expect("Could not create tempfile."); - let cfg = Configuration { + let cfg = Config { init_mode: iroha_config::kura::Mode::Strict, debug_output_new_blocks: false, - block_store_path: dir.path().to_str().unwrap().into(), + store_dir: dir.path().to_path_buf(), }; let kura = iroha_core::kura::Kura::new(&cfg).unwrap(); let _thread_handle = iroha_core::kura::Kura::start(kura.clone()); diff --git a/core/benches/validation.rs b/core/benches/validation.rs index 089d6e29e2a..037e031cd12 100644 --- a/core/benches/validation.rs +++ b/core/benches/validation.rs @@ -79,7 +79,7 @@ fn build_test_and_transient_wsv(keys: KeyPair) -> WorldStateView { { let path_to_executor = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) - .join("../configs/peer/executor.wasm"); + .join("../configs/swarm/executor.wasm"); let wasm = std::fs::read(&path_to_executor) .unwrap_or_else(|_| panic!("Failed to read file: {}", path_to_executor.display())); let executor = Executor::new(WasmSmartContract::from_compiled(wasm)); @@ -93,7 +93,7 @@ fn build_test_and_transient_wsv(keys: KeyPair) -> WorldStateView { } fn accept_transaction(criterion: &mut Criterion) { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let keys = KeyPair::generate(); let transaction = build_test_transaction(&keys, chain_id.clone()); @@ -111,7 +111,7 @@ fn accept_transaction(criterion: &mut Criterion) { } fn sign_transaction(criterion: &mut Criterion) { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let keys = KeyPair::generate(); let transaction = build_test_transaction(&keys, chain_id); @@ -131,7 +131,7 @@ fn sign_transaction(criterion: &mut Criterion) { } fn validate_transaction(criterion: &mut Criterion) { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let keys = KeyPair::generate(); let transaction = AcceptedTransaction::accept( @@ -157,7 +157,7 @@ fn validate_transaction(criterion: &mut Criterion) { } fn sign_blocks(criterion: &mut Criterion) { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let keys = KeyPair::generate(); let transaction = AcceptedTransaction::accept( diff --git a/core/src/block.rs b/core/src/block.rs index e87ce6e972d..b2b0a6bc82d 100644 --- a/core/src/block.rs +++ b/core/src/block.rs @@ -6,7 +6,7 @@ //! [`Block`]s are organised into a linear sequence over time (also known as the block chain). 
use std::error::Error as _; -use iroha_config::sumeragi::default::DEFAULT_CONSENSUS_ESTIMATION_MS; +use iroha_config::parameters::defaults::chain_wide::DEFAULT_CONSENSUS_ESTIMATION; use iroha_crypto::{HashOf, KeyPair, MerkleTree, SignatureOf, SignaturesOf}; use iroha_data_model::{ block::*, @@ -144,7 +144,10 @@ mod pending { .as_millis() .try_into() .expect("Time should fit into u64"), - consensus_estimation_ms: DEFAULT_CONSENSUS_ESTIMATION_MS, + consensus_estimation_ms: DEFAULT_CONSENSUS_ESTIMATION + .as_millis() + .try_into() + .expect("Time should fit into u64"), height: previous_height + 1, view_change_index, previous_block_hash, @@ -437,7 +440,10 @@ mod valid { BlockBuilder(Chained(BlockPayload { header: BlockHeader { timestamp_ms: 0, - consensus_estimation_ms: DEFAULT_CONSENSUS_ESTIMATION_MS, + consensus_estimation_ms: DEFAULT_CONSENSUS_ESTIMATION + .as_millis() + .try_into() + .expect("Time should fit into u64"), height: 2, view_change_index: 0, previous_block_hash: None, @@ -687,7 +693,7 @@ mod tests { #[tokio::test] async fn should_reject_due_to_repetition() { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); // Predefined world state let alice_id = AccountId::from_str("alice@wonderland").expect("Valid"); @@ -730,7 +736,7 @@ mod tests { #[tokio::test] async fn tx_order_same_in_validation_and_revalidation() { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); // Predefined world state let alice_id = AccountId::from_str("alice@wonderland").expect("Valid"); @@ -796,7 +802,7 @@ mod tests { #[tokio::test] async fn failed_transactions_revert() { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); // Predefined world state let alice_id = AccountId::from_str("alice@wonderland").expect("Valid"); diff --git a/core/src/block_sync.rs b/core/src/block_sync.rs index 2ff4ffe05ca..e4d71aad0a1 100644 --- a/core/src/block_sync.rs +++ b/core/src/block_sync.rs @@ -1,7 +1,7 @@ //! This module contains structures and messages for synchronization of blocks between peers.
-use std::{fmt::Debug, sync::Arc, time::Duration}; +use std::{fmt::Debug, num::NonZeroU32, sync::Arc, time::Duration}; -use iroha_config::block_sync::Configuration; +use iroha_config::parameters::actual::BlockSync as Config; use iroha_crypto::HashOf; use iroha_data_model::{block::SignedBlock, prelude::*}; use iroha_logger::prelude::*; @@ -36,7 +36,7 @@ pub struct BlockSynchronizer { kura: Arc<Kura>, peer_id: PeerId, gossip_period: Duration, - block_batch_size: u32, + gossip_max_size: NonZeroU32, network: IrohaNetwork, latest_hash: Option<HashOf<SignedBlock>>, previous_hash: Option<HashOf<SignedBlock>>, @@ -104,8 +104,8 @@ impl BlockSynchronizer { } /// Create [`Self`] from [`Configuration`] - pub fn from_configuration( - config: &Configuration, + pub fn from_config( + config: &Config, sumeragi: SumeragiHandle, kura: Arc<Kura>, peer_id: PeerId, @@ -117,8 +117,8 @@ peer_id, sumeragi, kura, - gossip_period: Duration::from_millis(config.gossip_period_ms), - block_batch_size: config.block_batch_size, + gossip_period: config.gossip_period, + gossip_max_size: config.gossip_max_size, network, latest_hash, previous_hash, @@ -191,10 +191,6 @@ pub mod message { previous_hash, peer_id, }) => { - if block_sync.block_batch_size == 0 { - warn!("Error: not sending any blocks as batch_size is equal to zero."); - return; - } let local_latest_block_hash = block_sync.latest_hash; if *latest_hash == local_latest_block_hash || *previous_hash == local_latest_block_hash @@ -214,7 +210,7 @@ }; let blocks = (start_height..) - .take(1 + block_sync.block_batch_size as usize) + .take(1 + block_sync.gossip_max_size.get() as usize) .map_while(|height| block_sync.kura.get_block_by_height(height)) .skip_while(|block| Some(block.hash()) == *latest_hash) .map(|block| (*block).clone()) diff --git a/core/src/executor.rs b/core/src/executor.rs index 62af571fe49..70eb4c5fc0a 100644 --- a/core/src/executor.rs +++ b/core/src/executor.rs @@ -157,7 +157,7 @@ impl Executor { let runtime = wasm::RuntimeBuilder::::new() .with_engine(wsv.engine.clone()) // Cloning engine is cheap, see [`wasmtime::Engine`] docs - .with_configuration(wsv.config.wasm_runtime_config) + .with_config(wsv.config.wasm_runtime) .build()?; runtime.execute_executor_validate_transaction( @@ -191,7 +191,7 @@ impl Executor { let runtime = wasm::RuntimeBuilder::::new() .with_engine(wsv.engine.clone()) // Cloning engine is cheap, see [`wasmtime::Engine`] docs - .with_configuration(wsv.config.wasm_runtime_config) + .with_config(wsv.config.wasm_runtime) .build()?; runtime.execute_executor_validate_instruction( @@ -224,7 +224,7 @@ impl Executor { Self::UserProvided(UserProvidedExecutor(loaded_executor)) => { let runtime = wasm::RuntimeBuilder::::new() .with_engine(wsv.engine.clone()) // Cloning engine is cheap, see [`wasmtime::Engine`] docs - .with_configuration(wsv.config.wasm_runtime_config) + .with_config(wsv.config.wasm_runtime) .build()?; runtime.execute_executor_validate_query( @@ -259,7 +259,7 @@ impl Executor { let runtime = wasm::RuntimeBuilder::::new() .with_engine(wsv.engine.clone()) // Cloning engine is cheap, see [`wasmtime::Engine`] docs - .with_configuration(wsv.config.wasm_runtime_config) + .with_config(wsv.config.wasm_runtime) .build()?; runtime diff --git a/core/src/gossiper.rs b/core/src/gossiper.rs index e9dbe4604e4..a67709fbb9a 100644 --- a/core/src/gossiper.rs +++ b/core/src/gossiper.rs @@ -1,8 +1,8 @@ //!
Gossiper is an actor responsible for transaction gossiping -use std::{sync::Arc, time::Duration}; +use std::{num::NonZeroU32, sync::Arc, time::Duration}; -use iroha_config::sumeragi::Configuration; +use iroha_config::parameters::actual::TransactionGossiper as Config; use iroha_data_model::{transaction::SignedTransaction, ChainId}; use iroha_p2p::Broadcast; use parity_scale_codec::{Decode, Encode}; @@ -35,7 +35,7 @@ pub struct TransactionGossiper { chain_id: ChainId, /// The size of batch that is being gossiped. Smaller size leads /// to longer time to synchronise, useful if you have high packet loss. - gossip_batch_size: u32, + gossip_max_size: NonZeroU32, /// The time between gossiping. More frequent gossiping shortens /// the time to sync, but can overload the network. gossip_period: Duration, @@ -58,10 +58,12 @@ impl TransactionGossiper { } /// Construct [`Self`] from configuration - pub fn from_configuration( + pub fn from_config( chain_id: ChainId, - // Currently we are using configuration parameters from sumeragi not to break configuration - configuration: &Configuration, + Config { + gossip_period, + gossip_max_size, + }: Config, network: IrohaNetwork, queue: Arc<Queue>, sumeragi: SumeragiHandle, @@ -69,11 +71,11 @@ let wsv = sumeragi.wsv_clone(); Self { chain_id, + gossip_max_size, + gossip_period, queue, - sumeragi, network, - gossip_batch_size: configuration.gossip_batch_size, - gossip_period: Duration::from_millis(configuration.gossip_period_ms), + sumeragi, wsv, } } @@ -101,7 +103,7 @@ impl TransactionGossiper { fn gossip_transactions(&self) { let txs = self .queue - .n_random_transactions(self.gossip_batch_size, &self.wsv); + .n_random_transactions(self.gossip_max_size.get(), &self.wsv); if txs.is_empty() { return; diff --git a/core/src/kiso.rs b/core/src/kiso.rs index a7f62be4449..c99add91be0 100644 --- a/core/src/kiso.rs +++ b/core/src/kiso.rs @@ -1,6 +1,6 @@ //! Actor responsible for configuration state and its dynamic updates. //! -//! Currently the API exposed by [`KisoHandle`] works only with [`ConfigurationDTO`], because +//! Currently the API exposed by [`KisoHandle`] works only with [`ConfigDTO`], because //! no part of Iroha is interested in the whole state. However, the API could be extended //! in future. //! @@ -9,8 +9,8 @@ use eyre::Result; use iroha_config::{ - client_api::{ConfigurationDTO, Logger as LoggerDTO}, - iroha::Configuration, + client_api::{ConfigDTO, Logger as LoggerDTO}, + parameters::actual::Root as Config, }; use iroha_logger::Level; use tokio::sync::{mpsc, oneshot, watch}; @@ -27,7 +27,7 @@ pub struct KisoHandle { impl KisoHandle { /// Spawn a new actor - pub fn new(state: Configuration) -> Self { + pub fn new(state: Config) -> Self { let (actor_sender, actor_receiver) = mpsc::channel(DEFAULT_CHANNEL_SIZE); let (log_level_update, _) = watch::channel(state.logger.level); let mut actor = Actor { @@ -42,11 +42,11 @@ } } - /// Fetch the [`ConfigurationDTO`] from the actor's state. + /// Fetch the [`ConfigDTO`] from the actor's state. /// /// # Errors /// If communication with actor fails. - pub async fn get_dto(&self) -> Result<ConfigurationDTO, Error> { + pub async fn get_dto(&self) -> Result<ConfigDTO, Error> { let (tx, rx) = oneshot::channel(); let msg = Message::GetDTO { respond_to: tx }; let _ = self.actor.send(msg).await; @@ -61,7 +61,7 @@ /// /// # Errors /// If communication with actor fails.
- pub async fn update_with_dto(&self, dto: ConfigurationDTO) -> Result<(), Error> { + pub async fn update_with_dto(&self, dto: ConfigDTO) -> Result<(), Error> { let (tx, rx) = oneshot::channel(); let msg = Message::UpdateWithDTO { dto, @@ -86,10 +86,10 @@ enum Message { GetDTO { - respond_to: oneshot::Sender<ConfigurationDTO>, + respond_to: oneshot::Sender<ConfigDTO>, }, UpdateWithDTO { - dto: ConfigurationDTO, + dto: ConfigDTO, respond_to: oneshot::Sender<Result<(), Error>>, }, SubscribeOnLogLevel { @@ -106,7 +106,7 @@ pub enum Error { struct Actor { handle: mpsc::Receiver<Message>, - state: Configuration, + state: Config, // The current implementation is not very scalable in terms of code writing: for any // future dynamic parameter, it will require its own `subscribe_on_` function in [`KisoHandle`], // new channel here, and new [`Message`] variant. If boilerplate expands, a more general solution will be @@ -124,12 +124,12 @@ impl Actor { fn handle_message(&mut self, msg: Message) { match msg { Message::GetDTO { respond_to } => { - let dto = ConfigurationDTO::from(&self.state); + let dto = ConfigDTO::from(&self.state); let _ = respond_to.send(dto); } Message::UpdateWithDTO { dto: - ConfigurationDTO { + ConfigDTO { logger: LoggerDTO { level: new_level }, }, respond_to, @@ -151,20 +151,23 @@ mod tests { use std::time::Duration; use iroha_config::{ - base::proxy::LoadFromDisk, - client_api::{ConfigurationDTO, Logger as LoggerDTO}, - iroha::{Configuration, ConfigurationProxy}, + client_api::{ConfigDTO, Logger as LoggerDTO}, + parameters::actual::Root, }; use super::*; - fn test_config() -> Configuration { - // FIXME Specifying path here might break! Moreover, if the file is not found, - // the error will say that `public_key` is missing! - // Hopefully this will change: https://github.com/hyperledger/iroha/issues/2585 - ConfigurationProxy::from_path("../config/iroha_test_config.json") - .build() - .unwrap() + fn test_config() -> Root { + use iroha_config::parameters::user::CliContext; + + Root::load( + // FIXME Specifying path here might break! + Some("../config/iroha_test_config.toml"), + CliContext { + submit_genesis: true, + }, + ) + .expect("test config should be valid; if it is not, this is probably a bug") } #[tokio::test] @@ -186,7 +189,7 @@ .await .expect_err("Watcher should not be active initially"); - kiso.update_with_dto(ConfigurationDTO { + kiso.update_with_dto(ConfigDTO { logger: LoggerDTO { level: NEW_LOG_LEVEL, }, diff --git a/core/src/kura.rs b/core/src/kura.rs index f248248e247..c70e5557323 100644 --- a/core/src/kura.rs +++ b/core/src/kura.rs @@ -10,7 +10,7 @@ use std::{ sync::Arc, }; -use iroha_config::kura::{Configuration, Mode}; +use iroha_config::{kura::Mode, parameters::actual::Kura as Config}; use iroha_crypto::{Hash, HashOf}; use iroha_data_model::block::SignedBlock; use iroha_logger::prelude::*; @@ -49,16 +49,13 @@ impl Kura { /// Fails if there are filesystem errors when trying /// to access the block store indicated by the provided /// path.
- pub fn new(config: &Configuration) -> Result<Arc<Self>> { - let block_store_path = Path::new(&config.block_store_path); - let mut block_store = BlockStore::new(block_store_path, LockStatus::Unlocked); + pub fn new(config: &Config) -> Result<Arc<Self>> { + let mut block_store = BlockStore::new(&config.store_dir, LockStatus::Unlocked); block_store.create_files_if_they_do_not_exist()?; - let block_plain_text_path = config.debug_output_new_blocks.then(|| { - let mut path_buf = block_store_path.to_path_buf(); - path_buf.push("blocks.json"); - path_buf - }); + let block_plain_text_path = config + .debug_output_new_blocks + .then(|| config.store_dir.join("blocks.json")); let kura = Arc::new(Self { mode: config.init_mode, @@ -75,7 +72,7 @@ pub fn blank_kura_for_testing() -> Arc<Kura> { Arc::new(Self { mode: Mode::Strict, - block_store: Mutex::new(BlockStore::new(&PathBuf::new(), LockStatus::Locked)), + block_store: Mutex::new(BlockStore::new(PathBuf::new(), LockStatus::Locked)), block_data: Mutex::new(Vec::new()), block_plain_text_path: None, }) @@ -395,9 +392,9 @@ impl BlockStore { /// /// # Panics /// * if you pass in `LockStatus::Unlocked` and it is unable to lock the block store. - pub fn new(store_path: &Path, already_locked: LockStatus) -> Self { + pub fn new(store_path: impl AsRef<Path>, already_locked: LockStatus) -> Self { if matches!(already_locked, LockStatus::Unlocked) { - let lock_path = store_path.join(LOCK_FILE_NAME); + let lock_path = store_path.as_ref().join(LOCK_FILE_NAME); if let Err(e) = fs::File::options() .read(true) .write(true) @@ -407,8 +404,8 @@ match e.kind() { std::io::ErrorKind::AlreadyExists => Err(Error::Locked(lock_path)), std::io::ErrorKind::NotFound => { - match std::fs::create_dir_all(store_path) - .map_err(|e| Error::MkDir(e, store_path.to_path_buf())) + match std::fs::create_dir_all(store_path.as_ref()) - .map_err(|e| Error::MkDir(e, store_path.as_ref().to_path_buf())) { Err(e) => Err(e), Ok(()) => { @@ -431,7 +428,7 @@ } } BlockStore { - path_to_blockchain: store_path.to_path_buf(), + path_to_blockchain: store_path.as_ref().to_path_buf(), } } @@ -1049,9 +1046,9 @@ mod tests { #[tokio::test] async fn strict_init_kura() { let temp_dir = TempDir::new().unwrap(); - Kura::new(&Configuration { + Kura::new(&Config { init_mode: Mode::Strict, - block_store_path: temp_dir.path().to_str().unwrap().into(), + store_dir: temp_dir.path().to_str().unwrap().into(), debug_output_new_blocks: false, }) .unwrap() diff --git a/core/src/query/store.rs b/core/src/query/store.rs index 8e8c83c0687..6691ee24a48 100644 --- a/core/src/query/store.rs +++ b/core/src/query/store.rs @@ -7,7 +7,7 @@ use std::{ }; use indexmap::IndexMap; -use iroha_config::live_query_store::Configuration; +use iroha_config::parameters::actual::LiveQueryStore as Config; use iroha_data_model::{ asset::AssetValue, query::{ @@ -69,15 +69,15 @@ type LiveQuery = Batched>; #[derive(Debug)] pub struct LiveQueryStore { queries: IndexMap<QueryId, (LiveQuery, Instant)>, - query_idle_time: Duration, + idle_time: Duration, } impl LiveQueryStore { /// Construct [`LiveQueryStore`] from configuration. - pub fn from_configuration(cfg: Configuration) -> Self { + pub fn from_config(cfg: Config) -> Self { Self { queries: IndexMap::new(), - query_idle_time: Duration::from_millis(cfg.query_idle_time_ms.into()), + idle_time: cfg.idle_time, } } @@ -86,13 +86,7 @@ /// /// Not marked as `#[cfg(test)]` because it is used in benches as well.
pub fn test() -> Self { - use iroha_config::base::proxy::Builder as _; - - LiveQueryStore::from_configuration( - iroha_config::live_query_store::ConfigurationProxy::default() - .build() - .expect("Failed to build LiveQueryStore configuration from proxy"), - ) + Self::from_config(Config::default()) } /// Start [`LiveQueryStore`]. Requires a [`tokio::runtime::Runtime`] being run @@ -105,14 +99,14 @@ let (message_sender, mut message_receiver) = mpsc::channel(1); - let mut idle_interval = tokio::time::interval(self.query_idle_time); + let mut idle_interval = tokio::time::interval(self.idle_time); tokio::task::spawn(async move { loop { tokio::select! { _ = idle_interval.tick() => { self.queries - .retain(|_, (_, last_access_time)| last_access_time.elapsed() <= self.query_idle_time); + .retain(|_, (_, last_access_time)| last_access_time.elapsed() <= self.idle_time); }, msg = message_receiver.recv() => { let Some(msg) = msg else { diff --git a/core/src/queue.rs b/core/src/queue.rs index 53eeb75f4fb..3beab0a9546 100644 --- a/core/src/queue.rs +++ b/core/src/queue.rs @@ -1,11 +1,12 @@ //! Module with queue actor use core::time::Duration; +use std::num::NonZeroUsize; use crossbeam_queue::ArrayQueue; use dashmap::{mapref::entry::Entry, DashMap}; use eyre::{Report, Result}; use indexmap::IndexSet; -use iroha_config::queue::Configuration; +use iroha_config::parameters::actual::Queue as Config; use iroha_crypto::HashOf; use iroha_data_model::{account::AccountId, transaction::prelude::*}; use iroha_logger::{debug, trace, warn}; @@ -53,9 +54,9 @@ pub struct Queue { /// Amount of transactions per user in the queue txs_per_user: DashMap<AccountId, usize>, /// The maximum number of transactions in the queue - max_txs: usize, + capacity: NonZeroUsize, /// The maximum number of transactions in the queue per user. Used to apply throttling - max_txs_per_user: usize, + capacity_per_user: NonZeroUsize, /// Length of time after which transactions are dropped. pub tx_time_to_live: Duration, /// A point in time that is considered `Future` we cannot use @@ -98,15 +99,15 @@ pub struct Failure { impl Queue { /// Makes queue from configuration - pub fn from_configuration(cfg: &Configuration) -> Self { + pub fn from_config(cfg: Config) -> Self { Self { - tx_hashes: ArrayQueue::new(cfg.max_transactions_in_queue as usize), + tx_hashes: ArrayQueue::new(cfg.capacity.get()), accepted_txs: DashMap::new(), txs_per_user: DashMap::new(), - max_txs: cfg.max_transactions_in_queue as usize, - max_txs_per_user: cfg.max_transactions_in_queue_per_user as usize, - tx_time_to_live: Duration::from_millis(cfg.transaction_time_to_live_ms), - future_threshold: Duration::from_millis(cfg.future_threshold_ms), + capacity: cfg.capacity, + capacity_per_user: cfg.capacity_per_user, + tx_time_to_live: cfg.transaction_time_to_live, + future_threshold: cfg.future_threshold, } } @@ -114,11 +115,7 @@ !self.is_expired(tx) && !tx.is_in_blockchain(wsv) } - /// Checks if this transaction is waiting longer than specified in - /// `transaction_time_to_live` from `QueueConfiguration` or - /// `time_to_live_ms` of this transaction. Meaning that the - /// transaction will be expired as soon as the lesser of the - /// specified TTLs was reached. + /// Checks if the transaction is waiting longer than its TTL or than the TTL from [`Config`].
pub fn is_expired(&self, tx: &AcceptedTransaction) -> bool { let tx_creation_time = tx.as_ref().creation_time(); @@ -209,9 +206,9 @@ impl Queue { } Entry::Vacant(entry) => entry, }; - if txs_len >= self.max_txs { + if txs_len >= self.capacity.get() { warn!( - max = self.max_txs, + max = self.capacity, "Achieved maximum amount of transactions" ); return Err(Failure { @@ -349,9 +346,9 @@ impl Queue { } Entry::Occupied(mut occupied) => { let txs = *occupied.get(); - if txs >= self.max_txs_per_user { + if txs >= self.capacity_per_user.get() { warn!( - max_txs_per_user = self.max_txs_per_user, + max_txs_per_user = self.capacity_per_user, %account_id, "Account reached maximum allowed number of transactions in the queue per user" ); @@ -382,7 +379,6 @@ impl Queue { mod tests { use std::{str::FromStr, sync::Arc, thread, time::Duration}; - use iroha_config::{base::proxy::Builder, queue::ConfigurationProxy}; use iroha_data_model::{prelude::*, transaction::TransactionLimits}; use iroha_primitives::must_use::MustUse; use rand::Rng as _; @@ -395,7 +391,7 @@ mod tests { }; fn accepted_tx(account_id: &str, key: &KeyPair) -> AcceptedTransaction { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let message = std::iter::repeat_with(rand::random::) .take(16) @@ -425,6 +421,14 @@ mod tests { World::with([domain], PeersIds::new()) } + fn config_factory() -> Config { + Config { + transaction_time_to_live: Duration::from_secs(100), + capacity: 100.try_into().unwrap(), + ..Config::default() + } + } + #[test] async fn push_tx() { let key_pair = KeyPair::generate(); @@ -436,13 +440,7 @@ mod tests { query_handle, )); - let queue = Queue::from_configuration(&Configuration { - transaction_time_to_live_ms: 100_000, - max_transactions_in_queue: 100, - ..ConfigurationProxy::default() - .build() - .expect("Default queue config should always build") - }); + let queue = Queue::from_config(config_factory()); queue .push(accepted_tx("alice@wonderland", &key_pair), &wsv) @@ -451,7 +449,7 @@ mod tests { #[test] async fn push_tx_overflow() { - let max_txs_in_queue = 10; + let capacity = NonZeroUsize::new(10).unwrap(); let key_pair = KeyPair::generate(); let kura = Kura::blank_kura_for_testing(); @@ -462,15 +460,13 @@ mod tests { query_handle, )); - let queue = Queue::from_configuration(&Configuration { - transaction_time_to_live_ms: 100_000, - max_transactions_in_queue: max_txs_in_queue, - ..ConfigurationProxy::default() - .build() - .expect("Default queue config should always build") + let queue = Queue::from_config(Config { + transaction_time_to_live: Duration::from_secs(100), + capacity, + ..Config::default() }); - for _ in 0..max_txs_in_queue { + for _ in 0..capacity.get() { queue .push(accepted_tx("alice@wonderland", &key_pair), &wsv) .expect("Failed to push tx into queue"); @@ -488,7 +484,7 @@ mod tests { #[test] async fn push_multisignature_tx() { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let max_txs_in_block = 2; let key_pairs = [KeyPair::generate(), KeyPair::generate()]; @@ -512,13 +508,7 @@ mod tests { )) }; - let queue = Queue::from_configuration(&Configuration { - transaction_time_to_live_ms: 100_000, - max_transactions_in_queue: 100, - ..ConfigurationProxy::default() - .build() - .expect("Default queue config should always build") - }); + let queue = Queue::from_config(config_factory()); let instructions: [InstructionBox; 0] = []; let tx = TransactionBuilder::new(chain_id.clone(), "alice@wonderland".parse().expect("Valid")) @@ -581,12 +571,9 @@ mod tests { 
kura, query_handle, )); - let queue = Queue::from_configuration(&Configuration { - transaction_time_to_live_ms: 100_000, - max_transactions_in_queue: 100, - ..ConfigurationProxy::default() - .build() - .expect("Default queue config should always build") + let queue = Queue::from_config(Config { + transaction_time_to_live: Duration::from_secs(100), + ..config_factory() }); for _ in 0..5 { queue @@ -611,13 +598,7 @@ mod tests { ); let tx = accepted_tx("alice@wonderland", &alice_key); wsv.transactions.insert(tx.as_ref().hash(), 1); - let queue = Queue::from_configuration(&Configuration { - transaction_time_to_live_ms: 100_000, - max_transactions_in_queue: 100, - ..ConfigurationProxy::default() - .build() - .expect("Default queue config should always build") - }); + let queue = Queue::from_config(config_factory()); assert!(matches!( queue.push(tx, &wsv), Err(Failure { @@ -640,13 +621,7 @@ mod tests { query_handle, ); let tx = accepted_tx("alice@wonderland", &alice_key); - let queue = Queue::from_configuration(&Configuration { - transaction_time_to_live_ms: 100_000, - max_transactions_in_queue: 100, - ..ConfigurationProxy::default() - .build() - .expect("Default queue config should always build") - }); + let queue = Queue::from_config(config_factory()); queue.push(tx.clone(), &wsv).unwrap(); wsv.transactions.insert(tx.as_ref().hash(), 1); assert_eq!( @@ -669,12 +644,9 @@ mod tests { kura, query_handle, )); - let queue = Queue::from_configuration(&Configuration { - transaction_time_to_live_ms: 200, - max_transactions_in_queue: 100, - ..ConfigurationProxy::default() - .build() - .expect("Default queue config should always build") + let queue = Queue::from_config(Config { + transaction_time_to_live: Duration::from_millis(200), + ..config_factory() }); for _ in 0..(max_txs_in_block - 1) { queue @@ -719,13 +691,7 @@ mod tests { kura, query_handle, )); - let queue = Queue::from_configuration(&Configuration { - transaction_time_to_live_ms: 100_000, - max_transactions_in_queue: 100, - ..ConfigurationProxy::default() - .build() - .expect("Default queue config should always build") - }); + let queue = Queue::from_config(config_factory()); queue .push(accepted_tx("alice@wonderland", &alice_key), &wsv) .expect("Failed to push tx into queue"); @@ -748,7 +714,7 @@ mod tests { async fn custom_expired_transaction_is_rejected() { const TTL_MS: u64 = 100; - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let max_txs_in_block = 2; let alice_key = KeyPair::generate(); @@ -759,13 +725,7 @@ mod tests { kura, query_handle, )); - let queue = Queue::from_configuration(&Configuration { - transaction_time_to_live_ms: 100_000, - max_transactions_in_queue: 100, - ..ConfigurationProxy::default() - .build() - .expect("Default queue config should always build") - }); + let queue = Queue::from_config(config_factory()); let instructions = [Fail { message: "expired".to_owned(), }]; @@ -806,12 +766,10 @@ mod tests { query_handle, ); - let queue = Arc::new(Queue::from_configuration(&Configuration { - transaction_time_to_live_ms: 100_000, - max_transactions_in_queue: 100_000_000, - ..ConfigurationProxy::default() - .build() - .expect("Default queue config should always build") + let queue = Arc::new(Queue::from_config(Config { + transaction_time_to_live: Duration::from_secs(100), + capacity: 100_000_000.try_into().unwrap(), + ..Config::default() })); let start_time = std::time::Instant::now(); @@ -869,7 +827,7 @@ mod tests { #[test] async fn push_tx_in_future() { - let future_threshold_ms = 1000; + let 
future_threshold = Duration::from_secs(1); let alice_id = "alice@wonderland"; let alice_key = KeyPair::generate(); @@ -881,26 +839,23 @@ mod tests { query_handle, )); - let queue = Queue::from_configuration(&Configuration { - future_threshold_ms, - ..ConfigurationProxy::default() - .build() - .expect("Default queue config should always build") + let queue = Queue::from_config(Config { + future_threshold, + ..Config::default() }); let tx = accepted_tx(alice_id, &alice_key); assert!(queue.push(tx.clone(), &wsv).is_ok()); // create the same tx but with timestamp in the future let tx = { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let mut new_tx = TransactionBuilder::new( chain_id.clone(), AccountId::from_str(alice_id).expect("Valid"), ) .with_executable(tx.0.instructions().clone()); - let creation_time: u64 = tx.0.creation_time().as_millis().try_into().unwrap(); - new_tx.set_creation_time(creation_time + 2 * future_threshold_ms); + new_tx.set_creation_time(tx.0.creation_time() + future_threshold * 2); let new_tx = new_tx.sign(&alice_key); let limits = TransactionLimits { @@ -945,13 +900,11 @@ mod tests { let query_handle = LiveQueryStore::test().start(); let mut wsv = WorldStateView::new(world, kura, query_handle); - let queue = Queue::from_configuration(&Configuration { - transaction_time_to_live_ms: 100_000, - max_transactions_in_queue: 100, - max_transactions_in_queue_per_user: 1, - ..ConfigurationProxy::default() - .build() - .expect("Default queue config should always build") + let queue = Queue::from_config(Config { + transaction_time_to_live: Duration::from_secs(100), + capacity: 100.try_into().unwrap(), + capacity_per_user: 1.try_into().unwrap(), + ..Config::default() }); // First push by Alice should be fine diff --git a/core/src/smartcontracts/isi/query.rs b/core/src/smartcontracts/isi/query.rs index db2ab543e21..f7695a93c1c 100644 --- a/core/src/smartcontracts/isi/query.rs +++ b/core/src/smartcontracts/isi/query.rs @@ -249,7 +249,7 @@ mod tests { valid_tx_per_block: usize, invalid_tx_per_block: usize, ) -> Result { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let kura = Kura::blank_kura_for_testing(); let query_handle = LiveQueryStore::test().start(); @@ -411,7 +411,7 @@ mod tests { #[test] async fn find_transaction() -> Result<()> { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let kura = Kura::blank_kura_for_testing(); let query_handle = LiveQueryStore::test().start(); diff --git a/core/src/smartcontracts/wasm.rs b/core/src/smartcontracts/wasm.rs index b361bc97b04..ec431e8e458 100644 --- a/core/src/smartcontracts/wasm.rs +++ b/core/src/smartcontracts/wasm.rs @@ -6,10 +6,7 @@ use error::*; use import::traits::{ ExecuteOperations as _, GetExecutorPayloads as _, SetPermissionTokenSchema as _, }; -use iroha_config::{ - base::proxy::Builder, - wasm::{Configuration, ConfigurationProxy}, -}; +use iroha_config::parameters::actual::WasmRuntime as Config; use iroha_data_model::{ account::AccountId, executor::{self, MigrationResult}, @@ -28,7 +25,8 @@ use iroha_logger::debug; use iroha_logger::{error_span as wasm_log_span, prelude::tracing::Span}; use iroha_wasm_codec::{self as codec, WasmUsize}; use wasmtime::{ - Caller, Config, Engine, Linker, Module, Store, StoreLimits, StoreLimitsBuilder, TypedFunc, + Caller, Config as WasmtimeConfig, Engine, Linker, Module, Store, StoreLimits, + StoreLimitsBuilder, TypedFunc, }; use crate::{ @@ -268,8 +266,8 @@ pub fn create_engine() -> Engine { .expect("Failed to 
create WASM engine with a predefined configuration. This is a bug") } -fn create_config() -> Result { - let mut config = Config::new(); +fn create_config() -> Result { + let mut config = WasmtimeConfig::new(); config .consume_fuel(true) .cache_config_load_default() @@ -343,13 +341,9 @@ pub mod state { /// /// Panics if failed to convert `u32` into `usize` which should not happen /// on any supported platform - pub fn store_limits_from_config(config: &Configuration) -> StoreLimits { + pub fn store_limits_from_config(config: &Config) -> StoreLimits { StoreLimitsBuilder::new() - .memory_size( - config.max_memory.try_into().expect( - "config.max_memory is a u32 so this can't fail on any supported platform", - ), - ) + .memory_size(config.max_memory_bytes as usize) .instances(1) .memories(1) .tables(1) @@ -374,7 +368,7 @@ pub mod state { /// Create new [`OrdinaryState`] pub fn new( authority: AccountId, - config: Configuration, + config: Config, log_span: Span, wsv: W, specific_state: S, @@ -567,7 +561,7 @@ pub mod state { pub struct Runtime { engine: Engine, linker: Linker, - config: Configuration, + config: Config, } impl Runtime { @@ -595,7 +589,7 @@ impl Runtime { fn get_typed_func( instance: &wasmtime::Instance, - mut store: &mut wasmtime::Store, + mut store: &mut Store, func_name: &'static str, ) -> Result, ExportError> { instance @@ -1409,7 +1403,7 @@ impl<'wrld> import::traits::SetPermissionTokenSchema { engine: Option, - config: Option, + config: Option, linker: Option>, } @@ -1434,7 +1428,7 @@ impl RuntimeBuilder { /// Sets the [`Configuration`] to be used by the [`Runtime`] #[must_use] #[inline] - pub fn with_configuration(mut self, config: Configuration) -> Self { + pub fn with_config(mut self, config: Config) -> Self { self.config = Some(config); self } @@ -1451,11 +1445,7 @@ impl RuntimeBuilder { Ok(Runtime { engine, linker, - config: self.config.unwrap_or_else(|| { - ConfigurationProxy::default() - .build() - .expect("Error building WASM Runtime configuration from proxy. 
This is a bug") - }), + config: self.config.unwrap_or_default(), }) } } diff --git a/core/src/snapshot.rs b/core/src/snapshot.rs index 52aad1bd6ee..22e7e3762b9 100644 --- a/core/src/snapshot.rs +++ b/core/src/snapshot.rs @@ -6,7 +6,7 @@ use std::{ time::Duration, }; -use iroha_config::snapshot::Configuration; +use iroha_config::parameters::actual::Snapshot as Config; use iroha_crypto::HashOf; use iroha_data_model::block::SignedBlock; use iroha_logger::prelude::*; @@ -137,11 +137,11 @@ impl SnapshotMaker { } /// Create [`Self`] from [`Configuration`] - pub fn from_configuration(config: &Configuration, sumeragi: SumeragiHandle) -> Self { + pub fn from_config(config: &Config, sumeragi: SumeragiHandle) -> Self { Self { sumeragi, - snapshot_create_every: Duration::from_millis(config.create_every_ms), - snapshot_dir: config.dir_path.clone(), + snapshot_create_every: config.create_every, + snapshot_dir: config.store_dir.clone(), snapshot_creation_enabled: config.creation_enabled, new_wsv_available: false, } diff --git a/core/src/sumeragi/main_loop.rs b/core/src/sumeragi/main_loop.rs index 06c8e2c2b97..b545e281338 100644 --- a/core/src/sumeragi/main_loop.rs +++ b/core/src/sumeragi/main_loop.rs @@ -1271,7 +1271,7 @@ mod tests { #[test] #[allow(clippy::redundant_clone)] async fn block_sync_invalid_block() { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let leader_key_pair = KeyPair::generate(); let topology = Topology::new(unique_vec![PeerId::new( @@ -1291,7 +1291,7 @@ mod tests { #[test] async fn block_sync_invalid_soft_fork_block() { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let leader_key_pair = KeyPair::generate(); let topology = Topology::new(unique_vec![PeerId::new( @@ -1322,7 +1322,7 @@ mod tests { #[test] #[allow(clippy::redundant_clone)] async fn block_sync_not_proper_height() { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let topology = Topology::new(UniqueVec::new()); let leader_key_pair = KeyPair::generate(); @@ -1349,7 +1349,7 @@ mod tests { #[test] #[allow(clippy::redundant_clone)] async fn block_sync_commit_block() { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let leader_key_pair = KeyPair::generate(); let topology = Topology::new(unique_vec![PeerId::new( @@ -1365,7 +1365,7 @@ mod tests { #[test] async fn block_sync_replace_top_block() { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let leader_key_pair = KeyPair::generate(); let topology = Topology::new(unique_vec![PeerId::new( @@ -1393,7 +1393,7 @@ mod tests { #[test] async fn block_sync_small_view_change_index() { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let leader_key_pair = KeyPair::generate(); let topology = Topology::new(unique_vec![PeerId::new( @@ -1434,7 +1434,7 @@ mod tests { #[test] #[allow(clippy::redundant_clone)] async fn block_sync_genesis_block_do_not_replace() { - let chain_id = ChainId::new("0"); + let chain_id = ChainId::from("0"); let topology = Topology::new(UniqueVec::new()); let leader_key_pair = KeyPair::generate(); diff --git a/core/src/sumeragi/mod.rs b/core/src/sumeragi/mod.rs index 165f273bc4f..b08525a4ea1 100644 --- a/core/src/sumeragi/mod.rs +++ b/core/src/sumeragi/mod.rs @@ -8,7 +8,7 @@ use std::{ }; use eyre::{Result, WrapErr as _}; -use iroha_config::sumeragi::Configuration; +use iroha_config::parameters::actual::{Common as CommonConfig, Sumeragi as SumeragiConfig}; use iroha_crypto::{KeyPair, SignatureOf}; use 
iroha_data_model::{block::SignedBlock, prelude::*}; use iroha_genesis::GenesisNetwork; @@ -257,8 +257,8 @@ impl SumeragiHandle { #[allow(clippy::too_many_lines)] pub fn start( SumeragiStartArgs { - chain_id, - configuration, + sumeragi_config, + common_config, events_sender, mut wsv, queue, @@ -281,8 +281,8 @@ let mut current_topology = match wsv.height() { 0 => { - assert!(!configuration.trusted_peers.peers.is_empty()); - Topology::new(configuration.trusted_peers.peers.clone()) + assert!(!sumeragi_config.trusted_peers.is_empty()); + Topology::new(sumeragi_config.trusted_peers.clone()) } height => { let block_ref = kura.get_block_by_height(height).expect( @@ -296,14 +296,16 @@ let block_iter_except_last = (&mut blocks_iter).take(block_count.saturating_sub(skip_block_count + 1)); for block in block_iter_except_last { - current_topology = Self::replay_block(&chain_id, &block, &mut wsv, current_topology); + current_topology = + Self::replay_block(&common_config.chain_id, &block, &mut wsv, current_topology); } // finalized_wsv is one block behind let finalized_wsv = wsv.clone(); if let Some(block) = blocks_iter.next() { - current_topology = Self::replay_block(&chain_id, &block, &mut wsv, current_topology); + current_topology = + Self::replay_block(&common_config.chain_id, &block, &mut wsv, current_topology); } info!("Sumeragi has finished loading blocks and setting up the WSV"); @@ -313,21 +315,23 @@ watch::channel(finalized_wsv.clone()); #[cfg(debug_assertions)] - let debug_force_soft_fork = configuration.debug_force_soft_fork; + let debug_force_soft_fork = sumeragi_config.debug_force_soft_fork; #[cfg(not(debug_assertions))] let debug_force_soft_fork = false; + let peer_id = common_config.peer_id(); + let sumeragi = main_loop::Sumeragi { - chain_id, - key_pair: configuration.key_pair.clone(), + chain_id: common_config.chain_id, + key_pair: common_config.key_pair, + peer_id, queue: Arc::clone(&queue), - peer_id: configuration.peer_id.clone(), events_sender, public_wsv_sender, public_finalized_wsv_sender, - commit_time: Duration::from_millis(configuration.commit_time_limit_ms), - block_time: Duration::from_millis(configuration.block_time_ms), - max_txs_in_block: configuration.max_transactions_in_block as usize, + commit_time: wsv.config.commit_time, + block_time: wsv.config.block_time, + max_txs_in_block: wsv.config.max_transactions_in_block.get() as usize, kura: Arc::clone(&kura), network: network.clone(), control_message_receiver, @@ -419,8 +423,8 @@ impl VotingBlock { /// Arguments for [`SumeragiHandle::start`] function #[allow(missing_docs)] pub struct SumeragiStartArgs { - pub chain_id: ChainId, - pub configuration: Box<Configuration>, + pub sumeragi_config: SumeragiConfig, + pub common_config: CommonConfig, pub events_sender: EventsSender, pub wsv: WorldStateView, pub queue: Arc<Queue>, diff --git a/core/src/sumeragi/network_topology.rs b/core/src/sumeragi/network_topology.rs index 0d2def7260d..cb8f51089b5 100644 --- a/core/src/sumeragi/network_topology.rs +++ b/core/src/sumeragi/network_topology.rs @@ -285,7 +285,7 @@ macro_rules!
test_peers { }}; ($($id:literal),+$(,)?: $key_pair_iter:expr) => { ::iroha_primitives::unique_vec![ - $(PeerId::new(([0, 0, 0, 0], $id).into(), $key_pair_iter.next().expect("Not enough key pairs").public_key().clone())),+ + $(PeerId::new((([0, 0, 0, 0], $id).into()), $key_pair_iter.next().expect("Not enough key pairs").public_key().clone())),+ ] }; } diff --git a/core/src/wsv.rs b/core/src/wsv.rs index 4cabb24465a..17dee33d3df 100644 --- a/core/src/wsv.rs +++ b/core/src/wsv.rs @@ -7,10 +7,7 @@ use std::{ use eyre::Result; use indexmap::IndexMap; -use iroha_config::{ - base::proxy::Builder, - wsv::{Configuration, ConfigurationProxy}, -}; +use iroha_config::parameters::actual::ChainWide as Config; use iroha_crypto::HashOf; use iroha_data_model::{ account::AccountId, @@ -270,7 +267,7 @@ pub struct WorldStateView { /// The world. Contains `domains`, `triggers`, `roles` and other data representing the current state of the blockchain. pub world: World, /// Configuration of World State View. - pub config: Configuration, + pub config: Config, /// Blockchain. pub block_hashes: Vec<HashOf<SignedBlock>>, /// Hashes of transactions mapped onto block height where they stored @@ -400,10 +397,7 @@ impl WorldStateView { #[inline] pub fn new(world: World, kura: Arc<Kura>, query_handle: LiveQueryStoreHandle) -> Self { // Added to remain backward compatible with other code primary in tests - let config = ConfigurationProxy::default() - .build() - .expect("Wsv proxy always builds"); - Self::from_configuration(config, world, kura, query_handle) + Self::from_config(Config::default(), world, kura, query_handle) } /// Get `Account`'s `Asset`s @@ -527,7 +521,7 @@ } Wasm(LoadedWasm { module, .. }) => { let mut wasm_runtime = wasm::RuntimeBuilder::::new() .with_config(self.config.wasm_runtime) .with_engine(self.engine.clone()) // Cloning engine is cheap .build()?; wasm_runtime @@ -590,7 +584,7 @@ } Executable::Wasm(bytes) => { let mut wasm_runtime = wasm::RuntimeBuilder::::new() - .with_configuration(self.config.wasm_runtime_config) + .with_config(self.config.wasm_runtime) .with_engine(self.engine.clone()) // Cloning engine is cheap .build()?; wasm_runtime @@ -680,25 +674,24 @@ fn apply_parameters(&mut self) { use iroha_data_model::parameter::default::*; + macro_rules! update_params { - ($ident:ident, $($param:expr => $config:expr),+ $(,)?) => { + ($($param:expr => $config:expr),+ $(,)?) => { $(if let Some(param) = self.query_param($param) { - let $ident = &mut self.config; $config = param; })+ - }; } + update_params!
{ - config, - WSV_ASSET_METADATA_LIMITS => config.asset_metadata_limits, - WSV_ASSET_DEFINITION_METADATA_LIMITS => config.asset_definition_metadata_limits, - WSV_ACCOUNT_METADATA_LIMITS => config.account_metadata_limits, - WSV_DOMAIN_METADATA_LIMITS => config.domain_metadata_limits, - WSV_IDENT_LENGTH_LIMITS => config.ident_length_limits, - WASM_FUEL_LIMIT => config.wasm_runtime_config.fuel_limit, - WASM_MAX_MEMORY => config.wasm_runtime_config.max_memory, - TRANSACTION_LIMITS => config.transaction_limits, + WSV_ASSET_METADATA_LIMITS => self.config.asset_metadata_limits, + WSV_ASSET_DEFINITION_METADATA_LIMITS => self.config.asset_definition_metadata_limits, + WSV_ACCOUNT_METADATA_LIMITS => self.config.account_metadata_limits, + WSV_DOMAIN_METADATA_LIMITS => self.config.domain_metadata_limits, + WSV_IDENT_LENGTH_LIMITS => self.config.ident_length_limits, + WASM_FUEL_LIMIT => self.config.wasm_runtime.fuel_limit, + WASM_MAX_MEMORY => self.config.wasm_runtime.max_memory_bytes, + TRANSACTION_LIMITS => self.config.transaction_limits, } } @@ -922,8 +915,8 @@ impl WorldStateView { /// Construct [`WorldStateView`] with specific [`Configuration`]. #[inline] - pub fn from_configuration( - config: Configuration, + pub fn from_config( + config: Config, world: World, kura: Arc, query_handle: LiveQueryStoreHandle, diff --git a/core/test_network/src/lib.rs b/core/test_network/src/lib.rs index f283d63c292..1c187480a74 100644 --- a/core/test_network/src/lib.rs +++ b/core/test_network/src/lib.rs @@ -9,19 +9,14 @@ use futures::{prelude::*, stream::FuturesUnordered}; use iroha::Iroha; use iroha_client::{ client::{Client, QueryOutput}, + config::Config as ClientConfig, data_model::{isi::Instruction, peer::Peer as DataModelPeer, prelude::*, query::Query, Level}, }; -use iroha_config::{ - base::proxy::{LoadFromEnv, Override}, - client::Configuration as ClientConfiguration, - iroha::{Configuration, ConfigurationProxy}, - sumeragi::Configuration as SumeragiConfiguration, - torii::Configuration as ToriiConfiguration, -}; +use iroha_config::parameters::actual::Root as Config; use iroha_crypto::prelude::*; use iroha_data_model::ChainId; use iroha_genesis::{GenesisNetwork, RawGenesisBlock}; -use iroha_logger::{Configuration as LoggerConfiguration, InstrumentFutures}; +use iroha_logger::InstrumentFutures; use iroha_primitives::{ addr::{socket_addr, SocketAddr}, unique_vec, @@ -52,7 +47,7 @@ pub struct Network { /// Get a standardized blockchain id pub fn get_chain_id() -> ChainId { - ChainId::new("0") + ChainId::from("0") } /// Get a standardised key-pair from the hard-coded literals. @@ -81,12 +76,12 @@ pub trait TestGenesis: Sized { impl TestGenesis for GenesisNetwork { fn test_with_instructions(extra_isi: impl IntoIterator) -> Self { - let cfg = Configuration::test(); + let cfg = Config::test(); // TODO: Fix this somehow. Probably we need to make `kagami` a library (#3253). 
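A note on the `update_params!` rework above: the macro no longer threads a `$ident` alias through its arms; each `PARAM => self.config.field` arm now assigns straight through `self.config`. A minimal, self-contained sketch of what one arm expands to; every type and name below is an illustrative stand-in, not the real Iroha item:

```rust
use std::collections::HashMap;

// Stand-ins for `iroha_config::parameters::actual::ChainWide` and the WSV.
#[derive(Debug, Default)]
struct ChainWideConfig {
    asset_metadata_limits: u32,
    wasm_fuel_limit: u64,
}

struct Wsv {
    config: ChainWideConfig,
    // Stand-in for on-chain `Parameter` storage.
    stored: HashMap<&'static str, u64>,
}

impl Wsv {
    // Stand-in for the real `query_param` lookup.
    fn query_param(&self, name: &str) -> Option<u64> {
        self.stored.get(name).copied()
    }

    fn apply_parameters(&mut self) {
        // One `PARAM => self.config.field` arm after the rework:
        // no intermediate `let config = &mut self.config;` binding.
        if let Some(param) = self.query_param("WASM_FUEL_LIMIT") {
            self.config.wasm_fuel_limit = param;
        }
        if let Some(param) = self.query_param("WSV_ASSET_METADATA_LIMITS") {
            self.config.asset_metadata_limits = param as u32;
        }
    }
}

fn main() {
    let mut wsv = Wsv {
        config: ChainWideConfig::default(),
        stored: [("WASM_FUEL_LIMIT", 55_000_000_u64)].into_iter().collect(),
    };
    wsv.apply_parameters();
    assert_eq!(wsv.config.wasm_fuel_limit, 55_000_000);
    assert_eq!(wsv.config.asset_metadata_limits, 0); // absent on chain, default kept
}
```

Writing through `self.config` directly also removes the need for the `$ident` parameter in the macro signature, which is exactly what the hunk above deletes.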
let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR")); let mut genesis = - RawGenesisBlock::from_path(manifest_dir.join("../../configs/peer/genesis.json")) + RawGenesisBlock::from_path(manifest_dir.join("../../configs/swarm/genesis.json")) .expect("Failed to deserialize genesis block from file"); let rose_definition_id = @@ -131,13 +126,15 @@ impl TestGenesis for GenesisNetwork { first_transaction.append_instruction(isi); } - let chain_id = ChainId::new("0"); - let key_pair = KeyPair::new( - cfg.genesis.public_key.clone(), - cfg.genesis.private_key.expect("Should be"), - ) - .expect("Genesis key pair should be valid"); - GenesisNetwork::new(genesis, &chain_id, &key_pair).expect("Failed to init genesis") + GenesisNetwork::new(genesis, &cfg.common.chain_id, { + use iroha_config::parameters::actual::Genesis; + if let Genesis::Full { key_pair, .. } = &cfg.genesis { + key_pair + } else { + unreachable!("test config should contain full genesis config (or it is a bug)") + } + }) + .expect("Failed to init genesis") } } @@ -178,16 +175,12 @@ impl Network { offline_peers: u32, start_port: Option, ) -> (Self, Client) { - let mut configuration = Configuration::test(); - configuration.logger.level = Level::INFO; - let network = Network::new_with_offline_peers( - Some(configuration), - n_peers, - offline_peers, - start_port, - ) - .await - .expect("Failed to init peers"); + let mut config = Config::test(); + config.logger.level = Level::INFO; + let network = + Network::new_with_offline_peers(Some(config), n_peers, offline_peers, start_port) + .await + .expect("Failed to init peers"); let client = Client::test( &Network::peers(&network) .choose(&mut thread_rng()) @@ -218,17 +211,17 @@ impl Network { .api_address, ); - let mut config = Configuration::test(); - config.sumeragi.trusted_peers.peers = + let mut config = Config::test(); + config.sumeragi.trusted_peers = UniqueVec::from_iter(self.peers().map(|peer| &peer.id).cloned()); let peer = PeerBuilder::new() - .with_configuration(config) + .with_config(config) .with_genesis(GenesisNetwork::test()) .start() .await; - time::sleep(Configuration::pipeline_time() + Configuration::block_sync_gossip_time()).await; + time::sleep(Config::pipeline_time() + Config::block_sync_gossip_time()).await; let add_peer = Register::peer(DataModelPeer::new(peer.id.clone())); client.submit(add_peer).expect("Failed to add new peer."); @@ -248,7 +241,7 @@ impl Network { /// - (RARE) Creating new peers and collecting into a [`HashMap`] fails. /// - Creating new [`Peer`] instance fails. 
pub async fn new_with_offline_peers( - default_configuration: Option, + default_config: Option, n_peers: u32, offline_peers: u32, start_port: Option, @@ -273,12 +266,12 @@ impl Network { .map(PeerBuilder::build) .collect::>>()?; - let mut configuration = default_configuration.unwrap_or_else(Configuration::test); - configuration.sumeragi.trusted_peers.peers = + let mut config = default_config.unwrap_or_else(Config::test); + config.sumeragi.trusted_peers = UniqueVec::from_iter(peers.iter().map(|peer| peer.id.clone())); let mut genesis_peer = peers.remove(0); - let genesis_builder = builders.remove(0).with_configuration(configuration.clone()); + let genesis_builder = builders.remove(0).with_config(config.clone()); // Offset by one to account for genesis let online_peers = n_peers - offline_peers - 1; @@ -292,11 +285,7 @@ impl Network { .zip(peers.iter_mut()) .choose_multiple(rng, online_peers as usize) { - futures.push( - builder - .with_configuration(configuration.clone()) - .start_with_peer(peer), - ); + futures.push(builder.with_config(config.clone()).start_with_peer(peer)); } futures.collect::<()>().await; @@ -400,36 +389,32 @@ impl Drop for Peer { impl Peer { /// Returns per peer config with all addresses, keys, and id set up. - fn get_config(&self, configuration: Configuration) -> Configuration { - Configuration { - sumeragi: Box::new(SumeragiConfiguration { + fn get_config(&self, config: Config) -> Config { + use iroha_config::parameters::actual::{Common, Torii}; + + Config { + common: Common { key_pair: self.key_pair.clone(), - peer_id: self.id.clone(), - ..*configuration.sumeragi - }), - torii: Box::new(ToriiConfiguration { - p2p_addr: self.p2p_address.clone(), - api_url: self.api_address.clone(), - ..*configuration.torii - }), - logger: Box::new(LoggerConfiguration { - ..*configuration.logger - }), - public_key: self.key_pair.public_key().clone(), - private_key: self.key_pair.private_key().clone(), - ..configuration + p2p_address: self.p2p_address.clone(), + ..config.common + }, + torii: Torii { + address: self.api_address.clone(), + ..config.torii + }, + ..config } } /// Starts a peer with arguments. async fn start( &mut self, - configuration: Configuration, + config: Config, genesis: Option, temp_dir: Arc, ) { - let mut configuration = self.get_config(configuration); - configuration.kura.block_store_path = temp_dir.path().to_str().unwrap().into(); + let mut config = self.get_config(config); + config.kura.store_dir = temp_dir.path().to_str().unwrap().into(); let info_span = iroha_logger::info_span!( "test-peer", p2p_addr = %self.p2p_address, @@ -440,7 +425,7 @@ impl Peer { let handle = task::spawn( async move { - let mut iroha = Iroha::new(configuration, genesis, logger) + let mut iroha = Iroha::new(config, genesis, logger) .await .expect("Failed to start iroha"); let job_handle = iroha.start_as_task().unwrap(); @@ -523,7 +508,7 @@ impl>> From for WithGenesis { /// `PeerBuilder`. #[derive(Default)] pub struct PeerBuilder { - configuration: Option, + config: Option, genesis: WithGenesis, temp_dir: Option>, port: Option, @@ -567,8 +552,8 @@ impl PeerBuilder { /// Set Iroha configuration #[must_use] - pub fn with_configuration(mut self, configuration: Configuration) -> Self { - self.configuration = Some(configuration); + pub fn with_config(mut self, config: Config) -> Self { + self.config = Some(config); self } @@ -602,9 +587,9 @@ impl PeerBuilder { /// Accept a peer and starts it. 
pub async fn start_with_peer(self, peer: &mut Peer) { - let configuration = self.configuration.unwrap_or_else(|| { - let mut config = Configuration::test(); - config.sumeragi.trusted_peers.peers = unique_vec![peer.id.clone()]; + let config = self.config.unwrap_or_else(|| { + let mut config = Config::test(); + config.sumeragi.trusted_peers = unique_vec![peer.id.clone()]; config }); let genesis = match self.genesis { @@ -616,7 +601,7 @@ impl PeerBuilder { .temp_dir .unwrap_or_else(|| Arc::new(TempDir::new().expect("Failed to create temp dir."))); - peer.start(configuration, genesis, temp_dir).await; + peer.start(config, genesis, temp_dir).await; } /// Create and start a peer with preapplied arguments. @@ -628,19 +613,13 @@ impl PeerBuilder { /// Create and start a peer, create a client and connect it to the peer and return both. pub async fn start_with_client(self) -> (Peer, Client) { - let configuration = self - .configuration - .clone() - .unwrap_or_else(Configuration::test); + let config = self.config.clone().unwrap_or_else(Config::test); let peer = self.start().await; let client = Client::test(&peer.api_address); - time::sleep(Duration::from_millis( - configuration.sumeragi.pipeline_time_ms(), - )) - .await; + time::sleep(config.chain_wide.pipeline_time()).await; (peer, client) } @@ -666,7 +645,7 @@ pub trait TestRuntime { } /// Peer configuration mocking trait. -pub trait TestConfiguration { +pub trait TestConfig { /// Creates test configuration fn test() -> Self; /// Returns default pipeline time. @@ -676,9 +655,9 @@ pub trait TestConfiguration { } /// Client configuration mocking trait. -pub trait TestClientConfiguration { +pub trait TestClientConfig { /// Creates test client configuration - fn test(api_url: &SocketAddr) -> Self; + fn test(api_address: &SocketAddr) -> Self; } /// Client mocking trait @@ -766,63 +745,70 @@ impl TestRuntime for Runtime { } } -impl TestConfiguration for Configuration { +impl TestConfig for Config { fn test() -> Self { - let mut sample_proxy = iroha::samples::get_config_proxy( - UniqueVec::new(), + use iroha_config::{ + base::{FromEnv as _, StdEnv, UnwrapPartial as _}, + parameters::user::{CliContext, RootPartial}, + }; + + let mut layer = iroha::samples::get_user_config( + &UniqueVec::new(), Some(get_chain_id()), Some(get_key_pair()), - ); - let env_proxy = - ConfigurationProxy::from_std_env().expect("Test env variables should parse properly"); + ) + .merge(RootPartial::from_env(&StdEnv).expect("test env variables should parse properly")); + let (public_key, private_key) = KeyPair::generate().into(); - sample_proxy.public_key = Some(public_key); - sample_proxy.private_key = Some(private_key); - sample_proxy.override_with(env_proxy) - .build() - .expect("Test Iroha config failed to build. This is either a programmer error or a compiler bug.") + layer.public_key.set(public_key); + layer.private_key.set(private_key); + + layer + .unwrap_partial() + .expect("should not fail as all fields are present") + .parse(CliContext { + submit_genesis: true, + }) + .expect("Test Iroha config failed to build. 
This is likely to be a bug.") } fn pipeline_time() -> Duration { - Duration::from_millis(Self::test().sumeragi.pipeline_time_ms()) + Self::test().chain_wide.pipeline_time() } fn block_sync_gossip_time() -> Duration { - Duration::from_millis(Self::test().block_sync.gossip_period_ms) + Self::test().block_sync.gossip_period } } -impl TestClientConfiguration for ClientConfiguration { - fn test(api_url: &SocketAddr) -> Self { - let mut configuration = - iroha_client::samples::get_client_config(get_chain_id(), &get_key_pair()); - configuration.torii_api_url = format!("http://{api_url}") - .parse() - .expect("Should be valid url"); - configuration +impl TestClientConfig for ClientConfig { + fn test(api_address: &SocketAddr) -> Self { + iroha_client::samples::get_client_config( + get_chain_id(), + get_key_pair().clone(), + format!("http://{api_address}") + .parse() + .expect("should be valid url"), + ) } } impl TestClient for Client { - fn test(api_url: &SocketAddr) -> Self { - Client::new(&ClientConfiguration::test(api_url)).expect("Invalid client configuration") + fn test(api_addr: &SocketAddr) -> Self { + Client::new(ClientConfig::test(api_addr)) } - fn test_with_key(api_url: &SocketAddr, keys: KeyPair) -> Self { - let mut configuration = ClientConfiguration::test(api_url); - let (public_key, private_key) = keys.into(); - configuration.public_key = public_key; - configuration.private_key = private_key; - Client::new(&configuration).expect("Invalid client configuration") + fn test_with_key(api_addr: &SocketAddr, keys: KeyPair) -> Self { + let mut config = ClientConfig::test(api_addr); + config.key_pair = keys; + Client::new(config) } - fn test_with_account(api_url: &SocketAddr, keys: KeyPair, account_id: &AccountId) -> Self { - let mut configuration = ClientConfiguration::test(api_url); - configuration.account_id = account_id.clone(); - let (public_key, private_key) = keys.into(); - configuration.public_key = public_key; - configuration.private_key = private_key; - Client::new(&configuration).expect("Invalid client configuration") + fn test_with_account(api_addr: &SocketAddr, keys: KeyPair, account_id: &AccountId) -> Self { + let mut config = ClientConfig::test(api_addr); + config.account_id = account_id.clone(); + config.key_pair = keys; + Client::new(config) } fn for_each_event(self, event_filter: FilterBox, f: impl Fn(Result)) { @@ -899,6 +885,6 @@ impl TestClient for Client { ::Target: core::fmt::Debug, >::Error: Into, { - self.poll_request_with_period(request, Configuration::pipeline_time() / 2, 10, f) + self.poll_request_with_period(request, Config::pipeline_time() / 2, 10, f) } } diff --git a/crypto/src/lib.rs b/crypto/src/lib.rs index 853af121700..715a9603cf8 100755 --- a/crypto/src/lib.rs +++ b/crypto/src/lib.rs @@ -657,6 +657,14 @@ impl PrivateKey { PrivateKeyInner::BlsSmall(key) => key.to_bytes(), } } + + /// Extracts the raw bytes from the private key, copying the payload. + /// + /// `into_raw()` without copying is not provided because underlying crypto + /// libraries do not provide move functionality. 
+    pub fn to_raw(&self) -> (Algorithm, Vec<u8>) {
+        (self.algorithm(), self.payload())
+    }
 }
 
 #[cfg(not(feature = "ffi_import"))]
diff --git a/data_model/src/lib.rs b/data_model/src/lib.rs
index 3c7c6fcee4f..561228a73cc 100644
--- a/data_model/src/lib.rs
+++ b/data_model/src/lib.rs
@@ -592,6 +592,15 @@ pub mod model {
     #[ffi_type(unsafe {robust})]
     pub struct ChainId(Box<str>);
 
+    impl<T> From<T> for ChainId
+    where
+        T: Into<Box<str>>,
+    {
+        fn from(value: T) -> Self {
+            ChainId(value.into())
+        }
+    }
+
     /// Sized container for all possible identifications.
     #[derive(
         Debug,
@@ -965,7 +974,6 @@ pub mod model {
     /// Log level for reading from environment and (de)serializing
     #[derive(
         Debug,
-        Display,
         Clone,
         Copy,
         Default,
@@ -979,6 +987,8 @@ pub mod model {
         Decode,
         FromRepr,
         IntoSchema,
+        strum::Display,
+        strum::EnumString,
     )]
     #[allow(clippy::upper_case_acronyms)]
     #[repr(u8)]
@@ -1010,13 +1020,6 @@ pub mod model {
         /// in the next request to continue fetching results of the original query
         pub cursor: crate::query::cursor::ForwardCursor,
     }
-
-    impl ChainId {
-        /// Create new [`Self`]
-        pub fn new(inner: &str) -> Self {
-            Self(inner.into())
-        }
-    }
 }
 
 impl Decode for ChainId {
@@ -1024,7 +1027,17 @@
     fn decode<I: parity_scale_codec::Input>(
         input: &mut I,
     ) -> Result<Self, parity_scale_codec::Error> {
         let boxed: String = parity_scale_codec::Decode::decode(input)?;
-        Ok(Self::new(&boxed))
+        Ok(Self::from(boxed))
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn parse_level_from_str() {
+        assert_eq!("INFO".parse::<Level>().unwrap(), Level::INFO);
     }
 }
diff --git a/data_model/src/transaction.rs b/data_model/src/transaction.rs
index 63a8a682953..bd13a66cf28 100644
--- a/data_model/src/transaction.rs
+++ b/data_model/src/transaction.rs
@@ -735,8 +735,9 @@ mod http {
         }
 
         /// Set creation time of transaction
-        pub fn set_creation_time(&mut self, creation_time_ms: u64) -> &mut Self {
-            self.payload.creation_time_ms = creation_time_ms;
+        pub fn set_creation_time(&mut self, value: Duration) -> &mut Self {
+            self.payload.creation_time_ms =
+                u64::try_from(value.as_millis()).expect("should never exceed u64");
             self
         }
diff --git a/default_executor/README.md b/default_executor/README.md
index a404dd83950..0fe04b6fe24 100644
--- a/default_executor/README.md
+++ b/default_executor/README.md
@@ -4,5 +4,5 @@ Use the [Wasm Builder CLI](../tools/wasm_builder_cli) in order to build it:
 ```bash
 cargo run --bin iroha_wasm_builder_cli -- \
-  build ./default_executor --optimize --outfile ./configs/peer/executor.wasm
+  build ./default_executor --optimize --outfile ./configs/swarm/executor.wasm
 ```
\ No newline at end of file
diff --git a/docker-compose.single.yml b/docker-compose.single.yml
deleted file mode 100644
index 240d84cf190..00000000000
--- a/docker-compose.single.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-# This file is generated by iroha_swarm.
-# Do not edit it manually.
- -version: '3.8' -services: - iroha0: - build: ./ - platform: linux/amd64 - environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb"}' - TORII_P2P_ADDR: 0.0.0.0:1337 - TORII_API_URL: 0.0.0.0:8080 - IROHA_GENESIS_PUBLIC_KEY: ed01204164BF554923ECE1FD412D241036D863A6AE430476C898248B8237D77534CFC4 - IROHA_GENESIS_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"82b3bde54aebeca4146257da0de8d59d8e46d5fe34887dcd8072866792fcb3ad4164bf554923ece1fd412d241036d863a6ae430476c898248b8237d77534cfc4"}' - IROHA_GENESIS_FILE: /config/genesis.json - ports: - - 1337:1337 - - 8080:8080 - volumes: - - ./configs/peer:/config - init: true - command: iroha --submit-genesis - healthcheck: - test: test $(curl -s http://127.0.0.1:8080/status/blocks) -gt 0 - interval: 2s - timeout: 1s - retries: 30 - start_period: 4s diff --git a/genesis/src/lib.rs b/genesis/src/lib.rs index c23d7c3b900..b9eab5b2b2e 100644 --- a/genesis/src/lib.rs +++ b/genesis/src/lib.rs @@ -43,9 +43,7 @@ impl GenesisNetwork { /// Construct [`GenesisNetwork`] from configuration. /// /// # Errors - /// - If fails to sign a transaction (which means that the `key_pair` is malformed rather - /// than anything else) - /// - If transactions set is empty + /// If fails to resolve the executor pub fn new( raw_block: RawGenesisBlock, chain_id: &ChainId, @@ -82,6 +80,7 @@ pub struct RawGenesisBlock { /// Transactions transactions: Vec, /// Runtime Executor + // TODO `RawGenesisBlock` should have evaluated executor, i.e. loaded executor: ExecutorMode, } @@ -93,7 +92,7 @@ impl RawGenesisBlock { /// /// # Errors /// If file not found or deserialization from file fails. 
-    pub fn from_path<P: AsRef<Path> + Debug>(path: P) -> Result<Self> {
+    pub fn from_path<P: AsRef<Path>>(path: P) -> Result<Self> {
         let file = File::open(&path)
             .wrap_err_with(|| eyre!("Failed to open {}", path.as_ref().display()))?;
         let size = file
@@ -104,8 +103,12 @@ impl RawGenesisBlock {
             eprintln!("Genesis is quite large, it will take some time to apply it (size = {}, threshold = {})", size, Self::WARN_ON_GENESIS_GTE);
         }
         let reader = BufReader::new(file);
-        let mut raw_genesis_block: Self = serde_json::from_reader(reader)
-            .wrap_err_with(|| eyre!("Failed to deserialize raw genesis block from {:?}", &path))?;
+        let mut raw_genesis_block: Self = serde_json::from_reader(reader).wrap_err_with(|| {
+            eyre!(
+                "Failed to deserialize raw genesis block from {:?}",
+                path.as_ref().display()
+            )
+        })?;
         raw_genesis_block.executor.set_genesis_path(path);
         Ok(raw_genesis_block)
     }
@@ -135,6 +138,7 @@ impl ExecutorMode {
     }
 }
 
+/// Loads the executor from the path or uses the inline blob for conversion
 impl TryFrom<ExecutorMode> for Executor {
     type Error = ErrReport;
 
@@ -356,7 +360,7 @@ mod tests {
 
     #[test]
     fn load_new_genesis_block() -> Result<()> {
-        let chain_id = ChainId::new("0");
+        let chain_id = ChainId::from("0");
         let genesis_key_pair = KeyPair::generate();
         let (alice_public_key, _) = KeyPair::generate().into();
diff --git a/logger/src/lib.rs b/logger/src/lib.rs
index f84ddc6a7d8..87a5ac3ed60 100644
--- a/logger/src/lib.rs
+++ b/logger/src/lib.rs
@@ -13,8 +13,11 @@ use std::{
 
 use actor::LoggerHandle;
 use color_eyre::{eyre::eyre, Report, Result};
-pub use iroha_config::logger::{Configuration, ConfigurationProxy, Format, Level};
-use iroha_config::{base::proxy::Builder, logger::into_tracing_level};
+use iroha_config::logger::into_tracing_level;
+pub use iroha_config::{
+    logger::{Format, Level},
+    parameters::actual::Logger as Config,
+};
 use tracing::subscriber::set_global_default;
 pub use tracing::{
     debug, debug_span, error, error_span, info, info_span, instrument as log, trace, trace_span,
@@ -50,7 +53,7 @@ fn try_set_logger() -> Result<()> {
 /// If the logger is already set, raises a generic error.
 // TODO: refactor configuration in a way that `terminal_colors` is part of it
 // https://github.com/hyperledger/iroha/issues/3500
-pub fn init_global(configuration: &Configuration, terminal_colors: bool) -> Result<LoggerHandle> {
+pub fn init_global(configuration: &Config, terminal_colors: bool) -> Result<LoggerHandle> {
     try_set_logger()?;
 
     let layer = tracing_subscriber::fmt::layer()
@@ -69,7 +72,6 @@
 ///
 /// # Panics
 /// If [`init_global`] or [`disable_global`] were called first.
-#[allow(clippy::needless_update)] // `tokio-console` feature adds additional fields to Configuration
 pub fn test_logger() -> LoggerHandle {
     static LOGGER: OnceLock<LoggerHandle> = OnceLock::new();
 
@@ -80,10 +82,11 @@ pub fn test_logger() -> LoggerHandle {
     // with ENV vars rather than by extending `test_logger` signature. This will both keep
    // `test_logger` simple and also emphasise the isolation which is necessary anyway in
     // case of singleton mocking (where the logger is the singleton).
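The caching described in the comment above is the standard `OnceLock` singleton pattern: the first caller initialises the logger, every later caller receives the cached handle, and the re-initialisation that `init_global` would reject never happens. A minimal sketch of the same shape, where `Handle` and the closure body are placeholders for the real `LoggerHandle` and the `Config`/`init_global` wiring:

```rust
use std::sync::OnceLock;

// Placeholder for the real `LoggerHandle`.
#[derive(Clone)]
struct Handle(&'static str);

fn test_logger() -> Handle {
    static LOGGER: OnceLock<Handle> = OnceLock::new();
    LOGGER
        .get_or_init(|| {
            // The real body builds a debug/pretty `Config` and calls
            // `init_global` exactly once; a second initialisation would
            // fail, which is why the handle has to be cached here.
            Handle("initialised once")
        })
        .clone()
}

fn main() {
    let first = test_logger();
    let second = test_logger(); // cached, no re-initialisation
    assert_eq!(first.0, second.0);
}
```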
- let config = Configuration { + #[allow(clippy::needless_update)] // triggers without "tokio-console" feature + let config = Config { level: Level::DEBUG, format: Format::Pretty, - ..ConfigurationProxy::default().build().unwrap() + ..Config::default() }; init_global(&config, true).expect( @@ -103,7 +106,7 @@ pub fn disable_global() -> Result<()> { try_set_logger() } -fn step2(configuration: &Configuration, layer: L) -> Result +fn step2(configuration: &Config, layer: L) -> Result where L: tracing_subscriber::Layer + Debug + Send + Sync + 'static, { diff --git a/logger/tests/setting_logger.rs b/logger/tests/setting_logger.rs index 209a6b45928..6f118366562 100644 --- a/logger/tests/setting_logger.rs +++ b/logger/tests/setting_logger.rs @@ -1,11 +1,8 @@ -use iroha_config::base::proxy::Builder; -use iroha_logger::{init_global, ConfigurationProxy}; +use iroha_logger::{init_global, Config}; #[tokio::test] async fn setting_logger_twice_fails() { - let cfg = ConfigurationProxy::default() - .build() - .expect("Default logger config always builds"); + let cfg = Config::default(); let first = init_global(&cfg, false); assert!(first.is_ok()); diff --git a/macro/utils/Cargo.toml b/macro/utils/Cargo.toml index 08c1dce1270..6ee6c66a572 100644 --- a/macro/utils/Cargo.toml +++ b/macro/utils/Cargo.toml @@ -19,4 +19,4 @@ darling = { workspace = true } quote = { workspace = true } proc-macro2 = { workspace = true } manyhow = { workspace = true } -drop_bomb = "0.1.5" +drop_bomb = { workspace = true } diff --git a/p2p/src/network.rs b/p2p/src/network.rs index 453e4d23ad4..c867746b435 100644 --- a/p2p/src/network.rs +++ b/p2p/src/network.rs @@ -321,7 +321,7 @@ impl NetworkBase { } for public_key in to_disconnect { - self.disconnect_peer(public_key) + self.disconnect_peer(&public_key) } } @@ -344,14 +344,14 @@ impl NetworkBase { ); } - fn disconnect_peer(&mut self, public_key: PublicKey) { - let peer = match self.peers.remove(&public_key) { + fn disconnect_peer(&mut self, public_key: &PublicKey) { + let peer = match self.peers.remove(public_key) { Some(peer) => peer, _ => return iroha_logger::warn!(?public_key, "Not found peer to disconnect"), }; iroha_logger::debug!(listen_addr = %self.listen_addr, %peer.conn_id, "Disconnecting peer"); - let peer_id = PeerId::new(peer.p2p_addr, public_key); + let peer_id = PeerId::new(peer.p2p_addr, public_key.clone()); Self::remove_online_peer(&self.online_peers_sender, &peer_id); } diff --git a/p2p/tests/integration/p2p.rs b/p2p/tests/integration/p2p.rs index 61acb9ddaa0..a1b688231e0 100644 --- a/p2p/tests/integration/p2p.rs +++ b/p2p/tests/integration/p2p.rs @@ -3,15 +3,14 @@ use std::{ fmt::Debug, sync::{ atomic::{AtomicU32, Ordering}, - Arc, Once, + Arc, }, }; use futures::{prelude::*, stream::FuturesUnordered, task::AtomicWaker}; -use iroha_config_base::proxy::Builder; use iroha_crypto::KeyPair; use iroha_data_model::prelude::PeerId; -use iroha_logger::{prelude::*, ConfigurationProxy}; +use iroha_logger::{prelude::*, test_logger}; use iroha_p2p::{network::message::*, NetworkHandle}; use iroha_primitives::addr::socket_addr; use parity_scale_codec::{Decode, Encode}; @@ -24,16 +23,7 @@ use tokio::{ struct TestMessage(String); fn setup_logger() { - static INIT: Once = Once::new(); - - INIT.call_once(|| { - let mut config = ConfigurationProxy::default() - .build() - .expect("Default logger config failed to build. 
This is a programmer error"); - config.level = iroha_logger::Level::TRACE; - config.format = iroha_logger::Format::Pretty; - iroha_logger::init_global(&config, true).unwrap(); - }) + test_logger(); } /// This test creates a network and one peer. diff --git a/scripts/requirements.txt b/scripts/requirements.txt new file mode 100644 index 00000000000..6b0fcf8b9ef --- /dev/null +++ b/scripts/requirements.txt @@ -0,0 +1 @@ +tomli_w==1.0.0 diff --git a/scripts/test_env.py b/scripts/test_env.py index b6d3b858910..13301c1c8ee 100755 --- a/scripts/test_env.py +++ b/scripts/test_env.py @@ -17,8 +17,10 @@ import time import urllib.error import urllib.request -import uuid +import tomli_w +SWARM_CONFIGS_DIRECTORY = pathlib.Path("configs/swarm") +SHARED_CONFIG_FILE_NAME = "config.base.toml" class Network: """ @@ -27,36 +29,32 @@ class Network: def __init__(self, args: argparse.Namespace): logging.info("Setting up test environment...") - self.out_dir = args.out_dir - peers_dir = args.out_dir.joinpath("peers") + self.out_dir = pathlib.Path(args.out_dir) + peers_dir = self.out_dir / "peers" os.makedirs(peers_dir, exist_ok=True) - self.shared_env = dict(os.environ) self.peers = [_Peer(args, i) for i in range(args.n_peers)] - try: - shutil.copy2(f"{args.root_dir}/configs/peer/config.json", peers_dir) - # genesis should be supplied only for the first peer - peer_0_dir = self.peers[0].peer_dir - shutil.copy2(f"{args.root_dir}/configs/peer/genesis.json", peer_0_dir) - # assuming that `genesis.json` contains path to the executor as `./executor.wasm` - shutil.copy2(f"{args.root_dir}/configs/peer/executor.wasm", peer_0_dir) - except FileNotFoundError: - logging.error(f"Some of the config files are missing. \ - Please provide them in the `{args.root_dir}/configs/peer` directory") - sys.exit(1) - copy_or_prompt_build_bin("iroha", args.root_dir, peers_dir) + logging.info("Generating shared configuration...") + trusted_peers = [{"address": f"{peer.host_ip}:{peer.p2p_port}", "public_key": peer.public_key} for peer in self.peers] + shared_config = { + "chain_id": "00000000-0000-0000-0000-000000000000", + "genesis": { + "public_key": self.peers[0].public_key + }, + "sumeragi": { + "trusted_peers": trusted_peers + }, + "logger": { + "level": "INFO", + "format": "pretty", + } + } + with open(peers_dir / SHARED_CONFIG_FILE_NAME, "wb") as f: + tomli_w.dump(shared_config, f) - self.shared_env["IROHA_CHAIN_ID"] = "00000000-0000-0000-0000-000000000000" - self.shared_env["IROHA_CONFIG"] = str(peers_dir.joinpath("config.json")) - self.shared_env["IROHA_GENESIS_PUBLIC_KEY"] = self.peers[0].public_key + copy_or_prompt_build_bin("iroha", args.root_dir, peers_dir) - logging.info("Generating trusted peers...") - self.trusted_peers = [] - for peer in self.peers: - peer_entry = {"address": f"{peer.host_ip}:{peer.p2p_port}", "public_key": peer.public_key} - self.trusted_peers.append(json.dumps(peer_entry)) - self.shared_env["SUMERAGI_TRUSTED_PEERS"] = f"[{','.join(self.trusted_peers)}]" def wait_for_genesis(self, n_tries: int): for i in range(n_tries): @@ -79,7 +77,7 @@ def wait_for_genesis(self, n_tries: int): def run(self): for i, peer in enumerate(self.peers): - peer.run(shared_env=self.shared_env, submit_genesis=(i == 0)) + peer.run(submit_genesis=(i == 0)) self.wait_for_genesis(20) class _Peer: @@ -93,14 +91,15 @@ def __init__(self, args: argparse.Namespace, nth: int): self.p2p_port = 1337 + nth self.api_port = 8080 + nth self.tokio_console_port = 5555 + nth - self.out_dir = args.out_dir - self.root_dir = args.root_dir - 
self.peer_dir = self.out_dir.joinpath(f"peers/{self.name}") + self.out_dir = pathlib.Path(args.out_dir) + self.root_dir = pathlib.Path(args.root_dir) + self.peer_dir = self.out_dir / "peers" / self.name + self.config_path = self.peer_dir / "config.toml" self.host_ip = args.host_ip logging.info(f"Peer {self.name} generating key pair...") - command = [f"{self.out_dir}/kagami", "crypto", "-j"] + command = [self.out_dir / "kagami", "crypto", "-j"] if args.peer_name_as_seed: command.extend(["-s", self.name]) kagami = subprocess.run(command, capture_output=True) @@ -108,42 +107,67 @@ def __init__(self, args: argparse.Namespace, nth: int): logging.error("Kagami failed to generate a key pair.") sys.exit(3) str_keypair = kagami.stdout - json_keypair = json.loads(str_keypair) - # public key is a string, private key is a json object - self.public_key = json_keypair['public_key'] - self.private_key = json.dumps(json_keypair['private_key']) + # dict with `{ public_key: string, private_key: { digest_function: string, payload: string } }` + self.key_pair = json.loads(str_keypair) os.makedirs(self.peer_dir, exist_ok=True) - os.makedirs(self.peer_dir.joinpath("storage"), exist_ok=True) + config = { + "extends": f"../{SHARED_CONFIG_FILE_NAME}", + "public_key": self.public_key, + "private_key": self.private_key, + "network": { + "address": f"{self.host_ip}:{self.p2p_port}" + }, + "torii": { + "address": f"{self.host_ip}:{self.api_port}" + }, + "kura": { + "store_dir": "storage" + }, + "snapshot": { + "store_dir": "storage/snapshot" + }, + # it is not available in debug iroha build + # "logger": { + # "tokio_console_addr": f"{self.host_ip}:{self.tokio_console_port}", + # } + } + if nth == 0: + try: + shutil.copy2(self.root_dir / SWARM_CONFIGS_DIRECTORY / "genesis.json", self.peer_dir) + # assuming that `genesis.json` contains path to the executor as `./executor.wasm` + shutil.copy2(self.root_dir / SWARM_CONFIGS_DIRECTORY / "executor.wasm", self.peer_dir) + except FileNotFoundError: + target = self.root_dir / SWARM_CONFIGS_DIRECTORY + logging.error(f"Some of the config files are missing. 
\ + Please provide them in the `{target}` directory") + sys.exit(1) + config["genesis"] = { + "private_key": self.private_key, + "file": "./genesis.json" + } + with open(self.config_path, "wb") as f: + tomli_w.dump(config, f) logging.info(f"Peer {self.name} initialized") - def run(self, shared_env: dict(), submit_genesis: bool = False): - logging.info(f"Running peer {self.name}...") + @property + def public_key(self): + return self.key_pair["public_key"] + + @property + def private_key(self): + return self.key_pair["private_key"] - peer_env = dict(shared_env) - peer_env["KURA_BLOCK_STORE_PATH"] = str(self.peer_dir.joinpath("storage")) - peer_env["SNAPSHOT_DIR_PATH"] = str(self.peer_dir.joinpath("storage")) - peer_env["LOG_LEVEL"] = "INFO" - peer_env["LOG_FORMAT"] = '"pretty"' - peer_env["LOG_TOKIO_CONSOLE_ADDR"] = f"{self.host_ip}:{self.tokio_console_port}" - peer_env["IROHA_PUBLIC_KEY"] = self.public_key - peer_env["IROHA_PRIVATE_KEY"] = self.private_key - peer_env["SUMERAGI_DEBUG_FORCE_SOFT_FORK"] = "false" - peer_env["TORII_P2P_ADDR"] = f"{self.host_ip}:{self.p2p_port}" - peer_env["TORII_API_URL"] = f"{self.host_ip}:{self.api_port}" - - if submit_genesis: - peer_env["IROHA_GENESIS_PRIVATE_KEY"] = self.private_key - # Assuming it was copied to the peer's directory - peer_env["IROHA_GENESIS_FILE"] = str(self.peer_dir.joinpath("genesis.json")) + def run(self, submit_genesis: bool = False): + logging.info(f"Running peer {self.name}...") # FD never gets closed - stdout_file = open(self.peer_dir.joinpath(".stdout"), "w") - stderr_file = open(self.peer_dir.joinpath(".stderr"), "w") + stdout_file = open(self.peer_dir / ".stdout", "w") + stderr_file = open(self.peer_dir / ".stderr", "w") # These processes are created detached from the parent process already - subprocess.Popen([self.name] + (["--submit-genesis"] if submit_genesis else []), - executable=f"{self.out_dir}/peers/iroha", env=peer_env, stdout=stdout_file, stderr=stderr_file) + subprocess.Popen([self.name, "--config", self.config_path] + (["--submit-genesis"] if submit_genesis else []), + executable=self.out_dir / "peers/iroha", stdout=stdout_file, stderr=stderr_file) def pos_int(arg): if int(arg) > 0: @@ -152,8 +176,9 @@ def pos_int(arg): raise argparse.ArgumentTypeError(f"Argument {arg} must be a positive integer") def copy_or_prompt_build_bin(bin_name: str, root_dir: pathlib.Path, target_dir: pathlib.Path): + bin_path = root_dir / "target/debug" / bin_name try: - shutil.copy2(f"{root_dir}/target/debug/{bin_name}", target_dir) + shutil.copy2(bin_path, target_dir) except FileNotFoundError: logging.error(f"The binary `{bin_name}` wasn't found in `{root_dir}` directory") while True: @@ -163,7 +188,7 @@ def copy_or_prompt_build_bin(bin_name: str, root_dir: pathlib.Path, target_dir: ["cargo", "build", "--bin", bin_name], cwd=root_dir ) - shutil.copy2(f"{root_dir}/target/debug/{bin_name}", target_dir) + shutil.copy2(bin_path, target_dir) break elif prompt.lower() in ["n", "no"]: logging.critical("Can't launch the network without the binary. 
Aborting...") @@ -195,7 +220,7 @@ def setup(args: argparse.Namespace): copy_or_prompt_build_bin("iroha_client_cli", args.root_dir, args.out_dir) with open(os.path.join(args.out_dir, "metadata.json"), "w") as f: f.write('{"comment":{"String": "Hello Meta!"}}') - shutil.copy2(f"{args.root_dir}/configs/client/config.json", args.out_dir) + shutil.copy2(pathlib.Path(args.root_dir) / SWARM_CONFIGS_DIRECTORY / "client.toml", args.out_dir) copy_or_prompt_build_bin("kagami", args.root_dir, args.out_dir) Network(args).run() diff --git a/scripts/tests/consistency.sh b/scripts/tests/consistency.sh index 190024d2135..ba3a34531f5 100755 --- a/scripts/tests/consistency.sh +++ b/scripts/tests/consistency.sh @@ -3,18 +3,8 @@ set -e case $1 in "genesis") - cargo run --release --bin kagami -- genesis --executor-path-in-genesis ./executor.wasm | diff - configs/peer/genesis.json || { - echo 'Please re-generate the genesis with `cargo run --release --bin kagami -- genesis --executor-path-in-genesis ./executor.wasm > configs/peer/genesis.json`' - exit 1 - };; - "client") - cargo run --release --bin kagami -- config client | diff - configs/client/config.json || { - echo 'Please re-generate client config with `cargo run --release --bin kagami -- config client > configs/client/config.json`' - exit 1 - };; - "peer") - cargo run --release --bin kagami -- config peer | diff - configs/peer/config.json || { - echo 'Please re-generate peer config with `cargo run --release --bin kagami -- config peer > configs/peer/config.json`' + cargo run --release --bin kagami -- genesis --executor-path-in-genesis ./executor.wasm | diff - configs/swarm/genesis.json || { + echo 'Please re-generate the genesis with `cargo run --release --bin kagami -- genesis --executor-path-in-genesis ./executor.wasm > configs/swarm/genesis.json`' exit 1 };; "schema") @@ -29,7 +19,7 @@ case $1 in # FIXME: not nice; add an option to `kagami swarm` to print content into stdout? # it is not a default behaviour because Kagami resolves `build` path relative # to the output file location - temp_file="docker-compose.TMP.yml" + temp_file="configs/swarm/docker-compose.TMP.yml" full_cmd="$cmd_base --outfile $temp_file" eval "$full_cmd" @@ -40,19 +30,19 @@ case $1 in } command_base_for_single() { - echo "cargo run --release --bin iroha_swarm -- -p 1 -s Iroha --force --config-dir ./configs/peer --health-check --build ." + echo "cargo run --release --bin iroha_swarm -- -p 1 -s Iroha --force --config-dir ./configs/swarm --health-check --build ." } command_base_for_multiple_local() { - echo "cargo run --release --bin iroha_swarm -- -p 4 -s Iroha --force --config-dir ./configs/peer --health-check --build ." + echo "cargo run --release --bin iroha_swarm -- -p 4 -s Iroha --force --config-dir ./configs/swarm --health-check --build ." 
 }
 
 command_base_for_default() {
-    echo "cargo run --release --bin iroha_swarm -- -p 4 -s Iroha --force --config-dir ./configs/peer --health-check --image hyperledger/iroha2:dev"
+    echo "cargo run --release --bin iroha_swarm -- -p 4 -s Iroha --force --config-dir ./configs/swarm --health-check --image hyperledger/iroha2:dev"
 }
 
-        do_check "$(command_base_for_single)" "docker-compose.single.yml"
-        do_check "$(command_base_for_multiple_local)" "docker-compose.local.yml"
-        do_check "$(command_base_for_default)" "docker-compose.yml"
+        do_check "$(command_base_for_single)" "configs/swarm/docker-compose.single.yml"
+        do_check "$(command_base_for_multiple_local)" "configs/swarm/docker-compose.local.yml"
+        do_check "$(command_base_for_default)" "configs/swarm/docker-compose.yml"
 esac
diff --git a/scripts/tests/panic_on_invalid_genesis.sh b/scripts/tests/panic_on_invalid_genesis.sh
index ed95926b645..6ce79c0476e 100755
--- a/scripts/tests/panic_on_invalid_genesis.sh
+++ b/scripts/tests/panic_on_invalid_genesis.sh
@@ -1,6 +1,7 @@
 #!/bin/bash
 set -ex
 # Setup env
+# FIXME: these are obsolete
 export TORII_P2P_ADDR='127.0.0.1:1341'
 export TORII_API_URL='127.0.0.1:8084'
 export IROHA_PUBLIC_KEY='ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B'
@@ -18,6 +19,6 @@ trap 'rm -rf -- "$IROHA2_GENESIS_PATH" "$KURA_BLOCK_STORE_PATH"' EXIT
 # Create invalid genesis
 # NewAssetDefinition replaced with AssetDefinition
-sed 's/NewAssetDefinition/AssetDefinition/' ./configs/peer/genesis.json > $IROHA2_GENESIS_PATH
+sed 's/NewAssetDefinition/AssetDefinition/' ./configs/swarm/genesis.json > $IROHA2_GENESIS_PATH
 
 timeout 1m target/debug/iroha --submit-genesis 2>&1 | tee /dev/stderr | grep -q 'Transaction validation failed in genesis block'
diff --git a/telemetry/src/dev.rs b/telemetry/src/dev.rs
index 674b7bca748..257b980165d 100644
--- a/telemetry/src/dev.rs
+++ b/telemetry/src/dev.rs
@@ -1,7 +1,7 @@
 //! Module with development telemetry
 
 use eyre::{Result, WrapErr};
-use iroha_config::telemetry::DevTelemetryConfig;
+use iroha_config::parameters::actual::DevTelemetry as Config;
 use iroha_logger::telemetry::Event as Telemetry;
 use tokio::{
     fs::OpenOptions,
@@ -14,12 +14,7 @@ use tokio_stream::{wrappers::BroadcastStream, StreamExt};
 
 /// Starts telemetry writing to a file
 /// # Errors
 /// Fails if unable to open the file
-pub async fn start(
-    DevTelemetryConfig {
-        file: telemetry_file,
-    }: DevTelemetryConfig,
-    telemetry: Receiver<Telemetry>,
-) -> Result<JoinHandle<()>> {
+pub async fn start(config: Config, telemetry: Receiver<Telemetry>) -> Result<JoinHandle<()>> {
     let mut stream = crate::futures::get_stream(BroadcastStream::new(telemetry).fuse());
 
     let mut file = OpenOptions::new()
@@ -30,7 +25,7 @@
         //.append(true)
         .create(true)
         .truncate(true)
-        .open(telemetry_file)
+        .open(config.out_file)
         .await
         .wrap_err("Failed to create and open file for telemetry")?;
diff --git a/telemetry/src/lib.rs b/telemetry/src/lib.rs
index 8ba2ec2e2fb..0fb3ec02ebd 100644
--- a/telemetry/src/lib.rs
+++ b/telemetry/src/lib.rs
@@ -7,7 +7,9 @@ pub mod metrics;
 mod retry_period;
 pub mod ws;
 
-pub use iroha_config::telemetry::Configuration;
+pub use iroha_config::parameters::actual::{
+    DevTelemetry as DevTelemetryConfig, Telemetry as TelemetryConfig,
+};
 pub use iroha_telemetry_derive::metrics;
 
 pub mod msg {
diff --git a/telemetry/src/retry_period.rs b/telemetry/src/retry_period.rs
index b27d1b7d7fa..9f0f1d5eb24 100644
--- a/telemetry/src/retry_period.rs
+++ b/telemetry/src/retry_period.rs
@@ -1,10 +1,12 @@
-//! Retry period that is calculated as `min_period * 2 ^ min(exponent, max_exponent)`
+//! Period for re-entrant polling
+
+use std::time::Duration;
 
 /// Period for re-entrant polling
 #[derive(Clone, Copy, Debug)]
 pub struct RetryPeriod {
     /// The minimum period
-    min_period: u64,
+    min_period: Duration,
     /// The maximum exponent
     max_exponent: u8,
     /// The current exponent
@@ -13,7 +15,7 @@
 impl RetryPeriod {
     /// Constructs a new object
-    pub const fn new(min_period: u64, max_exponent: u8) -> Self {
+    pub const fn new(min_period: Duration, max_exponent: u8) -> Self {
         Self {
             min_period,
             max_exponent,
@@ -30,27 +32,25 @@
         }
     }
 
-    /// Returns the period
-    pub fn period(&mut self) -> u64 {
-        let mult = 2_u64.saturating_pow(self.exponent.into());
+    /// Retry period that is calculated as `min_period * 2 ^ min(exponent, max_exponent)`
+    pub fn period(&mut self) -> Duration {
+        let mult = 2_u32.saturating_pow(self.exponent.into());
         self.min_period.saturating_mul(mult)
     }
 }
 
 #[cfg(test)]
 mod tests {
+    use super::*;
+
     #[test]
     fn increase_exponent_saturates() {
-        let mut period = super::RetryPeriod {
-            min_period: 32000_u64,
-            max_exponent: u8::MAX,
-            exponent: (u8::MAX - 1),
-        };
-        println!("testing {period:?}");
-        let old = period.period();
-        period.increase_exponent();
-        assert_eq!(period.period(), 2_u64.saturating_mul(old));
-        period.increase_exponent();
-        assert_eq!(period.period(), 2_u64.saturating_mul(old));
+        let mut value = RetryPeriod::new(Duration::from_secs(42), 10);
+        println!("testing {value:?}");
+        let initial_period = value.period();
+        value.increase_exponent();
+        assert_eq!(value.period(), initial_period.saturating_mul(2));
+        value.increase_exponent();
+        assert_eq!(value.period(), initial_period.saturating_mul(4));
     }
 }
diff --git a/telemetry/src/ws.rs b/telemetry/src/ws.rs
index c8f1486e76c..e09d038854b 100644
--- a/telemetry/src/ws.rs
+++ b/telemetry/src/ws.rs
@@ -1,10 +1,9 @@
-//! 
Telemetry sent to a server -use std::time::Duration; use chrono::Local; use eyre::{eyre, Result}; use futures::{stream::SplitSink, Sink, SinkExt, StreamExt}; -use iroha_config::telemetry::RegularTelemetryConfig; +use iroha_config::parameters::actual::Telemetry as Config; use iroha_logger::telemetry::Event as Telemetry; use serde_json::Map; use tokio::{ @@ -29,12 +28,12 @@ const INTERNAL_CHANNEL_CAPACITY: usize = 10; /// # Errors /// Fails if unable to connect to the server pub async fn start( - RegularTelemetryConfig { + Config { name, url, max_retry_delay_exponent, min_retry_period, - }: RegularTelemetryConfig, + }: Config, telemetry: broadcast::Receiver, ) -> Result> { iroha_logger::info!(%url, "Starting telemetry"); @@ -164,10 +163,13 @@ where fn schedule_reconnect(&mut self) { self.retry_period.increase_exponent(); let period = self.retry_period.period(); - iroha_logger::debug!("Scheduled reconnecting to telemetry in {} seconds", period); + iroha_logger::debug!( + "Scheduled reconnecting to telemetry in {} seconds", + period.as_secs() + ); let sender = self.internal_sender.clone(); tokio::task::spawn(async move { - tokio::time::sleep(Duration::from_secs(period)).await; + tokio::time::sleep(period).await; let _ = sender.send(InternalMessage::Reconnect).await; }); } @@ -393,7 +395,7 @@ mod tests { fail: Arc::clone(&fail_factory_create), sender: message_sender, }, - RetryPeriod::new(1, 0), + RetryPeriod::new(Duration::from_secs(1), 0), internal_sender, ); tokio::task::spawn(async move { diff --git a/tools/kagami/src/config.rs b/tools/kagami/src/config.rs deleted file mode 100644 index 10c9aab9255..00000000000 --- a/tools/kagami/src/config.rs +++ /dev/null @@ -1,96 +0,0 @@ -use std::str::FromStr as _; - -use clap::{Parser, Subcommand}; -use iroha_crypto::{Algorithm, PrivateKey, PublicKey}; -use iroha_primitives::small::SmallStr; - -use super::*; - -#[derive(Parser, Debug, Clone)] -pub struct Args { - #[clap(subcommand)] - mode: Mode, -} - -#[derive(Subcommand, Debug, Clone)] -pub enum Mode { - Client(client::Args), - Peer(peer::Args), -} - -impl RunArgs for Args { - fn run(self, writer: &mut BufWriter) -> Outcome { - match self.mode { - Mode::Client(args) => args.run(writer), - Mode::Peer(args) => args.run(writer), - } - } -} - -mod client { - use iroha_config::{ - client::{BasicAuth, ConfigurationProxy, WebLogin}, - torii::uri::DEFAULT_API_ADDR, - }; - - use super::*; - - #[derive(ClapArgs, Debug, Clone, Copy)] - pub struct Args; - - impl RunArgs for Args { - fn run(self, writer: &mut BufWriter) -> Outcome { - let config = ConfigurationProxy { - chain_id: Some(ChainId::new("00000000-0000-0000-0000-000000000000")), - torii_api_url: Some(format!("http://{DEFAULT_API_ADDR}").parse()?), - account_id: Some("alice@wonderland".parse()?), - basic_auth: Some(Some(BasicAuth { - web_login: WebLogin::new("mad_hatter")?, - password: SmallStr::from_str("ilovetea"), - })), - public_key: Some(PublicKey::from_str( - "ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0", - )?), - private_key: Some(PrivateKey::from_hex( - Algorithm::Ed25519, - "9AC47ABF59B356E0BD7DCBBBB4DEC080E302156A48CA907E47CB6AEA1D32719E7233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0" - )?), - ..ConfigurationProxy::default() - } - .build()?; - writeln!(writer, "{}", serde_json::to_string_pretty(&config)?) 
- .wrap_err("Failed to write serialized client configuration to the buffer.") - } - } -} - -mod peer { - use std::path::PathBuf; - - use iroha_config::iroha::ConfigurationProxy as IrohaConfigurationProxy; - - use super::*; - - #[derive(ClapArgs, Debug, Clone)] - pub struct Args { - /// Specifies the value of `genesis.file` configuration parameter. - /// - /// Note: relative paths are not resolved but included as-is. - #[arg(long, value_name = "PATH")] - genesis_file_in_config: Option, - } - - impl RunArgs for Args { - fn run(self, writer: &mut BufWriter) -> Outcome { - let mut config = IrohaConfigurationProxy::default(); - - if let Some(path) = self.genesis_file_in_config { - let genesis = config.genesis.as_mut().unwrap(); - genesis.file = Some(Some(path)); - } - - writeln!(writer, "{}", serde_json::to_string_pretty(&config)?) - .wrap_err("Failed to write serialized peer configuration to the buffer.") - } - } -} diff --git a/tools/kagami/src/genesis.rs b/tools/kagami/src/genesis.rs index b873dd85c4c..4c6d5e67e75 100644 --- a/tools/kagami/src/genesis.rs +++ b/tools/kagami/src/genesis.rs @@ -1,7 +1,11 @@ use std::path::PathBuf; use clap::{ArgGroup, Parser, Subcommand}; -use iroha_config::{sumeragi::default::*, wasm::default::*, wsv::default::*}; +use iroha_config::parameters::defaults::chain_wide::{ + DEFAULT_BLOCK_TIME, DEFAULT_COMMIT_TIME, DEFAULT_IDENT_LENGTH_LIMITS, DEFAULT_MAX_TXS, + DEFAULT_METADATA_LIMITS, DEFAULT_TRANSACTION_LIMITS, DEFAULT_WASM_FUEL_LIMIT, + DEFAULT_WASM_MAX_MEMORY_BYTES, +}; use iroha_data_model::{ asset::AssetValueType, metadata::Limits, @@ -175,9 +179,9 @@ pub fn generate_default(executor: ExecutorMode) -> color_eyre::Result color_eyre::Result RunArgs for Args { @@ -63,7 +60,6 @@ impl RunArgs for Args { Crypto(args) => args.run(writer), Schema(args) => args.run(writer), Genesis(args) => args.run(writer), - Config(args) => args.run(writer), } } } diff --git a/tools/swarm/Cargo.toml b/tools/swarm/Cargo.toml index da057384bb0..ba26e2d5195 100644 --- a/tools/swarm/Cargo.toml +++ b/tools/swarm/Cargo.toml @@ -21,6 +21,7 @@ serde = { workspace = true, features = ["derive"] } clap = { workspace = true, features = ["derive"] } serde_yaml.workspace = true serde_json.workspace = true +serde_with = { workspace = true, features = ["json", "macros", "hex"] } derive_more.workspace = true inquire.workspace = true diff --git a/tools/swarm/README.md b/tools/swarm/README.md index 3db15c69284..d0bf02a9ea8 100644 --- a/tools/swarm/README.md +++ b/tools/swarm/README.md @@ -21,14 +21,14 @@ iroha_swarm ## Examples -Generate `docker-compose.dev.yml` with 5 peers, using `iroha` utf-8 bytes as a cryptographic seed, using `./configs/peer` as a directory with configuration, and using `.` as a directory with `Dockerfile` of Iroha: +Generate `docker-compose.dev.yml` with 5 peers, using `iroha` utf-8 bytes as a cryptographic seed, using `./peer_config` as a directory with configuration, and using `.` as a directory with `Dockerfile` of Iroha: ```bash iroha_swarm \ --build . 
\ --peers 5 \ --seed iroha \ - --config-dir ./configs/peer \ + --config-dir ./peer_config \ --outfile docker-compose.dev.yml ``` @@ -39,6 +39,6 @@ iroha_swarm \ --image hyperledger/iroha2:dev \ --peers 5 \ --seed iroha \ - --config-dir ./configs/peer \ + --config-dir ./peer_config \ --outfile docker-compose.dev.yml ``` diff --git a/tools/swarm/src/cli.rs b/tools/swarm/src/cli.rs index fc51fec01be..e72a247ebce 100644 --- a/tools/swarm/src/cli.rs +++ b/tools/swarm/src/cli.rs @@ -35,11 +35,13 @@ pub struct Cli { pub no_banner: bool, /// Path to a directory with Iroha configuration. It will be mapped as volume for containers. /// - /// The directory should contain `config.json` and `genesis.json` + /// The directory should contain `genesis.json` with executor. #[arg(long, short)] pub config_dir: PathBuf, #[command(flatten)] pub source: SourceArgs, + // TODO: add an argument to specify an optional configuration file path? + // or think about other ways for users to customise peers' configuration } #[derive(Args, Debug)] diff --git a/tools/swarm/src/compose.rs b/tools/swarm/src/compose.rs index 7fdf3b0997c..5a4f449a2cd 100644 --- a/tools/swarm/src/compose.rs +++ b/tools/swarm/src/compose.rs @@ -9,21 +9,18 @@ use std::{ use color_eyre::eyre::{eyre, Context, ContextCompat}; use iroha_crypto::{ - error::Error as IrohaCryptoError, KeyGenConfiguration, KeyPair, PrivateKey, PublicKey, + error::Error as IrohaCryptoError, Algorithm, KeyGenConfiguration, KeyPair, PrivateKey, + PublicKey, }; use iroha_data_model::{prelude::PeerId, ChainId}; use iroha_primitives::addr::{socket_addr, SocketAddr}; use peer_generator::Peer; -use serde::{ - ser::{Error as _, SerializeMap}, - Serialize, Serializer, -}; +use serde::{ser::SerializeMap, Serialize, Serializer}; use crate::{cli::SourceParsed, util::AbsolutePath}; /// Config directory inside of the docker image const DIR_CONFIG_IN_DOCKER: &str = "/config"; -const PATH_TO_CONFIG: &str = "/config/config.json"; const PATH_TO_GENESIS: &str = "/config/genesis.json"; const GENESIS_KEYPAIR_SEED: &[u8; 7] = b"genesis"; const COMMAND_SUBMIT_GENESIS: &str = "iroha --submit-genesis"; @@ -297,22 +294,25 @@ pub enum ServiceSource { Build(PathBuf), } +#[serde_with::serde_as] +#[serde_with::skip_serializing_none] #[derive(Serialize, Debug)] #[serde(rename_all = "UPPERCASE")] struct FullPeerEnv { - iroha_chain_id: ChainId, - iroha_config: String, - iroha_public_key: PublicKey, - iroha_private_key: SerializeAsJsonStr, - torii_p2p_addr: SocketAddr, - torii_api_url: SocketAddr, - iroha_genesis_public_key: PublicKey, - #[serde(skip_serializing_if = "Option::is_none")] - iroha_genesis_private_key: Option>, - #[serde(skip_serializing_if = "Option::is_none")] - iroha_genesis_file: Option, - #[serde(skip_serializing_if = "Option::is_none")] - sumeragi_trusted_peers: Option>>, + chain_id: ChainId, + public_key: PublicKey, + private_key_digest: Algorithm, + #[serde_as(as = "serde_with::hex::Hex")] + private_key_payload: Vec, + p2p_address: SocketAddr, + api_address: SocketAddr, + genesis_public_key: PublicKey, + genesis_private_key_digest: Option, + #[serde_as(as = "Option")] + genesis_private_key_payload: Option>, + genesis_file: Option, + #[serde_as(as = "Option")] + sumeragi_trusted_peers: Option>, } struct CompactPeerEnv { @@ -328,53 +328,42 @@ struct CompactPeerEnv { impl From for FullPeerEnv { fn from(value: CompactPeerEnv) -> Self { - let (iroha_genesis_private_key, iroha_genesis_file) = - value - .genesis_private_key - .map_or((None, None), |private_key| { - ( - 
Some(SerializeAsJsonStr(private_key)), - Some(PATH_TO_GENESIS.to_string()), - ) - }); + let (genesis_private_key_digest, genesis_private_key_payload, genesis_file) = value + .genesis_private_key + .map_or((None, None, None), |private_key| { + let (algorithm, payload) = private_key.to_raw(); + ( + Some(algorithm), + Some(payload), + Some(PATH_TO_GENESIS.to_string()), + ) + }); + + let (private_key_digest, private_key_payload) = { + let (algorithm, payload) = value.key_pair.private_key().clone().to_raw(); + (algorithm, payload) + }; Self { - iroha_chain_id: value.chain_id, - iroha_config: PATH_TO_CONFIG.to_string(), - iroha_public_key: value.key_pair.public_key().clone(), - iroha_private_key: SerializeAsJsonStr(value.key_pair.private_key().clone()), - iroha_genesis_public_key: value.genesis_public_key, - iroha_genesis_private_key, - iroha_genesis_file, - torii_p2p_addr: value.p2p_addr, - torii_api_url: value.api_addr, + chain_id: value.chain_id, + public_key: value.key_pair.public_key().clone(), + private_key_digest, + private_key_payload, + genesis_public_key: value.genesis_public_key, + genesis_private_key_digest, + genesis_private_key_payload, + genesis_file, + p2p_address: value.p2p_addr, + api_address: value.api_addr, sumeragi_trusted_peers: if value.trusted_peers.is_empty() { None } else { - Some(SerializeAsJsonStr(value.trusted_peers)) + Some(value.trusted_peers) }, } } } -#[derive(Debug)] -struct SerializeAsJsonStr(T); - -impl serde::Serialize for SerializeAsJsonStr -where - T: serde::Serialize, -{ - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let json = serde_json::to_string(&self.0).map_err(|json_err| { - S::Error::custom(format!("failed to serialize as JSON: {json_err}")) - })?; - serializer.serialize_str(&json) - } -} - #[derive(Debug)] pub struct DockerComposeBuilder<'a> { /// Needed to compute a relative source build path @@ -397,7 +386,7 @@ impl DockerComposeBuilder<'_> { ) })?; - let chain_id = ChainId::new("00000000-0000-0000-0000-000000000000"); + let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000"); let peers = peer_generator::generate_peers(self.peers, self.seed) .wrap_err("Failed to generate peers")?; let genesis_key_pair = generate_key_pair(self.seed, GENESIS_KEYPAIR_SEED) @@ -574,21 +563,17 @@ impl TryFrom for ResolvedImageSource { #[cfg(test)] mod tests { use std::{ - cell::RefCell, collections::{BTreeMap, BTreeSet, HashMap, HashSet}, - env::VarError, - ffi::OsStr, path::{Path, PathBuf}, str::FromStr, }; - use color_eyre::eyre::Context; use iroha_config::{ - base::proxy::{FetchEnv, LoadFromEnv, Override}, - iroha::ConfigurationProxy, + base::{FromEnv, TestEnv, UnwrapPartial}, + parameters::user::{CliContext, RootPartial}, }; use iroha_crypto::{KeyGenConfiguration, KeyPair}; - use iroha_primitives::addr::SocketAddr; + use iroha_primitives::addr::{socket_addr, SocketAddr}; use path_absolutize::Absolutize; use super::*; @@ -603,34 +588,12 @@ mod tests { } } - #[derive(Debug)] - struct TestEnv { - env: HashMap, - /// Set of env variables that weren't fetched yet - untouched: RefCell>, - } - impl From for TestEnv { fn from(peer_env: FullPeerEnv) -> Self { let json = serde_json::to_string(&peer_env).expect("Must be serializable"); - let env: HashMap<_, serde_json::Value> = + let env: HashMap<_, String> = serde_json::from_str(&json).expect("Must be deserializable into a hash map"); - let untouched = env.keys().cloned().collect(); - Self { - env: env - .into_iter() - .map(|(k, v)| { - let s = if let serde_json::Value::String(s) 
= v { - s - } else { - v.to_string() - }; - - (k, s) - }) - .collect(), - untouched: RefCell::new(untouched), - } + Self::with_map(env) } } @@ -641,54 +604,30 @@ mod tests { } } - impl FetchEnv for TestEnv { - fn fetch>(&self, key: K) -> Result { - let key_str = key - .as_ref() - .to_str() - .ok_or_else(|| VarError::NotUnicode(key.as_ref().into()))?; - - let res = self.env.get(key_str).ok_or(VarError::NotPresent).cloned(); - - if res.is_ok() { - self.untouched.borrow_mut().remove(key_str); - } - - res - } - } - - impl TestEnv { - fn assert_everything_covered(&self) { - assert_eq!(*self.untouched.borrow(), HashSet::new()); - } - } - #[test] fn default_config_with_swarm_env_is_exhaustive() { let keypair = KeyPair::generate(); let env: TestEnv = CompactPeerEnv { - chain_id: ChainId::new("00000000-0000-0000-0000-000000000000"), + chain_id: ChainId::from("00000000-0000-0000-0000-000000000000"), key_pair: keypair.clone(), genesis_public_key: keypair.public_key().clone(), genesis_private_key: Some(keypair.private_key().clone()), - p2p_addr: SocketAddr::from_str("127.0.0.1:1337").unwrap(), - api_addr: SocketAddr::from_str("127.0.0.1:1338").unwrap(), + p2p_addr: socket_addr!(127.0.0.1:1337), + api_addr: socket_addr!(127.0.0.1:1338), trusted_peers: BTreeSet::new(), } .into(); - // pretending like we've read `IROHA_CONFIG` env to know the config location - let _ = env.fetch("IROHA_CONFIG").expect("should be presented"); - let proxy = ConfigurationProxy::default() - .override_with(ConfigurationProxy::from_env(&env).expect("valid env")); - - let _cfg = proxy - .build() - .wrap_err("Failed to build configuration") - .expect("Default configuration with swarm's env should be exhaustive"); + let _cfg = RootPartial::from_env(&env) + .expect("valid env") + .unwrap_partial() + .expect("should not fail as input has all required fields") + .parse(CliContext { + submit_genesis: true, + }) + .expect("should not fail as input is valid"); - env.assert_everything_covered(); + assert_eq!(env.unvisited(), HashSet::new()); } #[test] @@ -705,7 +644,7 @@ mod tests { services: { let mut map = BTreeMap::new(); - let chain_id = ChainId::new("00000000-0000-0000-0000-000000000000"); + let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000"); let key_pair = KeyPair::generate_with_configuration(KeyGenConfiguration::from_seed(vec![ 1, 5, 1, 2, 2, 3, 4, 1, 2, 3, @@ -747,6 +686,7 @@ mod tests { }; let actual = serde_yaml::to_string(&compose).expect("Should be serialisable"); + #[allow(clippy::needless_raw_string_hashes)] let expected = expect_test::expect![[r#" version: '3.8' services: @@ -754,15 +694,16 @@ mod tests { build: . 
platform: linux/amd64 environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed012039E5BF092186FACC358770792A493CA98A83740643A3D41389483CF334F748C8 - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"db9d90d20f969177bd5882f9fe211d14d1399d5440d04e3468783d169bbc4a8e39e5bf092186facc358770792a493ca98a83740643a3d41389483cf334f748c8"}' - TORII_P2P_ADDR: iroha1:1339 - TORII_API_URL: iroha1:1338 - IROHA_GENESIS_PUBLIC_KEY: ed012039E5BF092186FACC358770792A493CA98A83740643A3D41389483CF334F748C8 - IROHA_GENESIS_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"db9d90d20f969177bd5882f9fe211d14d1399d5440d04e3468783d169bbc4a8e39e5bf092186facc358770792a493ca98a83740643a3d41389483cf334f748c8"}' - IROHA_GENESIS_FILE: /config/genesis.json + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed012039E5BF092186FACC358770792A493CA98A83740643A3D41389483CF334F748C8 + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: db9d90d20f969177bd5882f9fe211d14d1399d5440d04e3468783d169bbc4a8e39e5bf092186facc358770792a493ca98a83740643a3d41389483cf334f748c8 + P2P_ADDRESS: iroha1:1339 + API_ADDRESS: iroha1:1338 + GENESIS_PUBLIC_KEY: ed012039E5BF092186FACC358770792A493CA98A83740643A3D41389483CF334F748C8 + GENESIS_PRIVATE_KEY_DIGEST: ed25519 + GENESIS_PRIVATE_KEY_PAYLOAD: db9d90d20f969177bd5882f9fe211d14d1399d5440d04e3468783d169bbc4a8e39e5bf092186facc358770792a493ca98a83740643a3d41389483cf334f748c8 + GENESIS_FILE: /config/genesis.json ports: - 1337:1337 - 8080:8080 @@ -776,8 +717,8 @@ mod tests { } #[test] - fn empty_genesis_public_key_is_skipped_in_env() { - let chain_id = ChainId::new("00000000-0000-0000-0000-000000000000"); + fn empty_genesis_private_key_is_skipped_in_env() { + let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000"); let key_pair = KeyPair::generate_with_configuration(KeyGenConfiguration::from_seed(vec![0, 1, 2])) @@ -795,14 +736,15 @@ mod tests { .into(); let actual = serde_yaml::to_string(&env).unwrap(); + #[allow(clippy::needless_raw_string_hashes)] let expected = expect_test::expect![[r#" - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed0120415388A90FA238196737746A70565D041CFB32EAA0C89FF8CB244C7F832A6EBD - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"6bf163fd75192b81a78cb20c5f8cb917f591ac6635f2577e6ca305c27a456a5d415388a90fa238196737746a70565d041cfb32eaa0c89ff8cb244c7f832a6ebd"}' - TORII_P2P_ADDR: iroha0:1337 - TORII_API_URL: iroha0:1337 - IROHA_GENESIS_PUBLIC_KEY: ed0120415388A90FA238196737746A70565D041CFB32EAA0C89FF8CB244C7F832A6EBD + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed0120415388A90FA238196737746A70565D041CFB32EAA0C89FF8CB244C7F832A6EBD + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: 6bf163fd75192b81a78cb20c5f8cb917f591ac6635f2577e6ca305c27a456a5d415388a90fa238196737746a70565d041cfb32eaa0c89ff8cb244c7f832a6ebd + P2P_ADDRESS: iroha0:1337 + API_ADDRESS: iroha0:1337 + GENESIS_PUBLIC_KEY: ed0120415388A90FA238196737746A70565D041CFB32EAA0C89FF8CB244C7F832A6EBD "#]]; expected.assert_eq(&actual); } @@ -838,15 +780,16 @@ mod tests { build: ./iroha-cloned platform: linux/amd64 environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed0120F0321EB4139163C35F88BF78520FF7071499D7F4E79854550028A196C7B49E13 - IROHA_PRIVATE_KEY: 
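The expected environment above shows the new naming scheme: the `IROHA_`/`TORII_` prefixes and the JSON-encoded `IROHA_PRIVATE_KEY` give way to flat `SCREAMING_SNAKE_CASE` keys with separate `*_DIGEST`/`*_PAYLOAD` parts, and the renamed `empty_genesis_private_key_is_skipped_in_env` test checks that unset genesis fields simply disappear from the output. A sketch of how `FullPeerEnv` can produce that shape with serde; the derives here are assumptions, since the struct definition is not part of this hunk:

```rust
use serde::Serialize;

/// Hypothetical reduction of `FullPeerEnv`: field names become the
/// SCREAMING_SNAKE_CASE variables seen in the expected YAML, and the
/// genesis-signing fields drop out of the output entirely when `None`.
#[derive(Serialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
struct PeerEnvSketch {
    chain_id: String,
    public_key: String,
    private_key_digest: String,
    private_key_payload: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    genesis_private_key_digest: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    genesis_private_key_payload: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    genesis_file: Option<String>,
}
```

Serializing such a struct with `serde_yaml` yields the kind of `environment:` mapping asserted above, with no quoting games needed for the key material.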
'{"digest_function":"ed25519","payload":"5f8d1291bf6b762ee748a87182345d135fd167062857aa4f20ba39f25e74c4b0f0321eb4139163c35f88bf78520ff7071499d7f4e79854550028a196c7b49e13"}' - TORII_P2P_ADDR: 0.0.0.0:1337 - TORII_API_URL: 0.0.0.0:8080 - IROHA_GENESIS_PUBLIC_KEY: ed01203420F48A9EEB12513B8EB7DAF71979CE80A1013F5F341C10DCDA4F6AA19F97A9 - IROHA_GENESIS_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"5a6d5f06a90d29ad906e2f6ea8b41b4ef187849d0d397081a4a15ffcbe71e7c73420f48a9eeb12513b8eb7daf71979ce80a1013f5f341c10dcda4f6aa19f97a9"}' - IROHA_GENESIS_FILE: /config/genesis.json + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed0120F0321EB4139163C35F88BF78520FF7071499D7F4E79854550028A196C7B49E13 + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: 5f8d1291bf6b762ee748a87182345d135fd167062857aa4f20ba39f25e74c4b0f0321eb4139163c35f88bf78520ff7071499d7f4e79854550028a196c7b49e13 + P2P_ADDRESS: 0.0.0.0:1337 + API_ADDRESS: 0.0.0.0:8080 + GENESIS_PUBLIC_KEY: ed01203420F48A9EEB12513B8EB7DAF71979CE80A1013F5F341C10DCDA4F6AA19F97A9 + GENESIS_PRIVATE_KEY_DIGEST: ed25519 + GENESIS_PRIVATE_KEY_PAYLOAD: 5a6d5f06a90d29ad906e2f6ea8b41b4ef187849d0d397081a4a15ffcbe71e7c73420f48a9eeb12513b8eb7daf71979ce80a1013f5f341c10dcda4f6aa19f97a9 + GENESIS_FILE: /config/genesis.json SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha2:1339","public_key":"ed0120312C1B7B5DE23D366ADCF23CD6DB92CE18B2AA283C7D9F5033B969C2DC2B92F4"},{"address":"iroha3:1340","public_key":"ed0120854457B2E3D6082181DA73DC01C1E6F93A72D0C45268DC8845755287E98A5DEE"},{"address":"iroha1:1338","public_key":"ed0120A88554AA5C86D28D0EEBEC497235664433E807881CD31E12A1AF6C4D8B0F026C"}]' ports: - 1337:1337 @@ -865,13 +808,13 @@ mod tests { build: ./iroha-cloned platform: linux/amd64 environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed0120A88554AA5C86D28D0EEBEC497235664433E807881CD31E12A1AF6C4D8B0F026C - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"8d34d2c6a699c61e7a9d5aabbbd07629029dfb4f9a0800d65aa6570113edb465a88554aa5c86d28d0eebec497235664433e807881cd31e12a1af6c4d8b0f026c"}' - TORII_P2P_ADDR: 0.0.0.0:1338 - TORII_API_URL: 0.0.0.0:8081 - IROHA_GENESIS_PUBLIC_KEY: ed01203420F48A9EEB12513B8EB7DAF71979CE80A1013F5F341C10DCDA4F6AA19F97A9 + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed0120A88554AA5C86D28D0EEBEC497235664433E807881CD31E12A1AF6C4D8B0F026C + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: 8d34d2c6a699c61e7a9d5aabbbd07629029dfb4f9a0800d65aa6570113edb465a88554aa5c86d28d0eebec497235664433e807881cd31e12a1af6c4d8b0f026c + P2P_ADDRESS: 0.0.0.0:1338 + API_ADDRESS: 0.0.0.0:8081 + GENESIS_PUBLIC_KEY: ed01203420F48A9EEB12513B8EB7DAF71979CE80A1013F5F341C10DCDA4F6AA19F97A9 SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha2:1339","public_key":"ed0120312C1B7B5DE23D366ADCF23CD6DB92CE18B2AA283C7D9F5033B969C2DC2B92F4"},{"address":"iroha3:1340","public_key":"ed0120854457B2E3D6082181DA73DC01C1E6F93A72D0C45268DC8845755287E98A5DEE"},{"address":"iroha0:1337","public_key":"ed0120F0321EB4139163C35F88BF78520FF7071499D7F4E79854550028A196C7B49E13"}]' ports: - 1338:1338 @@ -889,13 +832,13 @@ mod tests { build: ./iroha-cloned platform: linux/amd64 environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed0120312C1B7B5DE23D366ADCF23CD6DB92CE18B2AA283C7D9F5033B969C2DC2B92F4 - IROHA_PRIVATE_KEY: 
'{"digest_function":"ed25519","payload":"cf4515a82289f312868027568c0da0ee3f0fde7fef1b69deb47b19fde7cbc169312c1b7b5de23d366adcf23cd6db92ce18b2aa283c7d9f5033b969c2dc2b92f4"}' - TORII_P2P_ADDR: 0.0.0.0:1339 - TORII_API_URL: 0.0.0.0:8082 - IROHA_GENESIS_PUBLIC_KEY: ed01203420F48A9EEB12513B8EB7DAF71979CE80A1013F5F341C10DCDA4F6AA19F97A9 + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed0120312C1B7B5DE23D366ADCF23CD6DB92CE18B2AA283C7D9F5033B969C2DC2B92F4 + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: cf4515a82289f312868027568c0da0ee3f0fde7fef1b69deb47b19fde7cbc169312c1b7b5de23d366adcf23cd6db92ce18b2aa283c7d9f5033b969c2dc2b92f4 + P2P_ADDRESS: 0.0.0.0:1339 + API_ADDRESS: 0.0.0.0:8082 + GENESIS_PUBLIC_KEY: ed01203420F48A9EEB12513B8EB7DAF71979CE80A1013F5F341C10DCDA4F6AA19F97A9 SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha3:1340","public_key":"ed0120854457B2E3D6082181DA73DC01C1E6F93A72D0C45268DC8845755287E98A5DEE"},{"address":"iroha1:1338","public_key":"ed0120A88554AA5C86D28D0EEBEC497235664433E807881CD31E12A1AF6C4D8B0F026C"},{"address":"iroha0:1337","public_key":"ed0120F0321EB4139163C35F88BF78520FF7071499D7F4E79854550028A196C7B49E13"}]' ports: - 1339:1339 @@ -913,13 +856,13 @@ mod tests { build: ./iroha-cloned platform: linux/amd64 environment: - IROHA_CHAIN_ID: 00000000-0000-0000-0000-000000000000 - IROHA_CONFIG: /config/config.json - IROHA_PUBLIC_KEY: ed0120854457B2E3D6082181DA73DC01C1E6F93A72D0C45268DC8845755287E98A5DEE - IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"ab0e99c2b845b4ac7b3e88d25a860793c7eb600a25c66c75cba0bae91e955aa6854457b2e3d6082181da73dc01c1e6f93a72d0c45268dc8845755287e98a5dee"}' - TORII_P2P_ADDR: 0.0.0.0:1340 - TORII_API_URL: 0.0.0.0:8083 - IROHA_GENESIS_PUBLIC_KEY: ed01203420F48A9EEB12513B8EB7DAF71979CE80A1013F5F341C10DCDA4F6AA19F97A9 + CHAIN_ID: 00000000-0000-0000-0000-000000000000 + PUBLIC_KEY: ed0120854457B2E3D6082181DA73DC01C1E6F93A72D0C45268DC8845755287E98A5DEE + PRIVATE_KEY_DIGEST: ed25519 + PRIVATE_KEY_PAYLOAD: ab0e99c2b845b4ac7b3e88d25a860793c7eb600a25c66c75cba0bae91e955aa6854457b2e3d6082181da73dc01c1e6f93a72d0c45268dc8845755287e98a5dee + P2P_ADDRESS: 0.0.0.0:1340 + API_ADDRESS: 0.0.0.0:8083 + GENESIS_PUBLIC_KEY: ed01203420F48A9EEB12513B8EB7DAF71979CE80A1013F5F341C10DCDA4F6AA19F97A9 SUMERAGI_TRUSTED_PEERS: '[{"address":"iroha2:1339","public_key":"ed0120312C1B7B5DE23D366ADCF23CD6DB92CE18B2AA283C7D9F5033B969C2DC2B92F4"},{"address":"iroha1:1338","public_key":"ed0120A88554AA5C86D28D0EEBEC497235664433E807881CD31E12A1AF6C4D8B0F026C"},{"address":"iroha0:1337","public_key":"ed0120F0321EB4139163C35F88BF78520FF7071499D7F4E79854550028A196C7B49E13"}]' ports: - 1340:1340 diff --git a/torii/Cargo.toml b/torii/Cargo.toml index aa9359f97f1..3b363d6e01c 100644 --- a/torii/Cargo.toml +++ b/torii/Cargo.toml @@ -33,6 +33,7 @@ iroha_logger = { workspace = true } iroha_data_model = { workspace = true, features = ["http"] } iroha_version = { workspace = true, features = ["http"] } iroha_torii_derive = { workspace = true } +iroha_torii_const = { workspace = true } iroha_futures = { workspace = true } iroha_macro = { workspace = true } iroha_schema_gen = { workspace = true, optional = true } diff --git a/torii/const/Cargo.toml b/torii/const/Cargo.toml new file mode 100644 index 00000000000..ccabf87926b --- /dev/null +++ b/torii/const/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "iroha_torii_const" + +edition.workspace = true +version.workspace = true +authors.workspace = true + +description.workspace = true +repository.workspace = true +homepage.workspace = true 
+documentation.workspace = true
+
+license.workspace = true
+keywords.workspace = true
+categories.workspace = true
+
+[lints]
+workspace = true
+
+[dependencies]
+iroha_primitives.workspace = true
\ No newline at end of file
diff --git a/torii/const/src/lib.rs b/torii/const/src/lib.rs
new file mode 100644
index 00000000000..241522c09b6
--- /dev/null
+++ b/torii/const/src/lib.rs
@@ -0,0 +1,38 @@
+//! Constant values used in Torii that might be re-used by client libraries as well.
+
+pub mod uri {
+    //! URIs that Torii uses to route incoming requests.
+
+    /// Default socket address on which Torii listens for external requests
+    pub const DEFAULT_API_ADDR: iroha_primitives::addr::SocketAddr =
+        iroha_primitives::addr::socket_addr!(127.0.0.1:8080);
+    /// Query URI is used to handle incoming Query requests.
+    pub const QUERY: &str = "query";
+    /// Transaction URI is used to handle incoming ISI requests.
+    pub const TRANSACTION: &str = "transaction";
+    /// Consensus URI is used to handle incoming Block requests.
+    pub const CONSENSUS: &str = "consensus";
+    /// Health URI is used to handle incoming Healthcheck requests.
+    pub const HEALTH: &str = "health";
+    /// The URI used for block synchronization.
+    pub const BLOCK_SYNC: &str = "block/sync";
+    /// The web socket URI used to subscribe to block and transaction statuses.
+    pub const SUBSCRIPTION: &str = "events";
+    /// The web socket URI used to subscribe to the blocks stream.
+    pub const BLOCKS_STREAM: &str = "block/stream";
+    /// Get pending transactions.
+    pub const MATCHING_PENDING_TRANSACTIONS: &str = "matching_pending_transactions";
+    /// The URI for inspecting and changing the local configuration
+    pub const CONFIGURATION: &str = "configuration";
+    /// URI to report status for administration
+    pub const STATUS: &str = "status";
+    /// Metrics URI is used to export metrics according to [Prometheus
+    /// Guidance](https://prometheus.io/docs/instrumenting/writing_exporters/).
+    pub const METRICS: &str = "metrics";
+    /// URI for retrieving the schema with which Iroha was built.
+ pub const SCHEMA: &str = "schema"; + /// URI for getting the API version currently used + pub const API_VERSION: &str = "api_version"; + /// URI for getting cpu profile + pub const PROFILE: &str = "debug/pprof/profile"; +} diff --git a/torii/src/lib.rs b/torii/src/lib.rs index 68507798239..700dc700315 100644 --- a/torii/src/lib.rs +++ b/torii/src/lib.rs @@ -13,7 +13,7 @@ use std::{ }; use futures::{stream::FuturesUnordered, StreamExt}; -use iroha_config::torii::{uri, Configuration as ToriiConfiguration}; +use iroha_config::parameters::actual::Torii as Config; use iroha_core::{ kiso::{Error as KisoError, KisoHandle}, kura::Kura, @@ -25,6 +25,7 @@ use iroha_core::{ }; use iroha_data_model::ChainId; use iroha_primitives::addr::SocketAddr; +use iroha_torii_const::uri; use tokio::{sync::Notify, task}; use utils::*; use warp::{ @@ -60,7 +61,7 @@ impl Torii { pub fn new( chain_id: ChainId, kiso: KisoHandle, - config: &ToriiConfiguration, + config: Config, queue: Arc, events: EventsSender, notify_shutdown: Arc, @@ -77,8 +78,8 @@ impl Torii { sumeragi, query_service, kura, - address: config.api_url.clone(), - transaction_max_content_length: config.max_content_len.into(), + address: config.address, + transaction_max_content_length: config.max_content_len_bytes, } } diff --git a/torii/src/routing.rs b/torii/src/routing.rs index f615b82ed60..fe72ff0e27d 100644 --- a/torii/src/routing.rs +++ b/torii/src/routing.rs @@ -8,7 +8,7 @@ #[cfg(feature = "telemetry")] use eyre::{eyre, WrapErr}; use futures::TryStreamExt; -use iroha_config::client_api::ConfigurationDTO; +use iroha_config::client_api::ConfigDTO; use iroha_core::{ query::store::LiveQueryStoreHandle, smartcontracts::query::ValidQueryRequest, sumeragi::SumeragiHandle, @@ -182,10 +182,7 @@ pub async fn handle_get_configuration(kiso: KisoHandle) -> Result { } #[iroha_futures::telemetry_future] -pub async fn handle_post_configuration( - kiso: KisoHandle, - value: ConfigurationDTO, -) -> Result { +pub async fn handle_post_configuration(kiso: KisoHandle, value: ConfigDTO) -> Result { kiso.update_with_dto(value).await?; Ok(reply::with_status(reply::reply(), StatusCode::ACCEPTED)) } @@ -327,22 +324,21 @@ pub async fn handle_version(sumeragi: SumeragiHandle) -> Json { } #[cfg(feature = "telemetry")] -pub fn handle_metrics(sumeragi: &SumeragiHandle) -> Result { +fn update_metrics_gracefully(sumeragi: &SumeragiHandle) { if let Err(error) = sumeragi.update_metrics() { - iroha_logger::error!(%error, "Error while calling sumeragi::update_metrics."); + iroha_logger::error!(%error, "Error while calling `sumeragi::update_metrics`."); } +} + +#[cfg(feature = "telemetry")] +pub fn handle_metrics(sumeragi: &SumeragiHandle) -> Result { + update_metrics_gracefully(sumeragi); sumeragi .metrics() .try_to_string() .map_err(Error::Prometheus) } -fn update_metrics_gracefully(sumeragi: &SumeragiHandle) { - if let Err(error) = sumeragi.update_metrics() { - iroha_logger::error!(%error, "Error while calling `sumeragi::update_metrics`."); - } -} - #[cfg(feature = "telemetry")] #[allow(clippy::unnecessary_wraps)] pub fn handle_status( @@ -424,7 +420,7 @@ pub mod profiling { { // Create profiler guard let guard = pprof::ProfilerGuardBuilder::default() - .frequency(frequency.get().into()) + .frequency(i32::from(frequency.get())) .blocklist(&["libc", "libgcc", "pthread", "vdso"]) .build() .map_err(|e| {
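Since the point of extracting `iroha_torii_const` is that client libraries can share these route constants with the server, a short usage sketch follows; the `endpoint` helper and the `http://` scheme are illustrative, not an API introduced by this patch:

```rust
use iroha_torii_const::uri;

/// Builds a URL for one of Torii's routes. Spelling the path via the
/// shared constant means client and server cannot drift apart.
fn endpoint(base: &str, route: &str) -> String {
    format!("http://{base}/{route}")
}

fn main() {
    // `DEFAULT_API_ADDR` is the `127.0.0.1:8080` socket defined above;
    // rendering it with `to_string()` is assumed here.
    let base = uri::DEFAULT_API_ADDR.to_string();
    println!("{}", endpoint(&base, uri::STATUS)); // http://127.0.0.1:8080/status
    println!("{}", endpoint(&base, uri::METRICS)); // http://127.0.0.1:8080/metrics
}
```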